Merge branch 'develop' into cs/msi-cont

# Conflicts:
#	Cargo.lock
#	src/bins/src/update.rs
#	src/lib-csharp/UpdateManager.cs
#	src/lib-rust/src/lib.rs
#	src/lib-rust/src/manager.rs
Caelan Sayler
2025-05-23 20:21:08 +01:00
126 changed files with 5409 additions and 2371 deletions

View File

@@ -5,7 +5,7 @@
":semanticCommitsDisabled"
],
"labels": ["renovate"],
"schedule": ["on friday"],
"schedule": ["on friday before 11am"],
"timezone": "Europe/London",
"ignorePaths": [
"**/node_modules/**",
@@ -65,9 +65,8 @@
"matchPackageNames": [
"winsafe", // newer versions causes runtime errors in release builds
"System.CommandLine", // too many breaking changes too frequently
"Microsoft.Extensions.Logging.Abstractions", // we have multiple versions of this
"@vercel/webpack-asset-relocator-loader", // an update was incompatible with electron
"xunit.runner.visualstudio" // 20-12-2024: broke tests (something about sn signing maybe?)
"xunit.runner.visualstudio", // 20-12-2024: broke tests (something about sn signing maybe?)
"Microsoft.NET.Test.Sdk", // 23-05-2025: 17.13.0 was the last version which supported net6
],
"dependencyDashboardApproval": true
},

View File

@@ -2,7 +2,7 @@ param(
[string]$version = $(nbgv get-version -v NuGetPackageVersion).Trim()
)
$scriptDir = $PSScriptRoot
$scriptDir = "$PSScriptRoot/.."
$path = Join-Path $scriptDir "Cargo.toml"
Write-Host "Setting version to $version"

View File

@@ -29,43 +29,42 @@ jobs:
key: "rust-build-windows"
- name: Install Dependencies
run: |
rustup toolchain install 1.75.0-x86_64-pc-windows-msvc
rustup target add i686-pc-windows-msvc --toolchain 1.75.0-x86_64-pc-windows-msvc
rustup target add aarch64-pc-windows-msvc --toolchain 1.75.0-x86_64-pc-windows-msvc
rustup target add aarch64-pc-windows-msvc --toolchain nightly-x86_64-pc-windows-msvc
rustup component add rust-src --toolchain nightly-x86_64-pc-windows-msvc
- name: Update Version
shell: pwsh
run: .\set-nbgv-version.ps1
- name: Build Rust Binaries
run: cargo +1.75.0-x86_64-pc-windows-msvc build --target i686-pc-windows-msvc --features windows --release -p velopack_bins
run: ./.github/set-nbgv-version.ps1
- name: Build Rust Binaries (x86)
run: cargo +nightly build --target i686-win7-windows-msvc -Z build-std="core,alloc,std,panic_abort" --features windows --release -p velopack_bins
- name: Upload Rust Build Artifacts
uses: actions/upload-artifact@v4
with:
name: rust-windows-latest
path: |
target\i686-pc-windows-msvc\release\*.exe
target\i686-pc-windows-msvc\release\*.pdb
target\i686-win7-windows-msvc\release\*.exe
target\i686-win7-windows-msvc\release\*.pdb
- name: Build Rust (x86)
run: cargo +1.75.0-x86_64-pc-windows-msvc build --target i686-pc-windows-msvc --release -p velopack_nodeffi -p velopack_libc
run: cargo +nightly build --target i686-win7-windows-msvc -Z build-std="core,alloc,std,panic_abort" --release -p velopack_nodeffi -p velopack_libc
- name: Build Rust (x64)
run: cargo +1.75.0-x86_64-pc-windows-msvc build --target x86_64-pc-windows-msvc --release -p velopack_nodeffi -p velopack_libc
run: cargo +nightly build --target x86_64-win7-windows-msvc -Z build-std="core,alloc,std,panic_abort" --release -p velopack_nodeffi -p velopack_libc
- name: Build Rust (arm64)
run: cargo +1.75.0-x86_64-pc-windows-msvc build --target aarch64-pc-windows-msvc --release -p velopack_nodeffi -p velopack_libc
run: cargo +nightly build --target aarch64-pc-windows-msvc --release -p velopack_nodeffi -p velopack_libc
- name: Collect Artifacts
run: |
move target\i686-pc-windows-msvc\release\velopack_nodeffi.dll target\velopack_nodeffi_win_x86_msvc.node
move target\x86_64-pc-windows-msvc\release\velopack_nodeffi.dll target\velopack_nodeffi_win_x64_msvc.node
move target\i686-win7-windows-msvc\release\velopack_nodeffi.dll target\velopack_nodeffi_win_x86_msvc.node
move target\x86_64-win7-windows-msvc\release\velopack_nodeffi.dll target\velopack_nodeffi_win_x64_msvc.node
move target\aarch64-pc-windows-msvc\release\velopack_nodeffi.dll target\velopack_nodeffi_win_arm64_msvc.node
move target\i686-pc-windows-msvc\release\velopack_libc.dll target\velopack_libc_win_x86_msvc.dll
move target\x86_64-pc-windows-msvc\release\velopack_libc.dll target\velopack_libc_win_x64_msvc.dll
move target\i686-win7-windows-msvc\release\velopack_libc.dll target\velopack_libc_win_x86_msvc.dll
move target\x86_64-win7-windows-msvc\release\velopack_libc.dll target\velopack_libc_win_x64_msvc.dll
move target\aarch64-pc-windows-msvc\release\velopack_libc.dll target\velopack_libc_win_arm64_msvc.dll
move target\i686-pc-windows-msvc\release\velopack_libc.dll.lib target\velopack_libc_win_x86_msvc.dll.lib
move target\x86_64-pc-windows-msvc\release\velopack_libc.dll.lib target\velopack_libc_win_x64_msvc.dll.lib
move target\i686-win7-windows-msvc\release\velopack_libc.dll.lib target\velopack_libc_win_x86_msvc.dll.lib
move target\x86_64-win7-windows-msvc\release\velopack_libc.dll.lib target\velopack_libc_win_x64_msvc.dll.lib
move target\aarch64-pc-windows-msvc\release\velopack_libc.dll.lib target\velopack_libc_win_arm64_msvc.dll.lib
move target\i686-pc-windows-msvc\release\velopack_libc.lib target\velopack_libc_win_x86_msvc.lib
move target\x86_64-pc-windows-msvc\release\velopack_libc.lib target\velopack_libc_win_x64_msvc.lib
move target\i686-win7-windows-msvc\release\velopack_libc.lib target\velopack_libc_win_x86_msvc.lib
move target\x86_64-win7-windows-msvc\release\velopack_libc.lib target\velopack_libc_win_x64_msvc.lib
move target\aarch64-pc-windows-msvc\release\velopack_libc.lib target\velopack_libc_win_arm64_msvc.lib
- name: Upload Rust Build Artifacts
uses: actions/upload-artifact@v4
@@ -75,9 +74,9 @@ jobs:
target\*.node
target\*.dll
target\*.lib
- name: Cancel workflow if failed
uses: andymckay/cancel-action@0.5
if: ${{ failure() }}
# - name: Cancel workflow if failed
# uses: andymckay/cancel-action@0.5
# if: ${{ failure() }}
build-rust-linux:
runs-on: ubuntu-latest
@@ -85,42 +84,60 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: Swatinem/rust-cache@v2
with:
key: "rust-build-linux"
- name: Install Dependencies
run: |
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall cross
rustup target add aarch64-unknown-linux-gnu
sudo apt update
sudo apt install -y g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
mkdir ./artifacts
# rustup target add x86_64-unknown-linux-musl
# rustup target add aarch64-unknown-linux-musl
# sudo apt install -y g++-aarch64-linux-gnu gcc-aarch64-linux-gnu musl-tools musl:arm64
- name: Update Version
shell: pwsh
run: ./set-nbgv-version.ps1
- name: Build Rust (x64)
run: ./.github/set-nbgv-version.ps1
- name: Build Rust Binaries (x64)
run: |
cargo build --release --target x86_64-unknown-linux-gnu
ldd ./target/x86_64-unknown-linux-gnu/release/update || true
cp ./target/x86_64-unknown-linux-gnu/release/update ./target/UpdateNix_x64
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_nodeffi.so ./target/velopack_nodeffi_linux_x64_gnu.node
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_libc.so ./target/velopack_libc_linux_x64_gnu.so
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_libc.a ./target/velopack_libc_linux_x64_gnu.a
- name: Build Rust (arm64)
cargo clean
cross build --release --target x86_64-unknown-linux-musl -p velopack_bins
ldd ./target/x86_64-unknown-linux-musl/release/update || true
cp ./target/x86_64-unknown-linux-musl/release/update ./artifacts/UpdateNix_x64
- name: Build Rust Binaries (arm64)
run: |
cargo build --release --target aarch64-unknown-linux-gnu
ldd ./target/aarch64-unknown-linux-gnu/release/update || true
cp ./target/aarch64-unknown-linux-gnu/release/update ./target/UpdateNix_arm64
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_nodeffi.so ./target/velopack_nodeffi_linux_arm64_gnu.node
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_libc.so ./target/velopack_libc_linux_arm64_gnu.so
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_libc.a ./target/velopack_libc_linux_arm64_gnu.a
cargo clean
cross build --release --target aarch64-unknown-linux-musl -p velopack_bins
ldd ./target/aarch64-unknown-linux-musl/release/update || true
cp ./target/aarch64-unknown-linux-musl/release/update ./artifacts/UpdateNix_arm64
- name: Upload Rust Build Artifacts
uses: actions/upload-artifact@v4
with:
name: rust-ubuntu-latest
path: |
target/UpdateNix*
target/*.so
target/*.node
target/*.a
artifacts/UpdateNix*
- name: Build Rust (x64)
run: |
cargo clean
cargo build --release --target x86_64-unknown-linux-gnu -p velopack_nodeffi -p velopack_libc
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_nodeffi.so ./artifacts/velopack_nodeffi_linux_x64_gnu.node
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_libc.so ./artifacts/velopack_libc_linux_x64_gnu.so
cp ./target/x86_64-unknown-linux-gnu/release/libvelopack_libc.a ./artifacts/velopack_libc_linux_x64_gnu.a
- name: Build Rust (arm64)
run: |
cargo clean
cargo build --release --target aarch64-unknown-linux-gnu -p velopack_nodeffi -p velopack_libc
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_nodeffi.so ./artifacts/velopack_nodeffi_linux_arm64_gnu.node
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_libc.so ./artifacts/velopack_libc_linux_arm64_gnu.so
cp ./target/aarch64-unknown-linux-gnu/release/libvelopack_libc.a ./artifacts/velopack_libc_linux_arm64_gnu.a
- name: Upload Rust Build Artifacts
uses: actions/upload-artifact@v4
with:
name: rust-ubuntu-latest-libs
path: |
artifacts/*.so
artifacts/*.node
artifacts/*.a
- name: Cancel workflow if failed
uses: andymckay/cancel-action@0.5
if: ${{ failure() }}
@@ -140,7 +157,7 @@ jobs:
dotnet tool update -g nbgv
- name: Update Version
shell: pwsh
run: ./set-nbgv-version.ps1
run: ./.github/set-nbgv-version.ps1
- name: Build Rust (x64)
run: |
cargo build --release --target x86_64-apple-darwin
@@ -191,7 +208,7 @@ jobs:
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Check lib-rust
run: cargo check -p velopack -F async,delta
run: cargo check -p velopack -F async
- name: Check lib-nodejs
working-directory: src/lib-nodejs
run: |
@@ -255,7 +272,7 @@ jobs:
merge-multiple: true
- name: Azure login
uses: azure/login@v2
if: github.repository == 'velopack/velopack'
if: github.event.pull_request.head.repo.full_name == github.repository
with:
client-id: ${{ secrets.AZURE_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
@@ -316,7 +333,7 @@ jobs:
setAllVars: true
- name: Update Version
shell: pwsh
run: ./set-nbgv-version.ps1
run: ./.github/set-nbgv-version.ps1
- name: Download Rust Artifacts
uses: actions/download-artifact@v4
with:

Cargo.lock (generated, 827 lines changed)

File diff suppressed because it is too large.

View File

@@ -34,12 +34,12 @@ derivative = "2.2"
glob = "0.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0" }
zip = { version = "2.2", default-features = false, features = ["deflate"] }
zip = { version = "3.0", default-features = false, features = ["deflate"] }
thiserror = "2.0"
lazy_static = "1.5"
regex = "1.10"
normpath = "1.3"
bitflags = "2.6"
bitflags = "2.9"
rand = "0.9"
ts-rs = "10.0"
zstd = "0.13"
@@ -54,7 +54,7 @@ strum = { version = "0.27", features = ["derive"] }
file-rotate = "0.8"
simple-stopwatch = "0.1"
enum-flags = "0.4"
remove_dir_all = { git = "https://github.com/caesay/remove_dir_all.git", features = ["log"] }
remove_dir_all = "1.0"
sha1 = "0.10"
sha2 = "0.10"
sha1_smol = "1.0"
@@ -83,6 +83,13 @@ log-panics = "2.1.0"
core-foundation = "0.10"
core-foundation-sys = "0.8"
uuid = { version = "1.13.1", features = ["v4", "fast-rng", "macro-diagnostics"] }
walkdir = "2.5"
rayon = "1.6"
progress-streams = "1.1"
flate2 = { version = "1.0", default-features = false }
# mtzip = "=4.0.2"
# ripunzip = "=2.0.1"
# zerofrom = "=0.1.5"
# default to small, optimized workspace release binaries
[profile.release]

View File

@@ -6,11 +6,10 @@
---
[![Nuget](https://img.shields.io/nuget/v/Velopack?style=flat-square&logo=nuget&logoColor=white)](https://www.nuget.org/packages/Velopack/)
[![Discord](https://img.shields.io/badge/chat-Discord-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/CjrCrNzd3F)
[![Discord](https://img.shields.io/badge/chat-Discord-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/M6he8ZPAAJ)
[![Build](https://img.shields.io/github/actions/workflow/status/velopack/velopack/build.yml?branch=develop&style=flat-square&logo=github&logoColor=white)](https://github.com/velopack/velopack/actions)
[![Codecov](https://img.shields.io/codecov/c/github/velopack/velopack?style=flat-square&logo=codecov&logoColor=white)](https://app.codecov.io/gh/velopack/velopack)
[![License](https://img.shields.io/github/license/velopack/velopack?style=flat-square)](https://github.com/velopack/velopack/blob/develop/LICENSE)
[![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fvelopack%2Fvelopack&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=true)](https://hits.seeyoufarm.com)
Velopack is an installation and auto-update framework for cross-platform applications. It's opinionated and extremely easy to use, with zero config needed. With just one command you can be up and running with an installable application, and it's lightning fast for your users, too.
@@ -50,4 +49,4 @@ I've used a lot of installer frameworks and Velopack is by far the best. Everyth
[- RandomEngy (Discord)](https://discord.com/channels/767856501477343282/947444323765583913/1200897478036299861)
I'm extremely impressed with Velopack's performance in creating releases, as well as checking for and applying updates. It is significantly faster than other tools. The vpk CLI is intuitive and easy to implement, even with my complex build pipeline. Thanks to Velopack, I've been able to streamline my workflow and save valuable time. It's a fantastic tool that I highly recommend!
[- khdc (Discord)](https://discord.com/channels/767856501477343282/947444323765583913/1216460920696344576)
[- khdc (Discord)](https://discord.com/channels/767856501477343282/947444323765583913/1216460920696344576)

View File

@@ -10,12 +10,12 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Avalonia" Version="11.2.6" />
<PackageReference Include="Avalonia.Desktop" Version="11.2.6" />
<PackageReference Include="Avalonia.Themes.Fluent" Version="11.2.6" />
<PackageReference Include="Avalonia.Fonts.Inter" Version="11.2.6" />
<PackageReference Include="Avalonia" Version="11.3.0" />
<PackageReference Include="Avalonia.Desktop" Version="11.3.0" />
<PackageReference Include="Avalonia.Themes.Fluent" Version="11.3.0" />
<PackageReference Include="Avalonia.Fonts.Inter" Version="11.3.0" />
<!--Condition below is needed to remove Avalonia.Diagnostics package from build output in Release configuration. Keep it in your app.-->
<PackageReference Condition="'$(Configuration)' == 'Debug'" Include="Avalonia.Diagnostics" Version="11.2.6" />
<PackageReference Condition="'$(Configuration)' == 'Debug'" Include="Avalonia.Diagnostics" Version="11.3.0" />
</ItemGroup>
<ItemGroup>

View File

@@ -1,7 +1,7 @@
{
// To update the version of Uno please update the version of the Uno.Sdk here. See https://aka.platform.uno/upgrade-uno-packages for more information.
"msbuild-sdks": {
"Uno.Sdk": "5.6.51"
"Uno.Sdk": "5.6.54"
},
"sdk":{
"allowPrerelease": false

View File

@@ -18,7 +18,7 @@
"@electron/fuses": "^1.8.0",
"@vercel/webpack-asset-relocator-loader": "=1.7.3",
"css-loader": "^7.0.0",
"electron": "35.1.4",
"electron": "36.2.0",
"fork-ts-checker-webpack-plugin": "^9.0.0",
"node-loader": "^2.0.0",
"style-loader": "^4.0.0",
@@ -78,9 +78,9 @@
}
},
"node_modules/@electron-forge/cli": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/cli/-/cli-7.8.0.tgz",
"integrity": "sha512-XZ+Hg7pxeE9pgrahqcpMlND+VH0l0UTZLyO5wkI+YfanNyBQksB2mw24XeEtCA6x8F2IaEYdIGgijmPF6qpjzA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/cli/-/cli-7.8.1.tgz",
"integrity": "sha512-QI3EShutfq9Y+2TWWrPjm4JZM3eSAKzoQvRZdVhAfVpUbyJ8K23VqJShg3kGKlPf9BXHAGvE+8LyH5s2yDr1qA==",
"dev": true,
"funding": [
{
@@ -94,9 +94,9 @@
],
"license": "MIT",
"dependencies": {
"@electron-forge/core": "7.8.0",
"@electron-forge/core-utils": "7.8.0",
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/core": "7.8.1",
"@electron-forge/core-utils": "7.8.1",
"@electron-forge/shared-types": "7.8.1",
"@electron/get": "^3.0.0",
"chalk": "^4.0.0",
"commander": "^11.1.0",
@@ -116,9 +116,9 @@
}
},
"node_modules/@electron-forge/core": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/core/-/core-7.8.0.tgz",
"integrity": "sha512-7byf660ECZND+irOhGxvpmRXjk1bMrsTWh5J2AZMEvaXI8tub9OrZY9VSbi5fcDt0lpHPKmgVk7NRf/ZjJ+beQ==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/core/-/core-7.8.1.tgz",
"integrity": "sha512-jkh0QPW5p0zmruu1E8+2XNufc4UMxy13WLJcm7hn9jbaXKLkMbKuEvhrN1tH/9uGp1mhr/t8sC4N67gP+gS87w==",
"dev": true,
"funding": [
{
@@ -132,17 +132,17 @@
],
"license": "MIT",
"dependencies": {
"@electron-forge/core-utils": "7.8.0",
"@electron-forge/maker-base": "7.8.0",
"@electron-forge/plugin-base": "7.8.0",
"@electron-forge/publisher-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/template-base": "7.8.0",
"@electron-forge/template-vite": "7.8.0",
"@electron-forge/template-vite-typescript": "7.8.0",
"@electron-forge/template-webpack": "7.8.0",
"@electron-forge/template-webpack-typescript": "7.8.0",
"@electron-forge/tracer": "7.8.0",
"@electron-forge/core-utils": "7.8.1",
"@electron-forge/maker-base": "7.8.1",
"@electron-forge/plugin-base": "7.8.1",
"@electron-forge/publisher-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/template-base": "7.8.1",
"@electron-forge/template-vite": "7.8.1",
"@electron-forge/template-vite-typescript": "7.8.1",
"@electron-forge/template-webpack": "7.8.1",
"@electron-forge/template-webpack-typescript": "7.8.1",
"@electron-forge/tracer": "7.8.1",
"@electron/get": "^3.0.0",
"@electron/packager": "^18.3.5",
"@electron/rebuild": "^3.7.0",
@@ -156,6 +156,7 @@
"global-dirs": "^3.0.0",
"got": "^11.8.5",
"interpret": "^3.1.1",
"jiti": "^2.4.2",
"listr2": "^7.0.2",
"lodash": "^4.17.20",
"log-symbols": "^4.0.0",
@@ -171,13 +172,13 @@
}
},
"node_modules/@electron-forge/core-utils": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/core-utils/-/core-utils-7.8.0.tgz",
"integrity": "sha512-ZioRzqkXVOGuwkfvXN/FPZxcssJ9AkOZx6RvxomQn90F77G2KfEbw4ZwAxVTQ+jWNUzydTic5qavWle++Y5IeA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/core-utils/-/core-utils-7.8.1.tgz",
"integrity": "sha512-mRoPLDNZgmjyOURE/K0D3Op53XGFmFRgfIvFC7c9S/BqsRpovVblrqI4XxPRdNmH9dvhd8On9gGz+XIYAKD3aQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"@electron/rebuild": "^3.7.0",
"@malept/cross-spawn-promise": "^2.0.0",
"chalk": "^4.0.0",
@@ -192,13 +193,13 @@
}
},
"node_modules/@electron-forge/maker-base": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-base/-/maker-base-7.8.0.tgz",
"integrity": "sha512-yGRvz70w+NnKO7PhzNFRgYM+x6kxYFgpbChJIQBs3WChd9bGjL+MZLrwYqmxOFLpWNwRAJ6PEi4E/8U5GgV6AQ==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-base/-/maker-base-7.8.1.tgz",
"integrity": "sha512-GUZqschGuEBzSzE0bMeDip65IDds48DZXzldlRwQ+85SYVA6RMU2AwDDqx3YiYsvP2OuxKruuqIJZtOF5ps4FQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"fs-extra": "^10.0.0",
"which": "^2.0.2"
},
@@ -207,14 +208,14 @@
}
},
"node_modules/@electron-forge/maker-deb": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-deb/-/maker-deb-7.8.0.tgz",
"integrity": "sha512-9jjhLm/1IBIo0UuRdELgvBhUkNjK3tHNlUsrqeb8EJwWJZShbPwHYZJj+VbgjQfJFFzhHwBBDJViBXJ/4ePv+g==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-deb/-/maker-deb-7.8.1.tgz",
"integrity": "sha512-tjjeesQtCP5Xht1X7gl4+K9bwoETPmQfBkOVAY/FZIxPj40uQh/hOUtLX2tYENNGNVZ1ryDYRs8TuPi+I41Vfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/maker-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/maker-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
@@ -224,14 +225,14 @@
}
},
"node_modules/@electron-forge/maker-rpm": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-rpm/-/maker-rpm-7.8.0.tgz",
"integrity": "sha512-oTH951NE39LOX2wYMg+C06vBZDWUP/0dsK01PlXEl5e5YfQM5Cifsk3E7BzE6BpZdWRJL3k/ETqpyYeIGNb1jw==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-rpm/-/maker-rpm-7.8.1.tgz",
"integrity": "sha512-TF6wylft3BHkw9zdHcxmjEPBZYgTIc0jE31skFnMEQ/aExbNRiNaCZvsXy+7ptTWZxhxUKRc9KHhLFRMCmOK8g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/maker-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/maker-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
@@ -241,14 +242,14 @@
}
},
"node_modules/@electron-forge/maker-zip": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-zip/-/maker-zip-7.8.0.tgz",
"integrity": "sha512-7MLD7GkZdlGecC9GvgBu0sWYt48p3smYvr+YCwlpdH1CTeLmWhvCqeH33a2AB0XI5CY8U8jnkG2jgdTkzr/EQw==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/maker-zip/-/maker-zip-7.8.1.tgz",
"integrity": "sha512-unIxEoV1lnK4BLVqCy3L2y897fTyg8nKY1WT4rrpv0MUKnQG4qmigDfST5zZNNHHaulEn/ElAic2GEiP7d6bhQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/maker-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/maker-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1",
"cross-zip": "^4.0.0",
"fs-extra": "^10.0.0",
"got": "^11.8.5"
@@ -258,41 +259,41 @@
}
},
"node_modules/@electron-forge/plugin-auto-unpack-natives": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-auto-unpack-natives/-/plugin-auto-unpack-natives-7.8.0.tgz",
"integrity": "sha512-JGal5ltZmbTQ5rNq67OgGC4MJ2zjjFW0fqykHy8X9J8cgaH7SRdKkT4yYZ8jH01IAF1J57FD2zIob1MvcBqjcg==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-auto-unpack-natives/-/plugin-auto-unpack-natives-7.8.1.tgz",
"integrity": "sha512-4URAgWX9qqqKe6Bfad0VmpFRrwINYMODfKGd2nFQrfHxmBtdpXnsWlLwVGE/wGssIQaTMI5bWQ6F2RNeXTgnhA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/plugin-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/plugin-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
}
},
"node_modules/@electron-forge/plugin-base": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-base/-/plugin-base-7.8.0.tgz",
"integrity": "sha512-rDeeChRWIp5rQVo3Uc1q0ncUvA+kWWURW7tMuQjPvy2qVSgX+jIf5krk+T1Dp06+D4YZzEIrkibRaamAaIcR1w==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-base/-/plugin-base-7.8.1.tgz",
"integrity": "sha512-iCZC2d7CbsZ9l6j5d+KPIiyQx0U1QBfWAbKnnQhWCSizjcrZ7A9V4sMFZeTO6+PVm48b/r9GFPm+slpgZtYQLg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
}
},
"node_modules/@electron-forge/plugin-fuses": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-fuses/-/plugin-fuses-7.8.0.tgz",
"integrity": "sha512-ZxFtol3aHNY+oYrZWa7EDBLl4uk/+NlOCJmqC7C32R/3S/Kn2ebVRxpLwrFM12KtHeD+Z3gmZNBhwOe0TECgOA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-fuses/-/plugin-fuses-7.8.1.tgz",
"integrity": "sha512-dYTwvbV1HcDOIQ0wTybpdtPq6YoBYXIWBTb7DJuvFu/c/thj1eoEdnbwr8mT9hEivjlu5p4ls46n16P5EtZ0oA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/plugin-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/plugin-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
@@ -302,16 +303,16 @@
}
},
"node_modules/@electron-forge/plugin-webpack": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-webpack/-/plugin-webpack-7.8.0.tgz",
"integrity": "sha512-9X/+OLoGgzCjqdDAT12O1UQeS+P0RjoEdD4ms53yiWUaKAwrSI4rJeb4gJBrLuYxUH5jcLD7mzQj+yDswZfGNQ==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/plugin-webpack/-/plugin-webpack-7.8.1.tgz",
"integrity": "sha512-4SqQyX7abx6wcMSB8JwsM6gm72r3/8b//JcYZxWihYaqoz9ZMWQqci47FFSpncRlYZjUi7mbRpC2dSAjuQks2A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/core-utils": "7.8.0",
"@electron-forge/plugin-base": "7.8.0",
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/web-multi-logger": "7.8.0",
"@electron-forge/core-utils": "7.8.1",
"@electron-forge/plugin-base": "7.8.1",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/web-multi-logger": "7.8.1",
"chalk": "^4.0.0",
"debug": "^4.3.1",
"fast-glob": "^3.2.7",
@@ -327,26 +328,26 @@
}
},
"node_modules/@electron-forge/publisher-base": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/publisher-base/-/publisher-base-7.8.0.tgz",
"integrity": "sha512-wrZyptJ0Uqvlh2wYzDZfIu2HgCQ+kdGiBlcucmLY4W+GUqf043O8cbYso3D9NXQxOow55QC/1saCQkgLphprPA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/publisher-base/-/publisher-base-7.8.1.tgz",
"integrity": "sha512-z2C+C4pcFxyCXIFwXGDcxhU8qtVUPZa3sPL6tH5RuMxJi77768chLw2quDWk2/dfupcSELXcOMYCs7aLysCzeQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0"
"@electron-forge/shared-types": "7.8.1"
},
"engines": {
"node": ">= 16.4.0"
}
},
"node_modules/@electron-forge/shared-types": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/shared-types/-/shared-types-7.8.0.tgz",
"integrity": "sha512-Ul+7HPvAZiAirqpZm0vc9YvlkAE+2bcrI10p3t50mEtuxn5VO/mB72NXiEKfWzHm8F31JySIe9bUV6s1MHQcCw==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/shared-types/-/shared-types-7.8.1.tgz",
"integrity": "sha512-guLyGjIISKQQRWHX+ugmcjIOjn2q/BEzCo3ioJXFowxiFwmZw/oCZ2KlPig/t6dMqgUrHTH5W/F0WKu0EY4M+Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/tracer": "7.8.0",
"@electron-forge/tracer": "7.8.1",
"@electron/packager": "^18.3.5",
"@electron/rebuild": "^3.7.0",
"listr2": "^7.0.2"
@@ -356,14 +357,14 @@
}
},
"node_modules/@electron-forge/template-base": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/template-base/-/template-base-7.8.0.tgz",
"integrity": "sha512-hc8NwoDqEEmZFH/p0p3MK/7xygMmI+cm8Gavoj2Mr2xS7VUUu4r3b5PwIGKvkLfPG34uwsiVwtid2t1rWGF4UA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/template-base/-/template-base-7.8.1.tgz",
"integrity": "sha512-k8jEUr0zWFWb16ZGho+Es2OFeKkcbTgbC6mcH4eNyF/sumh/4XZMcwRtX1i7EiZAYiL9sVxyI6KVwGu254g+0g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/core-utils": "7.8.0",
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/core-utils": "7.8.1",
"@electron-forge/shared-types": "7.8.1",
"@malept/cross-spawn-promise": "^2.0.0",
"debug": "^4.3.1",
"fs-extra": "^10.0.0",
@@ -374,14 +375,14 @@
}
},
"node_modules/@electron-forge/template-vite": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/template-vite/-/template-vite-7.8.0.tgz",
"integrity": "sha512-bf/jd8WzD0gU7Jet+WSi0Lm0SQmseb08WY27ZfJYEs2EVNMiwDfPicgQnOaqP++2yTrXhj1OY/rolZCP9CUyVw==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/template-vite/-/template-vite-7.8.1.tgz",
"integrity": "sha512-qzSlJaBYYqQAbBdLk4DqAE3HCNz4yXbpkb+VC74ddL4JGwPdPU57DjCthr6YetKJ2FsOVy9ipovA8HX5UbXpAg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/template-base": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/template-base": "7.8.1",
"fs-extra": "^10.0.0"
},
"engines": {
@@ -389,14 +390,14 @@
}
},
"node_modules/@electron-forge/template-vite-typescript": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/template-vite-typescript/-/template-vite-typescript-7.8.0.tgz",
"integrity": "sha512-kW3CaVxKHUYuVfY+rT3iepeZ69frBRGh3YZOngLY2buCvGIqNEx+VCgrFBRDDbOKGmwQtwO1E9wp2rtC8q6Ztg==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/template-vite-typescript/-/template-vite-typescript-7.8.1.tgz",
"integrity": "sha512-CccQhwUjZcc6svzuOi3BtbDal591DzyX2J5GPa6mwVutDP8EMtqJL1VyOHdcWO/7XjI6GNAD0fiXySOJiUAECA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/template-base": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/template-base": "7.8.1",
"fs-extra": "^10.0.0"
},
"engines": {
@@ -404,14 +405,14 @@
}
},
"node_modules/@electron-forge/template-webpack": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/template-webpack/-/template-webpack-7.8.0.tgz",
"integrity": "sha512-AdLGC6NVgrd7Q0SaaeiwJKmSBjN6C2EHxZgLMy1yxNSpazU9m3DtYQilDjXqmCWfxkeNzdke0NaeDvLgdJSw5A==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/template-webpack/-/template-webpack-7.8.1.tgz",
"integrity": "sha512-DA77o9kTCHrq+W211pyNP49DyAt0d1mzMp2gisyNz7a+iKvlv2DsMAeRieLoCQ44akb/z8ZsL0YLteSjKLy4AA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/template-base": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/template-base": "7.8.1",
"fs-extra": "^10.0.0"
},
"engines": {
@@ -419,14 +420,14 @@
}
},
"node_modules/@electron-forge/template-webpack-typescript": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/template-webpack-typescript/-/template-webpack-typescript-7.8.0.tgz",
"integrity": "sha512-Pl8l+gv3HzqCfFIMLxlEsoAkNd0VEWeZZ675SYyqs0/kBQUifn0bKNhVE4gUZwKGgQCcG1Gvb23KdVGD3H3XmA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/template-webpack-typescript/-/template-webpack-typescript-7.8.1.tgz",
"integrity": "sha512-h922E+6zWwym1RT6WKD79BLTc4H8YxEMJ7wPWkBX59kw/exsTB/KFdiJq6r82ON5jSJ+Q8sDGqSmDWdyCfo+Gg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@electron-forge/shared-types": "7.8.0",
"@electron-forge/template-base": "7.8.0",
"@electron-forge/shared-types": "7.8.1",
"@electron-forge/template-base": "7.8.1",
"fs-extra": "^10.0.0"
},
"engines": {
@@ -434,9 +435,9 @@
}
},
"node_modules/@electron-forge/tracer": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/tracer/-/tracer-7.8.0.tgz",
"integrity": "sha512-t4fIATZEX6/7PJNfyh6tLzKEsNMpO01Nz/rgHWBxeRvjCw5UNul9OOxoM7b43vfFAO9Jv++34oI3VJ09LeVQ2Q==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/tracer/-/tracer-7.8.1.tgz",
"integrity": "sha512-r2i7aHVp2fylGQSPDw3aTcdNfVX9cpL1iL2MKHrCRNwgrfR+nryGYg434T745GGm1rNQIv5Egdkh5G9xf00oWA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -447,9 +448,9 @@
}
},
"node_modules/@electron-forge/web-multi-logger": {
"version": "7.8.0",
"resolved": "https://registry.npmjs.org/@electron-forge/web-multi-logger/-/web-multi-logger-7.8.0.tgz",
"integrity": "sha512-2nUP7O9auXDsoa185AsZPlIbpargj1lNFweNH1Lch1MCwLlJOI9ZJHiCTAB4qviS4usRs00WeebWg/uN/zOWvA==",
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/@electron-forge/web-multi-logger/-/web-multi-logger-7.8.1.tgz",
"integrity": "sha512-Z8oU39sbrVDvyk0yILBqL0CFIysVlxkM5m4RWyeo+GLoc/t4LYAhGLSquFTOD1t20nzqZzgzG8M56zIgYuyX1w==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -764,9 +765,9 @@
}
},
"node_modules/@electron/rebuild": {
"version": "3.7.1",
"resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.1.tgz",
"integrity": "sha512-sKGD+xav4Gh25+LcLY0rjIwcCFTw+f/HU1pB48UVbwxXXRGaXEqIH0AaYKN46dgd/7+6kuiDXzoyAEvx1zCsdw==",
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.2.tgz",
"integrity": "sha512-19/KbIR/DAxbsCkiaGMXIdPnMCJLkcf8AvGnduJtWBs/CBwiAjY1apCqOLVxrXg+rtXFCngbXhBanWjxLUt1Mg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -793,9 +794,9 @@
}
},
"node_modules/@electron/universal": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@electron/universal/-/universal-2.0.2.tgz",
"integrity": "sha512-mqY1szx5/d5YLvfCDWWoJdkSIjIz+NdWN4pN0r78lYiE7De+slLpuF3lVxIT+hlJnwk5sH2wFRMl6/oUgUVO3A==",
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@electron/universal/-/universal-2.0.3.tgz",
"integrity": "sha512-Wn9sPYIVFRFl5HmwMJkARCCf7rqK/EurkfQ/rJZ14mHP3iYTjZSIOSVonEAnhWeAXwtw7zOekGRlc6yTtZ0t+g==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -853,9 +854,9 @@
}
},
"node_modules/@electron/windows-sign": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.1.tgz",
"integrity": "sha512-YfASnrhJ+ve6Q43ZiDwmpBgYgi2u0bYjeAVi2tDfN7YWAKO8X9EEOuPGtqbJpPLM6TfAHimghICjWe2eaJ8BAg==",
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.2.tgz",
"integrity": "sha512-dfZeox66AvdPtb2lD8OsIIQh12Tp0GNCRUDfBHIKGpbmopZto2/A8nSpYYLoedPIHpqkeblZ/k8OV0Gy7PYuyQ==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
@@ -3137,9 +3138,9 @@
}
},
"node_modules/detect-libc": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
"integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz",
"integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==",
"dev": true,
"license": "Apache-2.0",
"engines": {
@@ -3297,9 +3298,9 @@
"license": "MIT"
},
"node_modules/electron": {
"version": "35.1.4",
"resolved": "https://registry.npmjs.org/electron/-/electron-35.1.4.tgz",
"integrity": "sha512-8HjE2wqxY//T09Of8k1eTpK/NeTG2FkTyRD+fyKXmec4wZVscGgZcmWFC0HYN4ktyHAjtplpxdFXjtqRnvzBMg==",
"version": "36.2.0",
"resolved": "https://registry.npmjs.org/electron/-/electron-36.2.0.tgz",
"integrity": "sha512-5yldoRjBKxPQfI0QMX+qq750o3Nl8N1SZnJqOPMq0gZ6rIJ+7y4ZLp808GrFwjfTm05TYgq3GSD8FGuKQZqwEw==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
@@ -5732,6 +5733,16 @@
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/jiti": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-2.4.2.tgz",
"integrity": "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==",
"dev": true,
"license": "MIT",
"bin": {
"jiti": "lib/jiti-cli.mjs"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -6465,9 +6476,9 @@
}
},
"node_modules/node-abi": {
"version": "3.74.0",
"resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.74.0.tgz",
"integrity": "sha512-c5XK0MjkGBrQPGYG24GBADZud0NCbznxNx0ZkS+ebUTrmV1qTDxPxSL8zEAPURXSbLRWVexxmP4986BziahL5w==",
"version": "3.75.0",
"resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.75.0.tgz",
"integrity": "sha512-OhYaY5sDsIka7H7AtijtI9jwGYLyl29eQn/W623DiN/MIv5sUqc4g7BIDThX+gb7di9f6xK02nkp8sdfFWZLTg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -9020,9 +9031,9 @@
}
},
"node_modules/typescript": {
"version": "5.8.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz",
"integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==",
"version": "5.8.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz",
"integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
"dev": true,
"license": "Apache-2.0",
"bin": {

View File

@@ -23,7 +23,7 @@
"@electron/fuses": "^1.8.0",
"@vercel/webpack-asset-relocator-loader": "=1.7.3",
"css-loader": "^7.0.0",
"electron": "35.1.4",
"electron": "36.2.0",
"fork-ts-checker-webpack-plugin": "^9.0.0",
"node-loader": "^2.0.0",
"style-loader": "^4.0.0",

View File

@@ -101,9 +101,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.97"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
[[package]]
name = "approx"
@@ -1041,7 +1041,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
"libc",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -2487,9 +2487,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.21.2"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2806eaa3524762875e21c3dcd057bc4b7bfa01ce4da8d46be1cd43649e1cc6b"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "orbclient"
@@ -3035,7 +3035,7 @@ dependencies = [
"errno",
"libc",
"linux-raw-sys 0.4.15",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -3469,7 +3469,7 @@ dependencies = [
"getrandom 0.3.1",
"once_cell",
"rustix",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -3831,21 +3831,23 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "uuid"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.1",
"js-sys",
"rand 0.9.0",
"serde",
"uuid-macro-internal",
"wasm-bindgen",
]
[[package]]
name = "uuid-macro-internal"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72dcd78c4f979627a754f5522cea6e6a25e55139056535fe6e69c506cd64a862"
checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7"
dependencies = [
"proc-macro2",
"quote",
@@ -3887,7 +3889,7 @@ dependencies = [
"ureq",
"url",
"uuid",
"windows 0.60.0",
"windows 0.61.1",
"xml",
"zip",
"zstd",
@@ -4300,7 +4302,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -4335,12 +4337,12 @@ dependencies = [
[[package]]
name = "windows"
version = "0.60.0"
version = "0.61.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddf874e74c7a99773e62b1c671427abf01a425e77c3d3fb9fb1e4883ea934529"
checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419"
dependencies = [
"windows-collections",
"windows-core 0.60.1",
"windows-core 0.61.0",
"windows-future",
"windows-link",
"windows-numerics",
@@ -4348,11 +4350,11 @@ dependencies = [
[[package]]
name = "windows-collections"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5467f79cc1ba3f52ebb2ed41dbb459b8e7db636cc3429458d9a852e15bc24dec"
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
dependencies = [
"windows-core 0.60.1",
"windows-core 0.61.0",
]
[[package]]
@@ -4366,9 +4368,9 @@ dependencies = [
[[package]]
name = "windows-core"
version = "0.60.1"
version = "0.61.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca21a92a9cae9bf4ccae5cf8368dce0837100ddf6e6d57936749e85f152f6247"
checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980"
dependencies = [
"windows-implement",
"windows-interface",
@@ -4379,19 +4381,19 @@ dependencies = [
[[package]]
name = "windows-future"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a787db4595e7eb80239b74ce8babfb1363d8e343ab072f2ffe901400c03349f0"
checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32"
dependencies = [
"windows-core 0.60.1",
"windows-core 0.61.0",
"windows-link",
]
[[package]]
name = "windows-implement"
version = "0.59.0"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1"
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
@@ -4400,9 +4402,9 @@ dependencies = [
[[package]]
name = "windows-interface"
version = "0.59.0"
version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb26fd936d991781ea39e87c3a27285081e3c0da5ca0fcbc02d368cc6f52ff01"
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
@@ -4411,34 +4413,34 @@ dependencies = [
[[package]]
name = "windows-link"
version = "0.1.0"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3"
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
[[package]]
name = "windows-numerics"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "005dea54e2f6499f2cee279b8f703b3cf3b5734a2d8d21867c8f44003182eeed"
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
dependencies = [
"windows-core 0.60.1",
"windows-core 0.61.0",
"windows-link",
]
[[package]]
name = "windows-result"
version = "0.3.1"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06374efe858fab7e4f881500e6e86ec8bc28f9462c47e5a9941a0142ad86b189"
checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.3.1"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319"
checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97"
dependencies = [
"windows-link",
]

View File

@@ -63,6 +63,13 @@ wait-timeout.workspace = true
pretty-bytes-rust.workspace = true
enum-flags.workspace = true
log-panics.workspace = true
zstd.workspace = true
zip.workspace = true
walkdir.workspace = true
sha1_smol.workspace = true
rayon.workspace = true
progress-streams.workspace = true
flate2.workspace = true
[target.'cfg(target_os="linux")'.dependencies]
waitpid-any.workspace = true
@@ -101,6 +108,7 @@ windows = { workspace = true, features = [
"Win32_UI_Shell_Common",
"Win32_UI_Shell_PropertiesSystem",
"Win32_UI_WindowsAndMessaging",
"Win32_System_ApplicationInstallationAndServicing",
"Win32_System_Kernel",
"Wdk",
"Wdk_System",
@@ -116,7 +124,6 @@ same-file.workspace = true
tempfile.workspace = true
ntest.workspace = true
pretty_assertions.workspace = true
sha1_smol.workspace = true
[build-dependencies]
semver.workspace = true

View File

@@ -4,6 +4,9 @@ pub use apply::*;
mod start;
pub use start::*;
mod patch;
pub use patch::*;
#[cfg(target_os = "linux")]
mod apply_linux_impl;
#[cfg(target_os = "macos")]

View File

@@ -0,0 +1,184 @@
use crate::shared::fastzip;
use anyhow::{anyhow, bail, Result};
use std::{
collections::HashSet,
fs, io,
path::{Path, PathBuf},
};
pub fn zstd_patch_single<P1: AsRef<Path>, P2: AsRef<Path>, P3: AsRef<Path>>(old_file: P1, patch_file: P2, output_file: P3) -> Result<()> {
let old_file = old_file.as_ref();
let patch_file = patch_file.as_ref();
let output_file = output_file.as_ref();
if !old_file.exists() {
bail!("Old file does not exist: {}", old_file.to_string_lossy());
}
if !patch_file.exists() {
bail!("Patch file does not exist: {}", patch_file.to_string_lossy());
}
let dict = fs::read(old_file)?;
// info!("Loading Dictionary (Size: {})", dict.len());
let patch = fs::OpenOptions::new().read(true).open(patch_file)?;
let patch_reader = io::BufReader::new(patch);
let mut decoder = zstd::Decoder::with_dictionary(patch_reader, &dict)?;
let window_log = fio_highbit64(dict.len() as u64) + 1;
if window_log >= 27 {
info!("Large File detected. Overriding windowLog to {}", window_log);
decoder.window_log_max(window_log)?;
}
// info!("Decoder loaded. Beginning patch...");
let mut output = fs::OpenOptions::new().write(true).create(true).truncate(true).open(output_file)?;
io::copy(&mut decoder, &mut output)?;
// info!("Patch applied successfully.");
Ok(())
}
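
// Editor's sketch of the matching diff-creation side (hypothetical, not part
// of this commit): assuming the packer produces these patches with
// `zstd::Encoder::with_dictionary`, using the old file as the compression
// dictionary, mirroring the `Decoder::with_dictionary` call above.
#[allow(dead_code)]
fn zstd_diff_single<P1: AsRef<Path>, P2: AsRef<Path>, P3: AsRef<Path>>(old_file: P1, new_file: P2, patch_file: P3) -> Result<()> {
    let dict = fs::read(old_file)?; // the old file doubles as the dictionary
    let patch_out = fs::File::create(patch_file)?;
    let mut encoder = zstd::Encoder::with_dictionary(patch_out, 19, &dict)?;
    let mut new_reader = io::BufReader::new(fs::File::open(new_file)?);
    io::copy(&mut new_reader, &mut encoder)?;
    encoder.finish()?; // flushes the zstd frame and returns the inner writer
    Ok(())
}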
fn fio_highbit64(v: u64) -> u32 {
let mut count: u32 = 0;
let mut v = v;
v >>= 1;
while v > 0 {
v >>= 1;
count += 1;
}
return count;
}
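
// Editor's note: `fio_highbit64` computes floor(log2(v)), i.e. the index of
// the highest set bit (equivalent to `63 - v.leading_zeros()` for v > 0).
// For a 1 GiB dictionary (2^30 bytes) the window_log above becomes 30 + 1 = 31,
// which exceeds the 27 threshold and triggers the decoder override.
#[test]
fn test_fio_highbit64() {
    assert_eq!(fio_highbit64(1), 0);
    assert_eq!(fio_highbit64(2), 1);
    assert_eq!(fio_highbit64(3), 1);
    assert_eq!(fio_highbit64(1 << 30), 30);
    assert_eq!(fio_highbit64(u64::MAX), 63);
}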
pub fn delta<P1: AsRef<Path>, P2: AsRef<Path>, P3: AsRef<Path>>(
old_file: P1,
delta_files: Vec<&PathBuf>,
temp_dir: P2,
output_file: P3,
) -> Result<()> {
let old_file = old_file.as_ref().to_path_buf();
let temp_dir = temp_dir.as_ref().to_path_buf();
let output_file = output_file.as_ref().to_path_buf();
if !old_file.exists() {
bail!("Old file does not exist: {}", old_file.to_string_lossy());
}
if delta_files.is_empty() {
bail!("No delta files provided.");
}
for delta_file in &delta_files {
if !delta_file.exists() {
bail!("Delta file does not exist: {}", delta_file.to_string_lossy());
}
}
let time = simple_stopwatch::Stopwatch::start_new();
info!("Extracting base package for delta patching: {}", temp_dir.to_string_lossy());
let work_dir = temp_dir.join("_work");
fs::create_dir_all(&work_dir)?;
fastzip::extract_to_directory(&old_file, &work_dir, None)?;
info!("Base package extracted. {} delta packages to apply.", delta_files.len());
for (i, delta_file) in delta_files.iter().enumerate() {
info!("{}: extracting apply delta patch: {}", i, delta_file.to_string_lossy());
let delta_dir = temp_dir.join(format!("delta_{}", i));
fs::create_dir_all(&delta_dir)?;
fastzip::extract_to_directory(&delta_file, &delta_dir, None)?;
let delta_relative_paths = fastzip::enumerate_files_relative(&delta_dir);
let mut visited_paths = HashSet::new();
// apply all the zsdiff patches for files which exist in both the delta and the base package
for relative_path in &delta_relative_paths {
if relative_path.starts_with("lib") {
let file_name = relative_path.file_name().ok_or(anyhow!("Failed to get file name"))?;
let file_name_str = file_name.to_string_lossy();
if file_name_str.ends_with(".zsdiff") || file_name_str.ends_with(".diff") || file_name_str.ends_with(".bsdiff") {
// this is a zsdiff patch, we need to apply it to the old file
let file_without_extension = relative_path.with_extension("");
// let shasum_path = delta_dir.join(relative_path).with_extension("shasum");
let old_file_path = work_dir.join(&file_without_extension);
let patch_file_path = delta_dir.join(&relative_path);
let output_file_path = delta_dir.join(&file_without_extension);
visited_paths.insert(file_without_extension);
if fs::metadata(&patch_file_path)?.len() == 0 {
// file has not changed, so we can continue.
continue;
}
if file_name_str.ends_with(".zsdiff") {
info!("{}: applying zsdiff patch: {:?}", i, relative_path);
zstd_patch_single(&old_file_path, &patch_file_path, &output_file_path)?;
} else {
bail!("Unsupported patch format: {:?}", relative_path);
}
fs::rename(&output_file_path, &old_file_path)?;
} else if file_name_str.ends_with(".shasum") {
// skip shasum files
} else {
// if this file is inside the lib folder without a known extension, it is a new file
let file_path = delta_dir.join(relative_path);
let dest_path = work_dir.join(relative_path);
info!("{}: new file: {:?}", i, relative_path);
fs::copy(&file_path, &dest_path)?;
visited_paths.insert(relative_path.clone());
}
} else {
// if this file is not inside the lib folder, we always copy it over
let file_path = delta_dir.join(relative_path);
let dest_path = work_dir.join(relative_path);
info!("{}: copying metadata file: {:?}", i, relative_path);
fs::copy(&file_path, &dest_path)?;
visited_paths.insert(relative_path.clone());
}
}
// anything in the work dir which was not visited is an old / deleted file and should be removed
let workdir_relative_paths = fastzip::enumerate_files_relative(&work_dir);
for relative_path in &workdir_relative_paths {
if !visited_paths.contains(relative_path) {
let file_to_delete = work_dir.join(relative_path);
info!("{}: deleting old/removed file: {:?}", i, relative_path);
let _ = fs::remove_file(file_to_delete); // soft error
}
}
}
info!("All delta patches applied. Asembling output package at: {}", output_file.to_string_lossy());
fastzip::compress_directory(&work_dir, &output_file, fastzip::CompressionLevel::fast())?;
info!("Successfully applied {} delta patches in {}s.", delta_files.len(), time.s());
Ok(())
}
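
// Editor's usage sketch (hypothetical file names, not part of this commit):
// apply two sequential delta packages on top of a full base package to
// reassemble the newest full package.
#[allow(dead_code)]
fn example_apply_deltas() -> Result<()> {
    let deltas = vec![PathBuf::from("MyApp-1.0.1-delta.nupkg"), PathBuf::from("MyApp-1.0.2-delta.nupkg")];
    let delta_refs: Vec<&PathBuf> = deltas.iter().collect();
    delta(
        "MyApp-1.0.0-full.nupkg", // base (old) full package
        delta_refs,               // applied in order, oldest first
        "temp_delta_work",        // scratch directory for extraction
        "MyApp-1.0.2-full.nupkg", // reassembled output package
    )
}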
// NOTE: this is some code to do checksum verification, but it is not being used
// by the current implementation because zstd patching already has checksum verification
//
// let actual_checksum = get_sha1(&output_file_path);
// let expected_checksum = load_release_entry_shasum(&shasum_path)?;
//
// if !actual_checksum.eq_ignore_ascii_case(&expected_checksum) {
// bail!("Checksum mismatch for: {:?}. Expected: {}, Actual: {}", relative_path, expected_checksum, actual_checksum);
// }
// fn load_release_entry_shasum(file: &PathBuf) -> Result<String> {
// let raw_text = fs::read_to_string(file)?.trim().to_string();
// let first_word = raw_text.splitn(2, ' ').next().unwrap();
// let cleaned = first_word.trim().trim_matches(|c: char| !c.is_ascii_hexdigit());
// Ok(cleaned.to_string())
// }
//
// fn get_sha1(file: &PathBuf) -> String {
// let file_bytes = fs::read(file).unwrap();
// let mut sha1 = sha1_smol::Sha1::new();
// sha1.update(&file_bytes);
// sha1.digest().to_string()
// }

View File

@@ -175,7 +175,7 @@ fn try_legacy_migration(root_dir: &PathBuf, manifest: &Manifest) -> Result<Velop
let new_locator = super::apply(&locator, false, OperationWait::NoWait, Some(&buf), None, false)?;
info!("Removing old app-* folders...");
shared::delete_app_prefixed_folders(&root_dir)?;
shared::delete_app_prefixed_folders(&root_dir);
let _ = remove_dir_all::remove_dir_all(root_dir.join("staging"));
Ok(new_locator)
}

View File

@@ -7,49 +7,31 @@ use std::fs::File;
pub fn uninstall(locator: &VelopackLocator, delete_self: bool) -> Result<()> {
info!("Command: Uninstall");
let root_path = locator.get_root_dir();
fn _uninstall_impl(locator: &VelopackLocator) -> bool {
let root_path = locator.get_root_dir();
// the real app could be running at the moment
let _ = shared::force_stop_package(&root_path);
// the real app could be running at the moment
let _ = shared::force_stop_package(&root_path);
let mut finished_with_errors = false;
// run uninstall hook
windows::run_hook(&locator, constants::HOOK_CLI_UNINSTALL, 60);
// run uninstall hook
windows::run_hook(&locator, constants::HOOK_CLI_UNINSTALL, 60);
// remove all shortcuts pointing to the app
windows::remove_all_shortcuts_for_root_dir(&root_path);
// remove all shortcuts pointing to the app
windows::remove_all_shortcuts_for_root_dir(&root_path);
info!("Removing directory '{}'", root_path.to_string_lossy());
let _ = remove_dir_all::remove_dir_contents(&root_path);
info!("Removing directory '{}'", root_path.to_string_lossy());
if let Err(e) = shared::retry_io(|| remove_dir_all::remove_dir_but_not_self(&root_path)) {
error!("Unable to remove directory, some files may be in use ({}).", e);
finished_with_errors = true;
}
if let Err(e) = windows::registry::remove_uninstall_entry(&locator) {
error!("Unable to remove uninstall registry entry ({}).", e);
// finished_with_errors = true;
}
!finished_with_errors
if let Err(e) = windows::registry::remove_uninstall_entry(&locator) {
error!("Unable to remove uninstall registry entry ({}).", e);
}
// if it returns true, it was a success.
// if it returns false, it was completed with errors which the user should be notified of.
let result = _uninstall_impl(&locator);
let app_title = locator.get_manifest_title();
if result {
info!("Finished successfully.");
shared::dialogs::show_info(format!("{} Uninstall", app_title).as_str(), None, "The application was successfully uninstalled.");
} else {
error!("Finished with errors.");
shared::dialogs::show_uninstall_complete_with_errors_dialog(&app_title, None);
}
info!("Finished successfully.");
shared::dialogs::show_info(format!("{} Uninstall", app_title).as_str(), None, "The application was successfully uninstalled.");
let dead_path = root_path.join(".dead");
let _ = File::create(dead_path);

View File

@@ -0,0 +1,168 @@
// Copyright 2022 Google LLC
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
io::{Read, Seek, SeekFrom},
sync::{Arc, Mutex},
};
use super::ripunzip::determine_stream_len;
struct Inner<R: Read + Seek> {
/// The underlying Read implementation.
r: R,
/// The position of r.
pos: u64,
/// The length of r, lazily loaded.
len: Option<u64>,
}
impl<R: Read + Seek> Inner<R> {
fn new(r: R) -> Self {
Self { r, pos: 0, len: None }
}
/// Get the length of the data stream. This is assumed to be constant.
fn len(&mut self) -> std::io::Result<u64> {
// Return cached size
if let Some(len) = self.len {
return Ok(len);
}
let len = determine_stream_len(&mut self.r)?;
self.len = Some(len);
Ok(len)
}
/// Read into the given buffer, starting at the given offset in the data stream.
fn read_at(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<usize> {
if offset != self.pos {
self.r.seek(SeekFrom::Start(offset))?;
}
let read_result = self.r.read(buf);
if let Ok(bytes_read) = read_result {
// TODO, once stabilised, use checked_add_signed
self.pos += bytes_read as u64;
}
read_result
}
}
/// A [`Read`] which refers to its underlying stream by reference count,
/// and thus can be cloned cheaply. It supports seeking; each cloned instance
/// maintains its own pointer into the file, and the underlying instance
/// is seeked prior to each read.
pub(crate) struct CloneableSeekableReader<R: Read + Seek> {
/// The wrapper around the Read implementation, shared between threads.
inner: Arc<Mutex<Inner<R>>>,
/// The position of _this_ reader.
pos: u64,
}
impl<R: Read + Seek> Clone for CloneableSeekableReader<R> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone(), pos: self.pos }
}
}
impl<R: Read + Seek> CloneableSeekableReader<R> {
/// Constructor. Takes ownership of the underlying `Read`.
/// You should pass in only streams whose total length you expect
/// to be fixed and unchanging. Odd behavior may occur if the length
/// of the stream changes; any subsequent seeks will not take account
/// of the changed stream length.
pub(crate) fn new(r: R) -> Self {
Self { inner: Arc::new(Mutex::new(Inner::new(r))), pos: 0u64 }
}
}
impl<R: Read + Seek> Read for CloneableSeekableReader<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let mut inner = self.inner.lock().unwrap();
let read_result = inner.read_at(self.pos, buf);
if let Ok(bytes_read) = read_result {
self.pos = self
.pos
.checked_add(bytes_read as u64)
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Read too far forward"))?;
}
read_result
}
}
impl<R: Read + Seek> Seek for CloneableSeekableReader<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
let new_pos = match pos {
SeekFrom::Start(pos) => pos,
SeekFrom::End(offset_from_end) => {
let file_len = self.inner.lock().unwrap().len()?;
if -offset_from_end as u64 > file_len {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Seek too far backwards"));
}
file_len
.checked_add_signed(offset_from_end)
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Seek too far backward from end"))?
}
SeekFrom::Current(offset_from_pos) => self
.pos
.checked_add_signed(offset_from_pos)
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Seek too far forward from current pos"))?,
};
self.pos = new_pos;
Ok(new_pos)
}
}
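
// Editor's sketch (hypothetical, not part of this commit): the intended use
// of the cheap clone. Each worker thread owns a clone with its own cursor,
// while reads seek the shared inner stream under the mutex.
#[allow(dead_code)]
fn example_parallel_reads() -> std::io::Result<()> {
    use std::io::Cursor;
    let data: Vec<u8> = (0u8..100).collect();
    let reader = CloneableSeekableReader::new(Cursor::new(data));
    let mut handles = Vec::new();
    for i in 0..4u64 {
        let mut r = reader.clone(); // cheap: bumps an Arc refcount
        handles.push(std::thread::spawn(move || -> std::io::Result<Vec<u8>> {
            let mut buf = vec![0u8; 10];
            r.seek(SeekFrom::Start(i * 25))?; // independent per-clone position
            r.read_exact(&mut buf)?;
            Ok(buf)
        }));
    }
    for handle in handles {
        let chunk = handle.join().expect("reader thread panicked")?;
        assert_eq!(chunk.len(), 10);
    }
    Ok(())
}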
#[cfg(test)]
mod test {
use super::CloneableSeekableReader;
use std::io::{Cursor, Read, Seek, SeekFrom};
// use test_log::test;
#[test]
fn test_cloneable_seekable_reader() -> std::io::Result<()> {
let buf: Vec<u8> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let buf = Cursor::new(buf);
let mut reader = CloneableSeekableReader::new(buf);
let mut out = vec![0; 2];
reader.read_exact(&mut out)?;
assert_eq!(&out, &[0, 1]);
reader.rewind()?;
reader.read_exact(&mut out)?;
assert_eq!(&out, &[0, 1]);
        assert_eq!(reader.stream_position()?, 2);
reader.read_exact(&mut out)?;
assert_eq!(&out, &[2, 3]);
reader.seek(SeekFrom::End(-2))?;
reader.read_exact(&mut out)?;
assert_eq!(&out, &[8, 9]);
assert!(reader.read_exact(&mut out).is_err());
Ok(())
}
#[test]
fn test_cloned_independent_positions() -> std::io::Result<()> {
let buf: Vec<u8> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let buf = Cursor::new(buf);
let mut r1 = CloneableSeekableReader::new(buf);
let mut r2 = r1.clone();
let mut out = vec![0; 2];
r1.read_exact(&mut out)?;
assert_eq!(&out, &[0, 1]);
r2.read_exact(&mut out)?;
assert_eq!(&out, &[0, 1]);
r1.read_exact(&mut out)?;
assert_eq!(&out, &[2, 3]);
r2.seek(SeekFrom::End(-2))?;
r2.read_exact(&mut out)?;
assert_eq!(&out, &[8, 9]);
r1.read_exact(&mut out)?;
assert_eq!(&out, &[4, 5]);
Ok(())
}
}

View File

@@ -0,0 +1,86 @@
#![allow(dead_code)]
mod cloneable_seekable_reader;
mod mtzip;
mod progress_updater;
mod ripunzip;
use anyhow::Result;
pub use mtzip::level::CompressionLevel;
use ripunzip::{UnzipEngine, UnzipOptions};
use std::{
fs::File,
path::{Path, PathBuf},
};
use walkdir::WalkDir;
/// A trait of types which wish to hear progress updates on the unzip.
pub trait UnzipProgressReporter: Sync {
/// Extraction has begun on a file.
fn extraction_starting(&self, _display_name: &str) {}
/// Extraction has finished on a file.
fn extraction_finished(&self, _display_name: &str) {}
/// The total number of compressed bytes we expect to extract.
fn total_bytes_expected(&self, _expected: u64) {}
/// Some bytes of a file have been decompressed. This is probably
/// the best way to display an overall progress bar. This should eventually
/// add up to the number you're given using `total_bytes_expected`.
/// The 'count' parameter is _not_ a running total - you must add up
/// each call to this function into the running total.
/// It's a bit unfortunate that we give compressed bytes rather than
/// uncompressed bytes, but currently we can't calculate uncompressed
/// bytes without downloading the whole zip file first, which rather
/// defeats the point.
fn bytes_extracted(&self, _count: u64) {}
}
/// A progress reporter which does nothing.
struct NullProgressReporter;
impl UnzipProgressReporter for NullProgressReporter {}
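// An illustrative reporter (not used elsewhere in this crate): it folds the
// per-call deltas from `bytes_extracted` into a running total, as the trait
// docs above require. The name `CountingProgressReporter` is ours, not part
// of any external API.
struct CountingProgressReporter(std::sync::atomic::AtomicU64);
impl UnzipProgressReporter for CountingProgressReporter {
    fn bytes_extracted(&self, count: u64) {
        // `count` is an increment, not a running total.
        self.0.fetch_add(count, std::sync::atomic::Ordering::Relaxed);
    }
}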
pub fn extract_to_directory<'b, P1: AsRef<Path>, P2: AsRef<Path>>(
archive_file: P1,
target_dir: P2,
progress_reporter: Option<Box<dyn UnzipProgressReporter + Sync + 'b>>,
) -> Result<()> {
let target_dir = target_dir.as_ref().to_path_buf();
let file = File::open(archive_file)?;
let engine = UnzipEngine::for_file(file)?;
let null_progress = Box::new(NullProgressReporter {});
let options = UnzipOptions {
filename_filter: None,
progress_reporter: progress_reporter.unwrap_or(null_progress),
output_directory: Some(target_dir),
password: None,
single_threaded: false,
};
engine.unzip(options)?;
Ok(())
}
pub fn compress_directory<P1: AsRef<Path>, P2: AsRef<Path>>(target_dir: P1, output_file: P2, level: CompressionLevel) -> Result<()> {
let target_dir = target_dir.as_ref().to_path_buf();
let mut zipper = mtzip::ZipArchive::new();
let workdir_relative_paths = enumerate_files_relative(&target_dir);
for relative_path in &workdir_relative_paths {
zipper
.add_file_from_fs(target_dir.join(&relative_path), relative_path.to_string_lossy().to_string())
.compression_level(level)
.done();
}
let mut file = File::create(&output_file)?;
zipper.write_with_rayon(&mut file)?;
Ok(())
}
pub fn enumerate_files_relative<P: AsRef<Path>>(dir: P) -> Vec<PathBuf> {
WalkDir::new(&dir)
.follow_links(false)
.into_iter()
.filter_map(|entry| entry.ok())
.filter(|entry| entry.file_type().is_file())
.map(|entry| entry.path().strip_prefix(&dir).map(|p| p.to_path_buf()))
.filter_map(|entry| entry.ok())
.collect()
}
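// A minimal round-trip sketch of the two public entry points above; the paths
// and archive name are hypothetical, so the test is ignored by default.
#[cfg(test)]
mod round_trip_example {
    use super::*;
    #[test]
    #[ignore = "illustrative only; expects real directories on disk"]
    fn compress_then_extract() -> Result<()> {
        compress_directory("some/input_dir", "round_trip.zip", CompressionLevel::balanced())?;
        extract_to_directory("round_trip.zip", "some/output_dir", None)?;
        Ok(())
    }
}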

View File

@@ -0,0 +1,126 @@
//! Compression level
use core::fmt::Display;
use std::error::Error;
use flate2::Compression;
/// Compression level that should be used when compressing a file or data.
///
/// Current compression providers support only levels from 0 to 9, so these are the only ones
/// supported.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct CompressionLevel(u8);
impl CompressionLevel {
/// Construct a new value of a compression level setting.
///
/// The integer value must be less than or equal to 9, otherwise `None` is returned
#[inline]
pub const fn new(level: u8) -> Option<Self> {
if level <= 9 { Some(Self(level)) } else { None }
}
/// Construct a new value of a compression level setting without checking the value.
///
/// # Safety
///
/// The value must be a valid supported compression level
#[inline]
pub const unsafe fn new_unchecked(level: u8) -> Self {
Self(level)
}
/// No compression
#[inline]
pub const fn none() -> Self {
Self(0)
}
/// Fastest compression
#[inline]
pub const fn fast() -> Self {
Self(1)
}
/// Balanced level with moderate compression and speed. The raw value is 6.
#[inline]
pub const fn balanced() -> Self {
Self(6)
}
/// Best compression ratio, comes at a worse performance
#[inline]
pub const fn best() -> Self {
Self(9)
}
/// Get the compression level as an integer
#[inline]
pub const fn get(self) -> u8 {
self.0
}
}
impl Default for CompressionLevel {
/// Equivalent to [`Self::balanced`]
fn default() -> Self {
Self::balanced()
}
}
/// The number for compression level was invalid
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvalidCompressionLevel(u32);
impl InvalidCompressionLevel {
/// The value which was supplied
pub fn value(self) -> u32 {
self.0
}
}
impl Display for InvalidCompressionLevel {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Invalid compression level number: {}", self.0)
}
}
impl Error for InvalidCompressionLevel {}
impl From<CompressionLevel> for Compression {
#[inline]
fn from(value: CompressionLevel) -> Self {
Compression::new(value.0.into())
}
}
impl TryFrom<Compression> for CompressionLevel {
type Error = InvalidCompressionLevel;
fn try_from(value: Compression) -> Result<Self, Self::Error> {
let level = value.level();
Self::new(
level
.try_into()
.map_err(|_| InvalidCompressionLevel(level))?,
)
.ok_or(InvalidCompressionLevel(level))
}
}
impl From<CompressionLevel> for u8 {
#[inline]
fn from(value: CompressionLevel) -> Self {
value.0
}
}
impl TryFrom<u8> for CompressionLevel {
type Error = InvalidCompressionLevel;
#[inline]
fn try_from(value: u8) -> Result<Self, Self::Error> {
Self::new(value).ok_or(InvalidCompressionLevel(value.into()))
}
}
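// A small check of the constructor contract documented above: levels 0..=9
// are accepted, anything larger is rejected (both via `new` and `try_from`).
#[cfg(test)]
mod level_contract {
    use super::CompressionLevel;
    #[test]
    fn accepts_only_0_through_9() {
        assert_eq!(CompressionLevel::new(9), Some(CompressionLevel::best()));
        assert_eq!(CompressionLevel::new(10), None);
        assert_eq!(CompressionLevel::try_from(6u8), Ok(CompressionLevel::balanced()));
        assert!(CompressionLevel::try_from(10u8).is_err());
    }
}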

View File

@@ -0,0 +1,433 @@
//! # mtzip
//!
//! MTZIP (Stands for Multi-Threaded ZIP) is a library for making zip archives while utilising all
//! available performance available with multithreading. The amount of threads can be limited by
//! the user or detected automatically.
//!
//! Example usage:
//!
//! ```ignore
//! # use std::path::Path;
//! # use std::fs::File;
//! use mtzip::ZipArchive;
//!
//! // Creating the zipper that holds data and handles compression
//! let mut zipper = ZipArchive::new();
//!
//! // Adding a file from filesystem
//! zipper.add_file_from_fs(
//! Path::new("input/test_text_file.txt"),
//! "test_text_file.txt".to_owned(),
//! );
//!
//! // Adding a file with data from a memory location
//! zipper.add_file_from_memory(b"Hello, world!", "hello_world.txt".to_owned());
//!
//! // Adding a directory and a file to it
//! zipper.add_directory("test_dir".to_owned());
//! zipper.add_file_from_fs(
//! Path::new("input/file_that_goes_to_a_dir.txt"),
//! "test_dir/file_that_goes_to_a_dir.txt".to_owned(),
//! );
//!
//! // Writing to a file
//! // First, open the file
//! let mut file = File::create("output.zip").unwrap();
//! // Then, write to it
//! zipper.write(&mut file); // Amount of threads is chosen automatically
//! ```
use std::{
borrow::Cow,
io::{Read, Seek, Write},
num::NonZeroUsize,
panic::{RefUnwindSafe, UnwindSafe},
path::Path,
sync::{mpsc, Mutex},
};
use level::CompressionLevel;
use rayon::prelude::*;
use zip_archive_parts::{
data::ZipData,
extra_field::{ExtraField, ExtraFields},
file::ZipFile,
job::{ZipJob, ZipJobOrigin},
};
pub mod level;
mod platform;
mod zip_archive_parts;
// TODO: tests, maybe examples
/// Compression type for the file. Directories always use [`Stored`](CompressionType::Stored).
/// Default is [`Deflate`](CompressionType::Deflate).
#[repr(u16)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum CompressionType {
/// No compression at all, the data is stored as-is.
///
/// This is used for directories because they have no data (no payload)
Stored = 0,
#[default]
/// Deflate compression, the most common in ZIP files.
Deflate = 8,
}
/// Builder used to optionally add additional attributes to a file or directory.
/// The default compression type is [`CompressionType::Deflate`] and default compression level is
/// [`CompressionLevel::best`]
#[must_use]
#[derive(Debug)]
pub struct ZipFileBuilder<'a, 'b> {
archive_handle: &'a mut ZipArchive<'b>,
job: ZipJob<'b>,
}
impl<'a, 'b> ZipFileBuilder<'a, 'b> {
    /// Call this when you're done configuring the file entry; it will be added to the job list,
    /// or directly into the resulting data if it's a directory. This always needs to be called.
pub fn done(self) {
let Self { archive_handle, job } = self;
match &job.data_origin {
ZipJobOrigin::Directory => {
let file = job.into_file().expect("No failing code path");
archive_handle.push_file(file);
}
_ => archive_handle.push_job(job),
}
}
/// Read filesystem metadata from filesystem and add the properties to this file. It sets
/// external attributes (as with [`Self::external_attributes`]) and adds extra fields generated
/// with [`ExtraFields::new_from_fs`]
pub fn metadata_from_fs(self, fs_path: &Path) -> std::io::Result<Self> {
let metadata = std::fs::metadata(fs_path)?;
let external_attributes = platform::attributes_from_fs(&metadata);
let extra_fields = ExtraFields::new_from_fs(&metadata);
Ok(self.external_attributes(external_attributes).extra_fields(extra_fields))
}
/// Add a file comment.
pub fn file_comment(mut self, comment: String) -> Self {
self.job.file_comment = Some(comment);
self
}
/// Add additional [`ExtraField`].
pub fn extra_field(mut self, extra_field: ExtraField) -> Self {
self.job.extra_fields.values.push(extra_field);
self
}
/// Add additional [`ExtraField`]s.
pub fn extra_fields(mut self, extra_fields: impl IntoIterator<Item = ExtraField>) -> Self {
self.job.extra_fields.extend(extra_fields);
self
}
/// Set compression type. Ignored for directories, as they use no compression.
///
/// Default is [`CompressionType::Deflate`].
pub fn compression_type(mut self, compression_type: CompressionType) -> Self {
self.job.compression_type = compression_type;
self
}
/// Set compression level. Ignored for directories, as they use no compression.
///
/// Default is [`CompressionLevel::best`]
pub fn compression_level(mut self, compression_level: CompressionLevel) -> Self {
self.job.compression_level = compression_level;
self
}
/// Set external attributes. The format depends on a filesystem and is mostly a legacy
/// mechanism, usually a default value is used if this is not a filesystem source. When a file
    /// is added from the filesystem, these attributes will be read and used, and the ones set with
    /// this method are ignored.
pub fn external_attributes(mut self, external_attributes: u16) -> Self {
self.job.external_attributes = external_attributes;
self
}
/// Set external file attributes from a filesystem item. Use of this method is discouraged in
/// favor of [`Self::metadata_from_fs`], which also sets extra fields which contain modern
/// filesystem attributes instead of using old 16-bit system-dependent format.
pub fn external_attributes_from_fs(mut self, fs_path: &Path) -> std::io::Result<Self> {
let metadata = std::fs::metadata(fs_path)?;
self.job.external_attributes = platform::attributes_from_fs(&metadata);
Ok(self)
}
#[inline]
fn new(archive: &'a mut ZipArchive<'b>, filename: String, origin: ZipJobOrigin<'b>) -> Self {
Self {
archive_handle: archive,
job: ZipJob {
data_origin: origin,
archive_path: filename,
extra_fields: ExtraFields::default(),
file_comment: None,
external_attributes: platform::default_file_attrs(),
compression_type: CompressionType::Deflate,
compression_level: CompressionLevel::best(),
},
}
}
#[inline]
fn new_dir(archive: &'a mut ZipArchive<'b>, filename: String) -> Self {
Self {
archive_handle: archive,
job: ZipJob {
data_origin: ZipJobOrigin::Directory,
archive_path: filename,
extra_fields: ExtraFields::default(),
file_comment: None,
external_attributes: platform::default_dir_attrs(),
compression_type: CompressionType::Deflate,
compression_level: CompressionLevel::best(),
},
}
}
}
/// Structure that holds the current state of ZIP archive creation.
///
/// # Lifetimes
///
/// Because some of the methods allow supplying borrowed data, the lifetimes are used to indicate
/// that [`Self`](ZipArchive) borrows them. If you only provide owned data, such as
/// [`Vec<u8>`](Vec) or [`PathBuf`](std::path::PathBuf), you won't have to worry about lifetimes
/// and can simply use `'static`, if you ever need to specify them in your code.
///
/// The lifetime `'a` is for the borrowed data passed in
/// [`add_file_from_memory`](Self::add_file_from_memory),
/// [`add_file_from_fs`](Self::add_file_from_fs) and
/// [`add_file_from_reader`](Self::add_file_from_reader)
#[derive(Debug, Default)]
pub struct ZipArchive<'a> {
jobs_queue: Vec<ZipJob<'a>>,
data: ZipData,
}
impl<'a> ZipArchive<'a> {
fn push_job(&mut self, job: ZipJob<'a>) {
self.jobs_queue.push(job);
}
fn push_file(&mut self, file: ZipFile) {
self.data.files.push(file);
}
/// Create an empty [`ZipArchive`]
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Add file from filesystem.
///
/// Opens the file and reads data from it when [`compress`](Self::compress) is called.
///
/// ```
/// # use mtzip::ZipArchive;
/// # use std::path::Path;
/// let mut zipper = ZipArchive::new();
/// zipper
/// .add_file_from_fs(Path::new("input.txt"), "input.txt".to_owned())
/// .done();
/// ```
#[inline]
pub fn add_file_from_fs(&mut self, fs_path: impl Into<Cow<'a, Path>>, archived_path: String) -> ZipFileBuilder<'_, 'a> {
ZipFileBuilder::new(self, archived_path, ZipJobOrigin::Filesystem { path: fs_path.into() })
}
/// Add file with data from memory.
///
/// The data can be either borrowed or owned by the [`ZipArchive`] struct to avoid lifetime
/// hell.
///
/// ```
/// # use mtzip::ZipArchive;
/// # use std::path::Path;
/// let mut zipper = ZipArchive::new();
/// let data: &[u8] = "Hello, world!".as_ref();
/// zipper
/// .add_file_from_memory(data, "hello_world.txt".to_owned())
/// .done();
/// ```
#[inline]
pub fn add_file_from_memory(&mut self, data: impl Into<Cow<'a, [u8]>>, archived_path: String) -> ZipFileBuilder<'_, 'a> {
ZipFileBuilder::new(self, archived_path, ZipJobOrigin::RawData(data.into()))
}
/// Add a file with data from a reader.
///
    /// This method takes any type implementing [`Read`] and allows it to have borrowed data (`'a`)
///
/// ```
/// # use mtzip::ZipArchive;
/// # use std::path::Path;
/// let mut zipper = ZipArchive::new();
/// let data_input = std::io::stdin();
/// zipper
/// .add_file_from_reader(data_input, "stdin_file.txt".to_owned())
/// .done();
/// ```
#[inline]
pub fn add_file_from_reader<R: Read + Send + Sync + UnwindSafe + RefUnwindSafe + 'a>(
&mut self,
reader: R,
archived_path: String,
) -> ZipFileBuilder<'_, 'a> {
ZipFileBuilder::new(self, archived_path, ZipJobOrigin::Reader(Box::new(reader)))
}
/// Add a directory entry.
///
    /// All directories in the tree should be added. This method does not associate any filesystem
/// properties to the entry.
///
/// ```
/// # use mtzip::ZipArchive;
/// # use std::path::Path;
/// let mut zipper = ZipArchive::new();
/// zipper.add_directory("test_dir/".to_owned()).done();
/// ```
#[inline]
pub fn add_directory(&mut self, archived_path: String) -> ZipFileBuilder<'_, 'a> {
ZipFileBuilder::new_dir(self, archived_path)
}
    /// Compress contents. Will be done automatically on [`write`](Self::write) call if files were
    /// added between the last compression and the [`write`](Self::write) call. Automatically
    /// chooses the number of threads to use based on how many are available.
#[inline]
pub fn compress(&mut self) {
self.compress_with_threads(Self::get_threads());
}
    /// Compress contents. Will be done automatically on
    /// [`write_with_threads`](Self::write_with_threads) call if files were added between the last
    /// compression and the [`write`](Self::write) call. Allows specifying the number of threads
    /// that will be used.
    ///
    /// Example of getting the number of threads that this library uses in
    /// [`compress`](Self::compress):
///
/// ```
/// # use std::num::NonZeroUsize;
/// # use mtzip::ZipArchive;
/// # let mut zipper = ZipArchive::new();
/// let threads = std::thread::available_parallelism()
/// .map(NonZeroUsize::get)
/// .unwrap_or(1);
///
/// zipper.compress_with_threads(threads);
/// ```
#[inline]
pub fn compress_with_threads(&mut self, threads: usize) {
if !self.jobs_queue.is_empty() {
self.compress_with_consumer(threads, |zip_data, rx| zip_data.files.extend(rx))
}
}
    /// Write compressed data to a writer (usually a file). Executes [`compress`](Self::compress)
    /// if files were added between the last [`compress`](Self::compress) call and this call.
    /// Automatically chooses the number of threads based on available CPU parallelism.
#[inline]
pub fn write<W: Write + Seek>(&mut self, writer: &mut W) -> std::io::Result<()> {
self.write_with_threads(writer, Self::get_threads())
}
    /// Write compressed data to a writer (usually a file). Executes
    /// [`compress_with_threads`](Self::compress_with_threads) if files were added between the
    /// last [`compress`](Self::compress) call and this call. Allows specifying the number of
    /// threads that will be used.
    ///
    /// Example of getting the number of threads that this library uses in [`write`](Self::write):
///
/// ```
/// # use std::num::NonZeroUsize;
/// # use mtzip::ZipArchive;
/// # let mut zipper = ZipArchive::new();
/// let threads = std::thread::available_parallelism()
/// .map(NonZeroUsize::get)
/// .unwrap_or(1);
    ///
    /// # let mut buf = std::io::Cursor::new(Vec::new());
    /// zipper.write_with_threads(&mut buf, threads).unwrap();
/// ```
#[inline]
pub fn write_with_threads<W: Write + Seek>(&mut self, writer: &mut W, threads: usize) -> std::io::Result<()> {
if !self.jobs_queue.is_empty() {
self.compress_with_consumer(threads, |zip_data, rx| zip_data.write(writer, rx))
} else {
self.data.write(writer, std::iter::empty())
}
}
    /// Starts the compression jobs and passes the mpsc receiver to the consumer function, which
    /// might either store the data in [`ZipData`] - [`Self::compress_with_threads`]; or write the
    /// zip data as soon as it's available - [`Self::write_with_threads`]
fn compress_with_consumer<F, T>(&mut self, threads: usize, consumer: F) -> T
where
F: FnOnce(&mut ZipData, mpsc::Receiver<ZipFile>) -> T,
{
let jobs_drain = Mutex::new(self.jobs_queue.drain(..));
let jobs_drain_ref = &jobs_drain;
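        // Worker-pool pattern: each scoped thread pops jobs off the shared
        // drain (from the back) until it is empty, sending each finished
        // `ZipFile` through the channel; `consumer` decides whether to collect
        // the results into `ZipData` or stream them straight to a writer.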
std::thread::scope(|s| {
let rx = {
let (tx, rx) = mpsc::channel();
for _ in 0..threads {
let thread_tx = tx.clone();
s.spawn(move || loop {
let next_job = jobs_drain_ref.lock().unwrap().next_back();
if let Some(job) = next_job {
thread_tx.send(job.into_file().unwrap()).unwrap();
} else {
break;
}
});
}
rx
};
consumer(&mut self.data, rx)
})
}
fn get_threads() -> usize {
std::thread::available_parallelism().map(NonZeroUsize::get).unwrap_or(1)
}
}
impl ZipArchive<'_> {
/// Compress contents and use rayon for parallelism.
///
/// Uses whatever thread pool this function is executed in.
///
/// If you want to limit the amount of threads to be used, use
/// [`rayon::ThreadPoolBuilder::num_threads`] and either set it as a global pool, or
/// [`rayon::ThreadPool::install`] the call to this method in it.
pub fn compress_with_rayon(&mut self) {
if !self.jobs_queue.is_empty() {
let files_par_iter = self.jobs_queue.par_drain(..).map(|job| job.into_file().unwrap());
self.data.files.par_extend(files_par_iter)
}
}
/// Write the contents to a writer.
///
    /// This method uses the same thread logic as [`Self::compress_with_rayon`], refer to its
/// documentation for details on how to control the parallelism and thread allocation.
pub fn write_with_rayon<W: Write + Seek + Send>(&mut self, writer: &mut W) -> std::io::Result<()> {
if !self.jobs_queue.is_empty() {
let files_par_iter = self.jobs_queue.par_drain(..).map(|job| job.into_file().unwrap());
self.data.write_rayon(writer, files_par_iter)
} else {
self.data.write_rayon(writer, rayon::iter::empty())
}
}
}
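// An illustrative way to bound the rayon-based methods above to a fixed pool,
// as the docs suggest (the pool size, file name, and `zipper` binding here are
// arbitrary):
//
//     let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build()?;
//     let mut file = std::fs::File::create("output.zip")?;
//     pool.install(|| zipper.write_with_rayon(&mut file))?;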

View File

@@ -0,0 +1,96 @@
//! Platform-specific stuff
use std::fs::Metadata;
#[cfg(target_os = "windows")]
/// OS - Windows, id 11 per Info-Zip spec
/// Specification version 6.2
pub(crate) const VERSION_MADE_BY: u16 = (11 << 8) + 62;
#[cfg(target_os = "macos")]
/// OS - MacOS darwin, id 19
/// Specification version 6.2
pub(crate) const VERSION_MADE_BY: u16 = (19 << 8) + 62;
#[cfg(not(any(target_os = "windows", target_os = "macos")))]
// Fallback
/// OS - Unix assumed, id 3
/// Specification version 6.2
pub(crate) const VERSION_MADE_BY: u16 = (3 << 8) + 62;
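// In all cases the upper byte packs the host OS id and the lower byte the ZIP
// specification version times ten (6.2 -> 62), hence `(os_id << 8) + 62`.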
#[allow(dead_code)]
pub(crate) const DEFAULT_UNIX_FILE_ATTRS: u16 = 0o100644;
#[allow(dead_code)]
pub(crate) const DEFAULT_UNIX_DIR_ATTRS: u16 = 0o040755;
#[cfg(target_os = "windows")]
pub(crate) const DEFAULT_WINDOWS_FILE_ATTRS: u16 = 128;
#[cfg(target_os = "windows")]
pub(crate) const DEFAULT_WINDOWS_DIR_ATTRS: u16 = 16;
#[inline]
#[allow(dead_code)]
const fn convert_attrs(attrs: u32) -> u16 {
attrs as u16
}
pub(crate) fn attributes_from_fs(metadata: &Metadata) -> u16 {
#[cfg(target_os = "windows")]
{
use std::os::windows::fs::MetadataExt;
return convert_attrs(metadata.file_attributes());
}
#[cfg(target_os = "linux")]
{
use std::os::linux::fs::MetadataExt;
return convert_attrs(metadata.st_mode());
}
#[cfg(target_os = "macos")]
{
use std::os::darwin::fs::MetadataExt;
return convert_attrs(metadata.st_mode());
}
#[cfg(all(unix, not(target_os = "linux"), not(target_os = "macos")))]
{
use std::os::unix::fs::PermissionsExt;
return convert_attrs(metadata.permissions().mode());
}
#[cfg(not(any(target_os = "windows", target_os = "linux", target_os = "macos", unix)))]
{
if metadata.is_dir() {
return DEFAULT_UNIX_DIR_ATTRS;
} else {
return DEFAULT_UNIX_FILE_ATTRS;
}
}
}
#[cfg(target_os = "windows")]
pub(crate) const fn default_file_attrs() -> u16 {
DEFAULT_WINDOWS_FILE_ATTRS
}
#[cfg(not(windows))]
pub(crate) const fn default_file_attrs() -> u16 {
DEFAULT_UNIX_FILE_ATTRS
}
#[cfg(target_os = "windows")]
pub(crate) const fn default_dir_attrs() -> u16 {
DEFAULT_WINDOWS_DIR_ATTRS
}
#[cfg(any(target_os = "linux", unix))]
#[cfg(not(target_os = "windows"))]
pub(crate) const fn default_dir_attrs() -> u16 {
DEFAULT_UNIX_DIR_ATTRS
}
#[cfg(not(any(target_os = "windows", target_os = "linux", unix)))]
pub(crate) const fn default_dir_attrs() -> u16 {
0
}

View File

@@ -0,0 +1,156 @@
use std::io::{Seek, Write};
use std::sync::Mutex;
use rayon::prelude::*;
use super::file::{ZipFile, ZipFileNoData};
const END_OF_CENTRAL_DIR_SIGNATURE: u32 = 0x06054B50;
#[derive(Debug, Default)]
pub struct ZipData {
pub files: Vec<ZipFile>,
}
impl ZipData {
pub fn write<W: Write + Seek, I: IntoIterator<Item = ZipFile>>(
&mut self,
buf: &mut W,
zip_file_iter: I,
) -> std::io::Result<()> {
let zip_files = self.write_files_contained_and_iter(buf, zip_file_iter)?;
let files_amount = super::files_amount_u16(&zip_files);
let central_dir_offset = super::stream_position_u32(buf)?;
self.write_central_dir(zip_files, buf)?;
        let central_dir_end = super::stream_position_u32(buf)?;
        self.write_end_of_central_directory(
            buf,
            central_dir_offset,
            central_dir_end,
files_amount,
)
}
pub fn write_rayon<W: Write + Seek + Send, I: ParallelIterator<Item = ZipFile>>(
&mut self,
buf: &mut W,
zip_file_iter: I,
) -> std::io::Result<()> {
let zip_files = self.write_files_contained_and_par_iter(buf, zip_file_iter)?;
let files_amount = super::files_amount_u16(&zip_files);
let central_dir_offset = super::stream_position_u32(buf)?;
self.write_central_dir(zip_files, buf)?;
        let central_dir_end = super::stream_position_u32(buf)?;
        self.write_end_of_central_directory(
            buf,
            central_dir_offset,
            central_dir_end,
files_amount,
)
}
#[inline]
fn write_files_contained_and_iter<W: Write + Seek, I: IntoIterator<Item = ZipFile>>(
&mut self,
buf: &mut W,
zip_files_iter: I,
) -> std::io::Result<Vec<ZipFileNoData>> {
let zip_files = std::mem::take(&mut self.files);
self.write_files_iter(buf, zip_files.into_iter().chain(zip_files_iter))
}
#[inline]
pub fn write_files_contained_and_par_iter<
W: Write + Seek + Send,
I: ParallelIterator<Item = ZipFile>,
>(
&mut self,
buf: &mut W,
zip_files_iter: I,
) -> std::io::Result<Vec<ZipFileNoData>> {
let zip_files = std::mem::take(&mut self.files);
self.write_files_par_iter(buf, zip_files.into_par_iter().chain(zip_files_iter))
}
pub fn write_files_iter<W: Write + Seek, I: IntoIterator<Item = ZipFile>>(
&mut self,
buf: &mut W,
zip_files: I,
) -> std::io::Result<Vec<ZipFileNoData>> {
zip_files
.into_iter()
.map(|zipfile| zipfile.write_local_file_header_with_data_consuming(buf))
.collect::<std::io::Result<Vec<_>>>()
}
pub fn write_files_par_iter<W: Write + Seek + Send, I: ParallelIterator<Item = ZipFile>>(
&mut self,
buf: &mut W,
zip_files: I,
) -> std::io::Result<Vec<ZipFileNoData>> {
let buf = Mutex::new(buf);
zip_files
.map(|zipfile| {
let mut buf_lock = buf.lock().unwrap();
zipfile.write_local_file_header_with_data_consuming(*buf_lock)
})
.collect::<std::io::Result<Vec<_>>>()
}
fn write_central_dir<W: Write, I: IntoIterator<Item = ZipFileNoData>>(
&self,
zip_files: I,
buf: &mut W,
) -> std::io::Result<()> {
zip_files
.into_iter()
.try_for_each(|zip_file| zip_file.write_central_directory_entry(buf))
}
const FOOTER_LENGTH: usize = 22;
fn write_end_of_central_directory<W: Write>(
&self,
buf: &mut W,
central_dir_offset: u32,
        central_dir_end: u32,
files_amount: u16,
) -> std::io::Result<()> {
// Temporary in-memory statically sized array
let mut central_dir = [0; Self::FOOTER_LENGTH];
{
let mut central_dir_buf: &mut [u8] = &mut central_dir;
// Signature
central_dir_buf.write_all(&END_OF_CENTRAL_DIR_SIGNATURE.to_le_bytes())?;
// number of this disk
central_dir_buf.write_all(&0_u16.to_le_bytes())?;
// number of the disk with start
central_dir_buf.write_all(&0_u16.to_le_bytes())?;
// Number of entries on this disk
central_dir_buf.write_all(&files_amount.to_le_bytes())?;
// Number of entries
central_dir_buf.write_all(&files_amount.to_le_bytes())?;
// Central dir size
            central_dir_buf.write_all(&(central_dir_end - central_dir_offset).to_le_bytes())?;
// Central dir offset
central_dir_buf.write_all(&central_dir_offset.to_le_bytes())?;
// Comment length
central_dir_buf.write_all(&0_u16.to_le_bytes())?;
}
buf.write_all(&central_dir)?;
Ok(())
}
}

View File

@@ -0,0 +1,273 @@
//! ZIP file extra field
use std::{fs::Metadata, io::Write};
/// This is a structure containing [`ExtraField`]s associated with a file or directory in a zip
/// file, mostly used for filesystem properties, and this is the only functionality implemented
/// here.
///
/// The [`new_from_fs`](Self::new_from_fs) method will use the metadata the filesystem provides to
/// construct the collection.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct ExtraFields {
pub(crate) values: Vec<ExtraField>,
}
impl Extend<ExtraField> for ExtraFields {
fn extend<T: IntoIterator<Item = ExtraField>>(&mut self, iter: T) {
self.values.extend(iter)
}
}
impl IntoIterator for ExtraFields {
type Item = <Vec<ExtraField> as IntoIterator>::Item;
type IntoIter = <Vec<ExtraField> as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.values.into_iter()
}
}
impl ExtraFields {
/// Create a new set of [`ExtraField`]s. [`Self::new_from_fs`] should be preferred.
///
/// # Safety
///
/// All fields must have valid values depending on the field type.
pub unsafe fn new<I>(fields: I) -> Self
where
I: IntoIterator<Item = ExtraField>,
{
Self { values: fields.into_iter().collect() }
}
/// This method will use the filesystem metadata to get the properties that can be stored in
/// ZIP [`ExtraFields`].
///
    /// The behavior depends on the target platform. Returns an empty set if the target OS is not
    /// Windows, Linux, or another UNIX-family system.
pub fn new_from_fs(metadata: &Metadata) -> Self {
#[cfg(target_os = "windows")]
{
return Self::new_windows(metadata);
}
#[cfg(target_os = "linux")]
{
return Self::new_linux(metadata);
}
#[cfg(all(unix, not(target_os = "linux")))]
{
return Self::new_unix(metadata);
}
}
#[cfg(target_os = "linux")]
fn new_linux(metadata: &Metadata) -> Self {
use std::os::linux::fs::MetadataExt;
let mod_time = Some(metadata.st_mtime() as i32);
let ac_time = Some(metadata.st_atime() as i32);
let cr_time = Some(metadata.st_ctime() as i32);
let uid = metadata.st_uid();
let gid = metadata.st_gid();
Self { values: vec![ExtraField::UnixExtendedTimestamp { mod_time, ac_time, cr_time }, ExtraField::UnixAttrs { uid, gid }] }
}
#[cfg(all(unix, not(target_os = "linux")))]
#[allow(dead_code)]
fn new_unix(metadata: &Metadata) -> Self {
use std::os::unix::fs::MetadataExt;
let mod_time = Some(metadata.mtime() as i32);
let ac_time = Some(metadata.atime() as i32);
let cr_time = Some(metadata.ctime() as i32);
let uid = metadata.uid();
let gid = metadata.gid();
Self { values: vec![ExtraField::UnixExtendedTimestamp { mod_time, ac_time, cr_time }, ExtraField::UnixAttrs { uid, gid }] }
}
#[cfg(target_os = "windows")]
fn new_windows(metadata: &Metadata) -> Self {
use std::os::windows::fs::MetadataExt;
let mtime = metadata.last_write_time();
let atime = metadata.last_access_time();
let ctime = metadata.creation_time();
Self { values: vec![ExtraField::Ntfs { mtime, atime, ctime }] }
}
pub(crate) fn data_length<const CENTRAL_HEADER: bool>(&self) -> u16 {
self.values.iter().map(|f| 4 + f.field_size::<CENTRAL_HEADER>()).sum()
}
pub(crate) fn write<W: Write, const CENTRAL_HEADER: bool>(&self, writer: &mut W) -> std::io::Result<()> {
for field in &self.values {
field.write::<_, CENTRAL_HEADER>(writer)?;
}
Ok(())
}
}
/// Extra data that can be associated with a file or directory.
///
/// This library only implements the filesystem properties in NTFS and UNIX format.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExtraField {
/// NTFS file properties.
Ntfs {
/// Last modification timestamp
mtime: u64,
/// Last access timestamp
atime: u64,
/// File/directory creation timestamp
ctime: u64,
},
/// Info-Zip extended unix timestamp. Each part is optional by definition, but will be
/// populated by [`ExtraFields::new_from_fs`].
UnixExtendedTimestamp {
/// Last modification timestamp
mod_time: Option<i32>,
/// Last access timestamp
ac_time: Option<i32>,
/// Creation timestamp
cr_time: Option<i32>,
},
/// UNIX file/directory attributes defined by Info-Zip.
UnixAttrs {
/// UID of the owner
uid: u32,
/// GID of the group
gid: u32,
},
}
const MOD_TIME_PRESENT: u8 = 1;
const AC_TIME_PRESENT: u8 = 1 << 1;
const CR_TIME_PRESENT: u8 = 1 << 2;
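// Info-ZIP "UT" (0x5455) layout: one flags byte declaring which timestamps
// follow, then each present timestamp as a 32-bit UNIX time. The central
// directory copy carries only the modification time even when the flags
// advertise more, which is why `field_size` and `write` below branch on
// `CENTRAL_HEADER`.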
impl ExtraField {
#[inline]
fn header_id(&self) -> u16 {
match self {
Self::Ntfs { mtime: _, atime: _, ctime: _ } => 0x000a,
Self::UnixExtendedTimestamp { mod_time: _, ac_time: _, cr_time: _ } => 0x5455,
Self::UnixAttrs { uid: _, gid: _ } => 0x7875,
}
}
#[inline]
const fn optional_field_size<T: Sized>(field: &Option<T>) -> u16 {
match field {
Some(_) => std::mem::size_of::<T>() as u16,
None => 0,
}
}
#[inline]
const fn field_size<const CENTRAL_HEADER: bool>(&self) -> u16 {
match self {
Self::Ntfs { mtime: _, atime: _, ctime: _ } => 32,
Self::UnixExtendedTimestamp { mod_time, ac_time, cr_time } => {
1 + Self::optional_field_size(mod_time) + {
if !CENTRAL_HEADER {
Self::optional_field_size(ac_time) + Self::optional_field_size(cr_time)
} else {
0
}
}
}
Self::UnixAttrs { uid: _, gid: _ } => 11,
}
}
#[inline]
const fn if_present(val: Option<i32>, if_present: u8) -> u8 {
match val {
Some(_) => if_present,
None => 0,
}
}
const NTFS_FIELD_LEN: usize = 32;
const UNIX_ATTRS_LEN: usize = 11;
pub(crate) fn write<W: Write, const CENTRAL_HEADER: bool>(self, writer: &mut W) -> std::io::Result<()> {
// Header ID
writer.write_all(&self.header_id().to_le_bytes())?;
// Field data size
writer.write_all(&self.field_size::<CENTRAL_HEADER>().to_le_bytes())?;
match self {
Self::Ntfs { mtime, atime, ctime } => {
// Writing to a temporary in-memory array
let mut field = [0; Self::NTFS_FIELD_LEN];
{
let mut field_buf: &mut [u8] = &mut field;
// Reserved field
field_buf.write_all(&0_u32.to_le_bytes())?;
// Tag1 number
field_buf.write_all(&1_u16.to_le_bytes())?;
// Tag1 size
field_buf.write_all(&24_u16.to_le_bytes())?;
// Mtime
field_buf.write_all(&mtime.to_le_bytes())?;
// Atime
field_buf.write_all(&atime.to_le_bytes())?;
// Ctime
field_buf.write_all(&ctime.to_le_bytes())?;
}
writer.write_all(&field)?;
}
Self::UnixExtendedTimestamp { mod_time, ac_time, cr_time } => {
let flags = Self::if_present(mod_time, MOD_TIME_PRESENT)
| Self::if_present(ac_time, AC_TIME_PRESENT)
| Self::if_present(cr_time, CR_TIME_PRESENT);
writer.write_all(&[flags])?;
if let Some(mod_time) = mod_time {
writer.write_all(&mod_time.to_le_bytes())?;
}
if !CENTRAL_HEADER {
if let Some(ac_time) = ac_time {
writer.write_all(&ac_time.to_le_bytes())?;
}
if let Some(cr_time) = cr_time {
writer.write_all(&cr_time.to_le_bytes())?;
}
}
}
Self::UnixAttrs { uid, gid } => {
// Writing to a temporary in-memory array
let mut field = [0; Self::UNIX_ATTRS_LEN];
{
let mut field_buf: &mut [u8] = &mut field;
// Version of the field
field_buf.write_all(&[1])?;
// UID size
field_buf.write_all(&[4])?;
// UID
field_buf.write_all(&uid.to_le_bytes())?;
// GID size
field_buf.write_all(&[4])?;
// GID
field_buf.write_all(&gid.to_le_bytes())?;
}
writer.write_all(&field)?;
}
}
Ok(())
}
}

View File

@@ -0,0 +1,201 @@
use std::io::{Seek, Write};
use super::extra_field::ExtraFields;
use super::super::{CompressionType, platform::VERSION_MADE_BY};
const LOCAL_FILE_HEADER_SIGNATURE: u32 = 0x04034B50;
const CENTRAL_FILE_HEADER_SIGNATURE: u32 = 0x02014B50;
const VERSION_NEEDED_TO_EXTRACT: u16 = 20;
/// Set bit 11 to indicate that the file names are in UTF-8, because all strings in Rust are valid
/// UTF-8.
const GENERAL_PURPOSE_BIT_FLAG: u16 = 1 << 11;
#[derive(Debug)]
pub struct ZipFile {
pub header: ZipFileHeader,
pub data: Vec<u8>,
}
#[derive(Debug)]
pub struct ZipFileHeader {
pub compression_type: CompressionType,
pub crc: u32,
pub uncompressed_size: u32,
pub filename: String,
pub file_comment: Option<String>,
pub external_file_attributes: u32,
pub extra_fields: ExtraFields,
}
#[derive(Debug)]
pub struct ZipFileNoData {
pub header: ZipFileHeader,
pub local_header_offset: u32,
pub compressed_size: u32,
}
impl ZipFile {
pub fn write_local_file_header_with_data_consuming<W: Write + Seek>(
self,
buf: &mut W,
) -> std::io::Result<ZipFileNoData> {
let local_header_offset = super::stream_position_u32(buf)?;
self.write_local_file_header_and_data(buf)?;
let Self { header, data } = self;
Ok(ZipFileNoData {
header,
local_header_offset,
compressed_size: data.len() as u32,
})
}
const LOCAL_FILE_HEADER_LEN: usize = 30;
pub fn write_local_file_header_and_data<W: Write>(&self, buf: &mut W) -> std::io::Result<()> {
// Writing to a temporary in-memory statically sized array first
let mut header = [0; Self::LOCAL_FILE_HEADER_LEN];
{
let mut header_buf: &mut [u8] = &mut header;
// signature
header_buf.write_all(&LOCAL_FILE_HEADER_SIGNATURE.to_le_bytes())?;
// version needed to extract
header_buf.write_all(&VERSION_NEEDED_TO_EXTRACT.to_le_bytes())?;
// general purpose bit flag
header_buf.write_all(&GENERAL_PURPOSE_BIT_FLAG.to_le_bytes())?;
// compression type
header_buf.write_all(&(self.header.compression_type as u16).to_le_bytes())?;
// Last modification time // moved to extra fields
header_buf.write_all(&0_u16.to_le_bytes())?;
// Last modification date // moved to extra fields
header_buf.write_all(&0_u16.to_le_bytes())?;
// crc
header_buf.write_all(&self.header.crc.to_le_bytes())?;
// Compressed size
debug_assert!(self.data.len() <= u32::MAX as usize);
header_buf.write_all(&(self.data.len() as u32).to_le_bytes())?;
// Uncompressed size
header_buf.write_all(&self.header.uncompressed_size.to_le_bytes())?;
// Filename size
debug_assert!(self.header.filename.len() <= u16::MAX as usize);
header_buf.write_all(&(self.header.filename.len() as u16).to_le_bytes())?;
// extra field size
header_buf.write_all(
&self
.header
.extra_fields
.data_length::<false>()
.to_le_bytes(),
)?;
}
buf.write_all(&header)?;
// Filename
buf.write_all(self.header.filename.as_bytes())?;
// Extra field
self.header.extra_fields.write::<_, false>(buf)?;
// Data
buf.write_all(&self.data)?;
Ok(())
}
#[inline]
pub fn directory(
mut name: String,
extra_fields: ExtraFields,
external_attributes: u16,
file_comment: Option<String>,
) -> Self {
if !(name.ends_with('/') || name.ends_with('\\')) {
name += "/"
};
Self {
header: ZipFileHeader {
compression_type: CompressionType::Stored,
crc: 0,
uncompressed_size: 0,
filename: name,
external_file_attributes: (external_attributes as u32) << 16,
extra_fields,
file_comment,
},
data: vec![],
}
}
}
impl ZipFileNoData {
const CENTRAL_DIR_ENTRY_LEN: usize = 46;
pub fn write_central_directory_entry<W: Write>(&self, buf: &mut W) -> std::io::Result<()> {
// Writing to a temporary in-memory statically sized array first
let mut central_dir_entry_header = [0; Self::CENTRAL_DIR_ENTRY_LEN];
{
let mut central_dir_entry_buf: &mut [u8] = &mut central_dir_entry_header;
// signature
central_dir_entry_buf.write_all(&CENTRAL_FILE_HEADER_SIGNATURE.to_le_bytes())?;
// version made by
central_dir_entry_buf.write_all(&VERSION_MADE_BY.to_le_bytes())?;
// version needed to extract
central_dir_entry_buf.write_all(&VERSION_NEEDED_TO_EXTRACT.to_le_bytes())?;
// general purpose bit flag
central_dir_entry_buf.write_all(&GENERAL_PURPOSE_BIT_FLAG.to_le_bytes())?;
// compression type
central_dir_entry_buf
.write_all(&(self.header.compression_type as u16).to_le_bytes())?;
// Last modification time // moved to extra fields
central_dir_entry_buf.write_all(&0_u16.to_le_bytes())?;
// Last modification date // moved to extra fields
central_dir_entry_buf.write_all(&0_u16.to_le_bytes())?;
// crc
central_dir_entry_buf.write_all(&self.header.crc.to_le_bytes())?;
// Compressed size
central_dir_entry_buf.write_all(&self.compressed_size.to_le_bytes())?;
// Uncompressed size
central_dir_entry_buf.write_all(&self.header.uncompressed_size.to_le_bytes())?;
// Filename size
debug_assert!(self.header.filename.len() <= u16::MAX as usize);
central_dir_entry_buf.write_all(&(self.header.filename.len() as u16).to_le_bytes())?;
// extra field size
central_dir_entry_buf
.write_all(&self.header.extra_fields.data_length::<true>().to_le_bytes())?;
// comment size
central_dir_entry_buf.write_all(
&(self
.header
.file_comment
.as_ref()
.map(|fc| fc.len())
.unwrap_or(0) as u16)
.to_le_bytes(),
)?;
// disk number start
central_dir_entry_buf.write_all(&0_u16.to_le_bytes())?;
// internal file attributes
central_dir_entry_buf.write_all(&0_u16.to_le_bytes())?;
// external file attributes
central_dir_entry_buf.write_all(&self.header.external_file_attributes.to_le_bytes())?;
// relative offset of local header
central_dir_entry_buf.write_all(&self.local_header_offset.to_le_bytes())?;
}
buf.write_all(&central_dir_entry_header)?;
// Filename
buf.write_all(self.header.filename.as_bytes())?;
// Extra field
self.header.extra_fields.write::<_, true>(buf)?;
// File comment
if let Some(file_comment) = &self.header.file_comment {
buf.write_all(file_comment.as_bytes())?;
}
Ok(())
}
}

View File

@@ -0,0 +1,179 @@
use std::{
borrow::Cow,
fs::File,
io::Read,
panic::{RefUnwindSafe, UnwindSafe},
path::Path,
};
use flate2::{CrcReader, read::DeflateEncoder};
use super::{extra_field::ExtraFields, file::ZipFile};
use super::super::{
CompressionType, level::CompressionLevel, platform::attributes_from_fs,
zip_archive_parts::file::ZipFileHeader,
};
pub enum ZipJobOrigin<'a> {
Directory,
Filesystem { path: Cow<'a, Path> },
RawData(Cow<'a, [u8]>),
Reader(Box<dyn Read + Send + Sync + UnwindSafe + RefUnwindSafe + 'a>),
}
impl core::fmt::Debug for ZipJobOrigin<'_> {
#[inline]
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Directory => f.write_str("Directory"),
Self::Filesystem { path } => f.debug_struct("Filesystem").field("path", &path).finish(),
Self::RawData(raw_data) => f.debug_tuple("RawData").field(&raw_data).finish(),
Self::Reader(_reader) => f.debug_tuple("Reader").finish_non_exhaustive(),
}
}
}
#[derive(Debug)]
struct FileDigest {
data: Vec<u8>,
uncompressed_size: u32,
crc: u32,
}
#[derive(Debug)]
pub struct ZipJob<'a> {
pub data_origin: ZipJobOrigin<'a>,
pub extra_fields: ExtraFields,
pub archive_path: String,
pub file_comment: Option<String>,
pub external_attributes: u16,
/// Ignored when [`data_origin`](Self::data_origin) is a [`ZipJobOrigin::Directory`]
pub compression_level: CompressionLevel,
/// Ignored when [`data_origin`](Self::data_origin) is a [`ZipJobOrigin::Directory`]
pub compression_type: CompressionType,
}
impl ZipJob<'_> {
fn compress_file<R: Read>(
source: R,
uncompressed_size_approx: Option<u32>,
compression_type: CompressionType,
compression_level: CompressionLevel,
) -> std::io::Result<FileDigest> {
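        // The CRC is computed over the *uncompressed* bytes: `CrcReader` wraps
        // the source and updates the checksum as data is pulled through it by
        // the encoder (or by `read_to_end` for stored entries), so compression
        // and checksumming happen in a single pass.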
let mut crc_reader = CrcReader::new(source);
let mut data = Vec::with_capacity(uncompressed_size_approx.unwrap_or(0) as usize);
let uncompressed_size = match compression_type {
CompressionType::Deflate => {
let mut encoder = DeflateEncoder::new(&mut crc_reader, compression_level.into());
encoder.read_to_end(&mut data)?;
encoder.total_in() as usize
}
CompressionType::Stored => crc_reader.read_to_end(&mut data)?,
};
debug_assert!(uncompressed_size <= u32::MAX as usize);
let uncompressed_size = uncompressed_size as u32;
data.shrink_to_fit();
let crc = crc_reader.crc().sum();
Ok(FileDigest {
data,
uncompressed_size,
crc,
})
}
pub fn into_file(self) -> std::io::Result<ZipFile> {
match self.data_origin {
ZipJobOrigin::Directory => Ok(ZipFile::directory(
self.archive_path,
self.extra_fields,
self.external_attributes,
self.file_comment,
)),
ZipJobOrigin::Filesystem { path } => {
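                // Filesystem entries derive their external attributes and the
                // generated extra fields from the file's own metadata; extra
                // fields supplied via the builder are appended afterwards, and
                // the builder's external attributes are ignored (see
                // `ZipFileBuilder::external_attributes`).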
let file = File::open(path).unwrap();
let file_metadata = file.metadata().unwrap();
let uncompressed_size_approx = file_metadata.len();
debug_assert!(uncompressed_size_approx <= u32::MAX.into());
let uncompressed_size_approx = uncompressed_size_approx as u32;
let external_file_attributes = attributes_from_fs(&file_metadata);
let mut extra_fields = ExtraFields::new_from_fs(&file_metadata);
extra_fields.extend(self.extra_fields);
let FileDigest {
data,
uncompressed_size,
crc,
} = Self::compress_file(
file,
Some(uncompressed_size_approx),
self.compression_type,
self.compression_level,
)?;
Ok(ZipFile {
header: ZipFileHeader {
                        compression_type: self.compression_type,
crc,
uncompressed_size,
filename: self.archive_path,
external_file_attributes: (external_file_attributes as u32) << 16,
extra_fields,
file_comment: self.file_comment,
},
data,
})
}
ZipJobOrigin::RawData(data) => {
let uncompressed_size_approx = data.len();
debug_assert!(uncompressed_size_approx <= u32::MAX as usize);
let uncompressed_size_approx = uncompressed_size_approx as u32;
let FileDigest {
data,
uncompressed_size,
crc,
} = Self::compress_file(
data.as_ref(),
Some(uncompressed_size_approx),
self.compression_type,
self.compression_level,
)?;
Ok(ZipFile {
header: ZipFileHeader {
                        compression_type: self.compression_type,
crc,
uncompressed_size,
filename: self.archive_path,
external_file_attributes: (self.external_attributes as u32) << 16,
extra_fields: self.extra_fields,
file_comment: self.file_comment,
},
data,
})
}
ZipJobOrigin::Reader(reader) => {
let FileDigest {
data,
uncompressed_size,
crc,
} = Self::compress_file(
reader,
None,
self.compression_type,
self.compression_level,
)?;
Ok(ZipFile {
header: ZipFileHeader {
                        compression_type: self.compression_type,
crc,
uncompressed_size,
filename: self.archive_path,
external_file_attributes: (self.external_attributes as u32) << 16,
extra_fields: self.extra_fields,
file_comment: self.file_comment,
},
data,
})
}
}
}
}

View File

@@ -0,0 +1,17 @@
pub mod data;
pub mod extra_field;
pub mod file;
pub mod job;
use std::io::Seek;
#[inline]
pub fn stream_position_u32<W: Seek>(buf: &mut W) -> std::io::Result<u32> {
let offset = buf.stream_position()?;
debug_assert!(offset <= u32::MAX.into());
Ok(offset as u32)
}
#[inline]
pub fn files_amount_u16<T>(files: &[T]) -> u16 {
let amount = files.len();
debug_assert!(amount <= u16::MAX as usize);
amount as u16
}

View File

@@ -0,0 +1,142 @@
// Copyright 2023 Google LLC
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp::min;
/// A struct which can issue periodic updates indicating progress towards
/// an external total, based on updates towards an internal goal.
pub struct ProgressUpdater<F: Fn(u64)> {
callback: F,
internal_progress: u64,
per_update_internal: u64,
update_external_amount: u64,
external_updates_sent: u64,
remainder_external: u64,
internal_total: u64,
}
impl<F: Fn(u64)> ProgressUpdater<F> {
/// Create a new progress updater, with a callback to be called periodically.
pub fn new(callback: F, external_total: u64, internal_total: u64, per_update_internal: u64) -> Self {
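        // Integer bookkeeping: one external update is emitted for every
        // `per_update_internal` units of internal progress, each worth
        // `external_total / total_updates_expected` external units; whatever
        // the integer division loses is kept in `remainder_external` and
        // flushed by `finish()`.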
let per_update_internal = min(internal_total, per_update_internal);
let total_updates_expected = if per_update_internal == 0 { 0 } else { internal_total / per_update_internal };
let (update_external_amount, remainder_external) = if total_updates_expected == 0 {
(0, external_total)
} else {
(external_total / total_updates_expected, external_total % total_updates_expected)
};
Self {
callback,
internal_progress: 0u64,
per_update_internal,
update_external_amount,
external_updates_sent: 0u64,
remainder_external,
internal_total,
}
}
/// Indicate some progress towards the internal goal. May call back the
/// external callback function to show some progress towards the external
/// goal.
pub fn progress(&mut self, amount_internal: u64) {
self.internal_progress += amount_internal;
self.send_due_updates();
}
fn send_due_updates(&mut self) {
let updates_due = if self.per_update_internal == 0 { 0 } else { self.internal_progress / self.per_update_internal };
while updates_due > self.external_updates_sent {
(self.callback)(self.update_external_amount);
self.external_updates_sent += 1;
}
}
/// Indicate completion of the task. Fully update the callback towards the
/// external state.
pub fn finish(&mut self) {
self.internal_progress = self.internal_total;
self.send_due_updates();
if self.remainder_external > 0 {
(self.callback)(self.remainder_external);
}
}
}
#[test]
fn test_progress_updater() {
let amount_received = std::rc::Rc::new(std::cell::RefCell::new(0u64));
let mut progresser = ProgressUpdater::new(
|progress| {
*(amount_received.borrow_mut()) += progress;
},
100,
1000,
100,
);
assert_eq!(*amount_received.borrow(), 0);
progresser.progress(1);
assert_eq!(*amount_received.borrow(), 0);
progresser.progress(100);
assert_eq!(*amount_received.borrow(), 10);
progresser.progress(800);
assert_eq!(*amount_received.borrow(), 90);
progresser.finish();
assert_eq!(*amount_received.borrow(), 100);
}
#[test]
fn test_progress_updater_zero_external() {
let amount_received = std::rc::Rc::new(std::cell::RefCell::new(0u64));
let mut progresser = ProgressUpdater::new(
|progress| {
*(amount_received.borrow_mut()) += progress;
},
0,
1000,
100,
);
assert_eq!(*amount_received.borrow(), 0);
progresser.progress(1);
progresser.progress(800);
progresser.finish();
assert_eq!(*amount_received.borrow(), 0);
}
#[test]
fn test_progress_updater_small_internal() {
let amount_received = std::rc::Rc::new(std::cell::RefCell::new(0u64));
let mut progresser = ProgressUpdater::new(
|progress| {
*(amount_received.borrow_mut()) += progress;
},
100,
5,
100,
);
assert_eq!(*amount_received.borrow(), 0);
progresser.progress(1);
progresser.finish();
assert_eq!(*amount_received.borrow(), 100);
}
#[test]
fn test_progress_updater_zero_internal() {
let amount_received = std::rc::Rc::new(std::cell::RefCell::new(0u64));
let mut progresser = ProgressUpdater::new(
|progress| {
*(amount_received.borrow_mut()) += progress;
},
100,
0,
100,
);
assert_eq!(*amount_received.borrow(), 0);
progresser.finish();
assert_eq!(*amount_received.borrow(), 100);
}

View File

@@ -0,0 +1,327 @@
// Copyright 2022 Google LLC
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
borrow::Cow,
fs::File,
io::{ErrorKind, Read, Seek, SeekFrom},
path::{Path, PathBuf},
sync::Mutex,
};
use anyhow::{Context, Result};
use rayon::prelude::*;
use zip::{read::ZipFile, ZipArchive};
use super::{cloneable_seekable_reader::CloneableSeekableReader, progress_updater::ProgressUpdater, UnzipProgressReporter};
pub(crate) fn determine_stream_len<R: Seek>(stream: &mut R) -> std::io::Result<u64> {
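    // Seek-to-end trick: remember the current position, seek to the end to
    // learn the total length, then restore the original position (skipped if
    // the stream already happened to be at the end).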
let old_pos = stream.stream_position()?;
let len = stream.seek(SeekFrom::End(0))?;
if old_pos != len {
stream.seek(SeekFrom::Start(old_pos))?;
}
Ok(len)
}
/// Options for unzipping.
pub struct UnzipOptions<'a, 'b> {
/// The destination directory.
pub output_directory: Option<PathBuf>,
/// Password if encrypted.
pub password: Option<String>,
/// Whether to run in single-threaded mode.
pub single_threaded: bool,
    /// An optional filename filter.
pub filename_filter: Option<Box<dyn FilenameFilter + Sync + 'a>>,
/// An object to receive notifications of unzip progress.
pub progress_reporter: Box<dyn UnzipProgressReporter + Sync + 'b>,
}
/// An object which can unzip a zip file, in its entirety, from a local
/// file or from a network stream. It tries to do this in parallel wherever
/// possible.
pub struct UnzipEngine {
zipfile: Box<dyn UnzipEngineImpl>,
compressed_length: u64,
directory_creator: DirectoryCreator,
}
/// Code which can determine whether to unzip a given filename.
pub trait FilenameFilter {
/// Returns true if the given filename should be unzipped.
fn should_unzip(&self, filename: &str) -> bool;
}
/// The underlying engine used by the unzipper. This is different
/// for files and URIs.
trait UnzipEngineImpl {
fn unzip(&mut self, options: UnzipOptions, directory_creator: &DirectoryCreator) -> Vec<anyhow::Error>;
// Due to lack of RPITIT we'll return a Vec<String> here
fn list(&self) -> Result<Vec<String>, anyhow::Error>;
}
/// Engine which knows how to unzip a file.
#[derive(Clone)]
struct UnzipFileEngine(ZipArchive<CloneableSeekableReader<File>>);
impl UnzipEngineImpl for UnzipFileEngine {
fn unzip(&mut self, options: UnzipOptions, directory_creator: &DirectoryCreator) -> Vec<anyhow::Error> {
unzip_serial_or_parallel(self.0.len(), options, directory_creator, || self.0.clone(), || {})
}
fn list(&self) -> Result<Vec<String>, anyhow::Error> {
list(&self.0)
}
}
impl UnzipEngine {
/// Create an unzip engine which knows how to unzip a file.
pub fn for_file(mut zipfile: File) -> Result<Self> {
// The following line doesn't actually seem to make any significant
// performance difference.
// let zipfile = BufReader::new(zipfile);
let compressed_length = determine_stream_len(&mut zipfile)?;
let zipfile = CloneableSeekableReader::new(zipfile);
Ok(Self {
zipfile: Box::new(UnzipFileEngine(ZipArchive::new(zipfile)?)),
compressed_length,
directory_creator: DirectoryCreator::default(),
})
}
/// The total compressed length that we expect to retrieve over
/// the network or from the compressed file.
pub fn zip_length(&self) -> u64 {
self.compressed_length
}
    /// Perform the unzip.
pub fn unzip(mut self, options: UnzipOptions) -> Result<()> {
log::debug!("Starting extract");
options.progress_reporter.total_bytes_expected(self.compressed_length);
let errors = self.zipfile.unzip(options, &self.directory_creator);
// Return the first error code, if any.
errors.into_iter().next().map(Result::Err).unwrap_or(Ok(()))
}
/// List the filenames in the archive
pub fn list(self) -> Result<impl Iterator<Item = String>> {
// In future this might be a more dynamic iterator type.
self.zipfile.list().map(|mut v| {
// Names are returned in a HashMap iteration order so let's
            // sort them to be more reasonable
v.sort();
v.into_iter()
})
}
}
/// Return a list of filenames from the zip. For now this is infallible,
/// but we return a `Result` in case we do something smarter in future.
fn list<'a, T: Read + Seek + 'a>(zip_archive: &ZipArchive<T>) -> Result<Vec<String>> {
Ok(zip_archive.file_names().map(|s| s.to_string()).collect())
}
fn unzip_serial_or_parallel<'a, T: Read + Seek + 'a>(
len: usize,
options: UnzipOptions,
directory_creator: &DirectoryCreator,
get_ziparchive_clone: impl Fn() -> ZipArchive<T> + Sync,
// Call when a file is going to be skipped
file_skip_callback: impl Fn() + Sync + Send + Clone,
) -> Vec<anyhow::Error> {
let progress_reporter: &dyn UnzipProgressReporter = options.progress_reporter.as_ref();
match (options.filename_filter, options.single_threaded) {
(None, true) => (0..len)
.map(|i| {
extract_file_by_index(
&get_ziparchive_clone,
i,
&options.output_directory,
&options.password,
progress_reporter,
directory_creator,
)
})
.filter_map(Result::err)
.collect(),
(None, false) => {
// We use par_bridge here rather than into_par_iter because it turns
// out to better preserve ordering of the IDs in the input range,
// i.e. we're more likely to ask our initial threads to act upon
// file IDs 0, 1, 2, 3, 4, 5 rather than 0, 1000, 2000, 3000 etc.
// On a device which is CPU-bound or IO-bound (rather than network
// bound) that's beneficial because we can start to decompress
// and write data to disk as soon as it arrives from the network.
(0..len)
.par_bridge()
.map(|i| {
extract_file_by_index(
&get_ziparchive_clone,
i,
&options.output_directory,
&options.password,
progress_reporter,
directory_creator,
)
})
.filter_map(Result::err)
.collect()
}
(Some(filename_filter), single_threaded) => {
// If we have a filename filter, an easy thing would be to
// iterate through each file index as above, and check to see if its
// name matches. Unfortunately, that seeks all over the place
// to get the filename from the local header.
// Instead, let's get a list of the filenames we need
// and request them from the zip library directly.
// As we can't predict their order in the file, this may involve
// arbitrary rewinds, so let's do it single-threaded.
if !single_threaded {
log::warn!("Unzipping specific files - assuming --single-threaded since we currently cannot unzip specific files in a multi-threaded mode. If you need that, consider launching multiple copies of ripunzip in parallel.");
}
let mut filenames: Vec<_> = get_ziparchive_clone()
.file_names()
.filter(|name| filename_filter.as_ref().should_unzip(name))
.map(|s| s.to_string())
.collect();
// The filenames returned by the file_names() method above are in
// HashMap iteration order (i.e. random). To avoid creating lots
// of HTTPS streams for files which are nearby each other in the
// zip, we'd ideally extract them in order of file position.
// We have no way of knowing file position (without iterating the
// whole file) so instead let's sort them and hope that files were
// zipped in alphabetical order, or close to it. If we're wrong,
// we'll just end up rewinding, that is, creating extra redundant
// HTTP(S) streams.
filenames.sort();
log::info!("Will unzip {} matching filenames", filenames.len());
file_skip_callback();
filenames
.into_iter()
.map(|name| {
let myzip: &mut zip::ZipArchive<T> = &mut get_ziparchive_clone();
let file: ZipFile<T> = match &options.password {
None => myzip.by_name(&name)?,
Some(string) => myzip.by_name_decrypt(&name, string.as_bytes())?,
};
let r = extract_file(file, &options.output_directory, progress_reporter, directory_creator);
file_skip_callback();
r
})
.filter_map(Result::err)
.collect()
}
}
}
fn extract_file_by_index<'a, T: Read + Seek + 'a>(
get_ziparchive_clone: impl Fn() -> ZipArchive<T> + Sync,
i: usize,
output_directory: &Option<PathBuf>,
password: &Option<String>,
progress_reporter: &dyn UnzipProgressReporter,
directory_creator: &DirectoryCreator,
) -> Result<(), anyhow::Error> {
let myzip: &mut zip::ZipArchive<T> = &mut get_ziparchive_clone();
let file: ZipFile<T> = match password {
None => myzip.by_index(i)?,
Some(string) => myzip.by_index_decrypt(i, string.as_bytes())?,
};
extract_file(file, output_directory, progress_reporter, directory_creator)
}
fn extract_file<R: Read>(
file: ZipFile<R>,
output_directory: &Option<PathBuf>,
progress_reporter: &dyn UnzipProgressReporter,
directory_creator: &DirectoryCreator,
) -> Result<(), anyhow::Error> {
let name = file.enclosed_name().as_deref().map(Path::to_string_lossy).unwrap_or_else(|| Cow::Borrowed("<unprintable>")).to_string();
extract_file_inner(file, output_directory, progress_reporter, directory_creator).with_context(|| format!("Failed to extract {name}"))
}
/// Extracts a file from a zip file.
fn extract_file_inner<R: Read>(
mut file: ZipFile<R>,
output_directory: &Option<PathBuf>,
progress_reporter: &dyn UnzipProgressReporter,
directory_creator: &DirectoryCreator,
) -> Result<()> {
let name = file.enclosed_name().ok_or_else(|| std::io::Error::new(ErrorKind::Unsupported, "path not safe to extract"))?;
let display_name = name.display().to_string();
let out_path = match output_directory {
Some(output_directory) => output_directory.join(name),
None => name,
};
progress_reporter.extraction_starting(&display_name);
log::debug!("Start extract of file at {:x}, length {:x}, name {}", file.data_start(), file.compressed_size(), display_name);
if file.name().ends_with('/') {
directory_creator.create_dir_all(&out_path)?;
} else {
if let Some(parent) = out_path.parent() {
directory_creator.create_dir_all(parent)?;
}
let out_file = File::create(&out_path).with_context(|| "Failed to create file")?;
// Progress bar strategy. The overall progress across the entire zip file must be
// denoted in terms of *compressed* bytes, since at the outset we don't know the uncompressed
// size of each file. Yet, within a given file, we update progress based on the bytes
// of uncompressed data written, once per 1MB, because that's the information that we happen
// to have available. So, calculate how many compressed bytes relate to 1MB of uncompressed
// data, and the remainder.
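        // Worked example (hypothetical numbers): if compressed_size is 2 MiB and
        // uncompressed_size is 8 MiB, each 1 MiB of uncompressed output maps to
        // 2/8 MiB = 256 KiB of compressed progress; finish() then flushes any
        // remainder so the bar lands exactly on compressed_size.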
let uncompressed_size = file.size();
let compressed_size = file.compressed_size();
let mut progress_updater = ProgressUpdater::new(
|external_progress| {
progress_reporter.bytes_extracted(external_progress);
},
compressed_size,
uncompressed_size,
1024 * 1024,
);
let mut out_file = progress_streams::ProgressWriter::new(out_file, |bytes_written| progress_updater.progress(bytes_written as u64));
// Using a BufWriter here doesn't improve performance even on a VM with
// spinny disks.
std::io::copy(&mut file, &mut out_file).with_context(|| "Failed to write directory")?;
progress_updater.finish();
}
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
if let Some(mode) = file.unix_mode() {
std::fs::set_permissions(&out_path, std::fs::Permissions::from_mode(mode)).with_context(|| "Failed to set permissions")?;
}
}
log::debug!("Finished extract of file at {:x}, length {:x}, name {}", file.data_start(), file.compressed_size(), display_name);
progress_reporter.extraction_finished(&display_name);
Ok(())
}
/// An engine used to ensure we don't conflict in creating directories
/// between threads
#[derive(Default)]
struct DirectoryCreator(Mutex<()>);
impl DirectoryCreator {
fn create_dir_all(&self, path: &Path) -> Result<()> {
// Fast path - avoid locking if the directory exists
if path.exists() {
return Ok(());
}
let _exclusivity = self.0.lock().unwrap();
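        // Re-check under the lock: another thread may have created the
        // directory between the unlocked check above and acquiring the mutex.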
if path.exists() {
return Ok(());
}
std::fs::create_dir_all(path).with_context(|| "Failed to create directory")
}
}

View File

@@ -1,5 +1,6 @@
pub mod runtime_arch;
pub mod cli_host;
pub mod fastzip;
mod dialogs_const;
mod dialogs_common;

View File

@@ -78,7 +78,7 @@ fn check_arch_windows() -> Option<RuntimeArch> {
#[cfg(target_os = "windows")]
type IsWow64Process2Fn = unsafe extern "system" fn(
hProcess: windows::Win32::Foundation::HANDLE,
hprocess: windows::Win32::Foundation::HANDLE,
pprocessmachine: *mut windows::Win32::System::SystemInformation::IMAGE_FILE_MACHINE,
pnativemachine: *mut windows::Win32::System::SystemInformation::IMAGE_FILE_MACHINE,
) -> windows::core::BOOL;

View File

@@ -171,12 +171,21 @@ pub fn has_app_prefixed_folder<P: AsRef<Path>>(parent_path: P) -> bool {
}
}
pub fn delete_app_prefixed_folders<P: AsRef<Path>>(parent_path: P) -> Result<()> {
let folders = get_app_prefixed_folders(parent_path)?;
for folder in folders {
super::retry_io(|| remove_dir_all::remove_dir_all(&folder))?;
pub fn delete_app_prefixed_folders<P: AsRef<Path>>(parent_path: P) {
match get_app_prefixed_folders(parent_path) {
Ok(folders) => {
for folder in folders {
if let Err(e) = super::retry_io(|| remove_dir_all::remove_dir_all(&folder)) {
warn!("Failed to delete app-prefixed folder: {} ({})", folder.display(), e);
} else {
info!("Deleted app-prefixed folder: {}", folder.display());
}
}
}
Err(e) => {
warn!("Failed to find app-prefixed folders: {}", e);
}
}
Ok(())
}
fn parse_version_from_folder_name(folder_name: &str) -> Option<Version> {

View File

@@ -5,7 +5,7 @@
extern crate log;
use anyhow::{anyhow, bail, Result};
use clap::{arg, value_parser, ArgMatches, Command};
use clap::{arg, value_parser, ArgAction, ArgMatches, Command};
use std::{env, path::PathBuf};
use velopack::locator::{auto_locate_app_manifest, LocationContext};
use velopack::logging::*;
@@ -34,9 +34,9 @@ fn root_command() -> Command {
.long_flag_aliases(vec!["processStart", "processStartAndWait"])
)
.subcommand(Command::new("patch")
.about("Applies a Zstd patch file")
.about("Applies a series of delta bundles to a base file")
.arg(arg!(--old <FILE> "Base / old file to apply the patch to").required(true).value_parser(value_parser!(PathBuf)))
.arg(arg!(--patch <FILE> "The Zstd patch to apply to the old file").required(true).value_parser(value_parser!(PathBuf)))
.arg(arg!(--delta <FILE> "The delta bundle to apply to the base package").required(true).action(ArgAction::Append).value_parser(value_parser!(PathBuf)))
.arg(arg!(--output <FILE> "The file to create with the patch applied").required(true).value_parser(value_parser!(PathBuf)))
)
.arg(arg!(--verbose "Print debug messages to console / log").global(true))
@@ -57,6 +57,13 @@ fn root_command() -> Command {
.about("Remove all app shortcuts, files, and registry entries.")
.long_flag_alias("uninstall")
);
#[cfg(target_os = "windows")]
let cmd = cmd.subcommand(Command::new("update-self")
.about("Copy the currently executing Update.exe into the default location.")
.long_flag_alias("updateSelf")
.hide(true)
);
cmd
}
@@ -164,6 +171,8 @@ fn main() -> Result<()> {
let result = match subcommand {
#[cfg(target_os = "windows")]
"uninstall" => uninstall(location_context, subcommand_matches).map_err(|e| anyhow!("Uninstall error: {}", e)),
#[cfg(target_os = "windows")]
"update-self" => update_self(location_context, subcommand_matches).map_err(|e| anyhow!("Update-self error: {}", e)),
"start" => start(location_context, subcommand_matches).map_err(|e| anyhow!("Start error: {}", e)),
"apply" => apply(location_context, subcommand_matches).map_err(|e| anyhow!("Apply error: {}", e)),
"patch" => patch(location_context, subcommand_matches).map_err(|e| anyhow!("Patch error: {}", e)),
@@ -180,19 +189,34 @@ fn main() -> Result<()> {
fn patch(_context: LocationContext, matches: &ArgMatches) -> Result<()> {
let old_file = matches.get_one::<PathBuf>("old");
let patch_file = matches.get_one::<PathBuf>("patch");
let deltas: Vec<&PathBuf> = matches.get_many::<PathBuf>("delta").unwrap_or_default().collect();
let output_file = matches.get_one::<PathBuf>("output");
info!("Command: Patch");
info!(" Old File: {:?}", old_file);
info!(" Patch File: {:?}", patch_file);
info!(" Delta Files: {:?}", deltas);
info!(" Output File: {:?}", output_file);
if old_file.is_none() || patch_file.is_none() || output_file.is_none() {
bail!("Missing required arguments. Please provide --old, --patch, and --output.");
if old_file.is_none() || deltas.is_empty() || output_file.is_none() {
bail!("Missing required arguments. Please provide --old, --delta, and --output.");
}
velopack::delta::zstd_patch_single(old_file.unwrap(), patch_file.unwrap(), output_file.unwrap())?;
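// Resolve a scratch directory for extracting delta payloads: prefer the app's
// own temp dir via the locator, otherwise fall back to a random
// "velopack_<rand>" folder under the OS temp dir.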
let temp_dir = match auto_locate_app_manifest(LocationContext::IAmUpdateExe) {
Ok(locator) => locator.get_temp_dir_rand16(),
Err(_) => {
let mut temp_dir = std::env::temp_dir();
let rand = shared::random_string(16);
temp_dir.push("velopack_".to_owned() + &rand);
temp_dir
}
};
let result = commands::delta(old_file.unwrap(), deltas, &temp_dir, output_file.unwrap());
let _ = remove_dir_all::remove_dir_all(temp_dir);
if let Err(e) = result {
bail!("Delta error: {}", e);
}
Ok(())
}
@@ -252,6 +276,47 @@ fn uninstall(context: LocationContext, _matches: &ArgMatches) -> Result<()> {
commands::uninstall(&locator, true)
}
#[cfg(target_os = "windows")]
fn update_self(context: LocationContext, _matches: &ArgMatches) -> Result<()> {
info!("Command: Update Self");
let my_path = env::current_exe()?;
const RETRY_DELAY: i32 = 500;
const RETRY_COUNT: i32 = 20;
match auto_locate_app_manifest(context) {
Ok(locator) => {
let target_update_path = locator.get_update_path();
if same_file::is_same_file(&target_update_path, &my_path)? {
bail!("Update.exe is already in the default location. No need to update.");
} else {
info!("Copying Update.exe to the default location: {:?}", target_update_path);
shared::retry_io_ex(|| std::fs::copy(&my_path, &target_update_path), RETRY_DELAY, RETRY_COUNT)?;
info!("Update.exe copied successfully.");
}
}
Err(e) => {
warn!("Failed to initialise locator: {}", e);
// search for an Update.exe in parent directories (up to 2 levels up)
let mut current_dir = env::current_dir()?;
let mut found = false;
for _ in 0..2 {
current_dir.pop();
let target_update_path = current_dir.join("Update.exe");
if target_update_path.exists() {
info!("Found Update.exe in parent directory: {:?}", target_update_path);
shared::retry_io_ex(|| std::fs::copy(&my_path, &target_update_path), RETRY_DELAY, RETRY_COUNT)?;
info!("Update.exe copied successfully.");
found = true;
break;
}
}
if !found {
bail!("Failed to locate Update.exe in parent directories, so it could not be updated.");
}
}
}
Ok(())
}
#[cfg(target_os = "windows")]
#[test]
fn test_cli_parse_handles_equals_spaces() {

View File

@@ -3,7 +3,7 @@ use windows::Win32::System::LibraryLoader::LOAD_LIBRARY_SEARCH_SYSTEM32;
use windows::Win32::System::LibraryLoader::LOAD_LIBRARY_FLAGS;
#[cfg(target_os = "windows")]
type SetDefaultDllDirectoriesFn = unsafe extern "system" fn(DirectoryFlags: u32) -> BOOL;
type SetDefaultDllDirectoriesFn = unsafe extern "system" fn(directory_flags: u32) -> BOOL;
#[cfg(target_os = "windows")]
unsafe fn set_default_dll_directories(flags: LOAD_LIBRARY_FLAGS) {

View File

@@ -498,7 +498,7 @@ fn test_dotnet_detects_installed_versions() {
assert!(parse_dotnet_version("net8-runtime").unwrap().is_installed());
assert!(parse_dotnet_version("net8-desktop").unwrap().is_installed());
assert!(parse_dotnet_version("net8-asp").unwrap().is_installed());
assert!(parse_dotnet_version("net8-sdk").unwrap().is_installed());
assert!(parse_dotnet_version("net9-sdk").unwrap().is_installed());
assert!(!parse_dotnet_version("net11").unwrap().is_installed());
}

View File

@@ -86,9 +86,9 @@ pub fn expand_environment_strings<P: AsRef<str>>(input: P) -> Result<String> {
#[test]
fn test_expand_environment_strings() {
assert_eq!(expand_environment_strings("%windir%").unwrap(), "C:\\Windows");
assert_eq!(expand_environment_strings("%windir%\\system32").unwrap(), "C:\\Windows\\system32");
assert_eq!(expand_environment_strings("%windir%\\system32\\").unwrap(), "C:\\Windows\\system32\\");
assert!(expand_environment_strings("%windir%").unwrap().eq_ignore_ascii_case("C:\\Windows"));
assert!(expand_environment_strings("%windir%\\system32").unwrap().eq_ignore_ascii_case("C:\\Windows\\system32"));
assert!(expand_environment_strings("%windir%\\system32\\").unwrap().eq_ignore_ascii_case("C:\\Windows\\system32\\"));
}
pub fn get_long_path<P: AsRef<str>>(str: P) -> Result<String> {

View File

@@ -2,14 +2,15 @@
mod common;
use common::*;
use std::hint::assert_unchecked;
use std::{fs, path::Path, path::PathBuf};
use tempfile::tempdir;
use velopack_bins::*;
#[cfg(target_os = "windows")]
use winsafe::{self as w, co};
use velopack::bundle::load_bundle_from_file;
use velopack::locator::{auto_locate_app_manifest, LocationContext};
#[cfg(target_os = "windows")]
use winsafe::{self as w, co};
#[cfg(target_os = "windows")]
#[test]
@@ -87,7 +88,7 @@ pub fn test_install_preserve_symlinks() {
let tmp_dir = tempdir().unwrap();
let tmp_buf = tmp_dir.path().to_path_buf();
let mut tmp_zip = load_bundle_from_file(nupkg).unwrap();
commands::install(&mut tmp_zip, Some(&tmp_buf), None).unwrap();
assert!(tmp_buf.join("current").join("actual").join("file.txt").exists());
@@ -119,13 +120,45 @@ pub fn test_patch_apply() {
let expected_sha1 = get_sha1(&new_file);
let tmp_file = Path::new("temp.patch").to_path_buf();
velopack::delta::zstd_patch_single(&old_file, &p1, &tmp_file).unwrap();
velopack_bins::commands::zstd_patch_single(&old_file, &p1, &tmp_file).unwrap();
let tmp_sha1 = get_sha1(&tmp_file);
fs::remove_file(&tmp_file).unwrap();
assert_eq!(expected_sha1, tmp_sha1);
velopack::delta::zstd_patch_single(&old_file, &p2, &tmp_file).unwrap();
velopack_bins::commands::zstd_patch_single(&old_file, &p2, &tmp_file).unwrap();
let tmp_sha1 = get_sha1(&tmp_file);
fs::remove_file(&tmp_file).unwrap();
assert_eq!(expected_sha1, tmp_sha1);
}
}
#[test]
pub fn test_delta_apply_legacy() {
dialogs::set_silent(true);
let fixtures = find_fixtures();
let base = fixtures.join("Clowd-3.4.287-full.nupkg");
let d1 = fixtures.join("Clowd-3.4.288-delta.nupkg");
let d2 = fixtures.join("Clowd-3.4.291-delta.nupkg");
let d3 = fixtures.join("Clowd-3.4.292-delta.nupkg");
let d4 = fixtures.join("Clowd-3.4.293-delta.nupkg");
let deltas = vec![&d1, &d2, &d3, &d4];
let tmp_dir = tempdir().unwrap();
let temp_output = tmp_dir.path().join("Clowd-3.4.293-full.nupkg");
commands::delta(&base, deltas, tmp_dir.path(), &temp_output).unwrap();
let mut bundle = load_bundle_from_file(temp_output).unwrap();
let manifest = bundle.read_manifest().unwrap();
assert_eq!(manifest.id, "Clowd");
assert_eq!(manifest.version, semver::Version::parse("3.4.293").unwrap());
#[cfg(not(target_os = "linux"))]
{
let extract_dir = tmp_dir.path().join("_extracted");
bundle.extract_lib_contents_to_path(&extract_dir, |_| {}).unwrap();
let extracted = extract_dir.join("Clowd.dll");
assert!(extracted.exists());
}
}

View File

@@ -107,7 +107,7 @@ typedef struct vpkc_update_options_t {
*/
bool AllowVersionDowngrade;
/**
* **This option should usually be left None**. <br/>
* **This option should usually be left None**.
* Overrides the default channel used to fetch updates.
* The default channel will be whatever channel was specified on the command line when building this release.
* For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
@@ -116,6 +116,11 @@ typedef struct vpkc_update_options_t {
* without having to reinstall the application.
*/
char *ExplicitChannel;
/**
* Sets the maximum number of deltas to consider before falling back to a full update.
* The default is 10. Set to a negative number (eg. -1) to disable deltas.
*/
int32_t MaximumDeltasBeforeFallback;
} vpkc_update_options_t;
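/*
 * Illustrative caller sketch (hypothetical, not part of the generated header):
 * options are typically zero-initialised and then overridden per field, e.g.
 *
 *   vpkc_update_options_t opts = {0};
 *   opts.ExplicitChannel = "beta";
 *   opts.MaximumDeltasBeforeFallback = -1; // never use delta packages
 */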
/**
@@ -160,7 +165,19 @@ typedef struct vpkc_update_info_t {
/**
* The available version that we are updating to.
*/
struct vpkc_asset_t TargetFullRelease;
struct vpkc_asset_t *TargetFullRelease;
/**
* The base release that this update is based on. This is only available if the update is a delta update.
*/
struct vpkc_asset_t *BaseRelease;
/**
* The list of delta updates that can be applied to the base version to get to the target version.
*/
struct vpkc_asset_t **DeltasToTarget;
/**
* The number of elements in the DeltasToTarget array.
*/
size_t DeltasToTargetCount;
/**
* True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
* In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be
@@ -267,14 +284,14 @@ bool vpkc_is_portable(vpkc_update_manager_t *p_manager);
 * Returns a VelopackAsset (via p_asset) if there is an update downloaded which still needs to be applied.
 * You can pass the asset to vpkc_wait_exit_then_apply_updates to apply the update.
*/
bool vpkc_update_pending_restart(vpkc_update_manager_t *p_manager, struct vpkc_asset_t *p_asset);
bool vpkc_update_pending_restart(vpkc_update_manager_t *p_manager, struct vpkc_asset_t **p_asset);
/**
* Checks for updates, returning None if there are none available. If there are updates available, this method will return an
* UpdateInfo object containing the latest available release, and any delta updates that can be applied if they are available.
*/
vpkc_update_check_t vpkc_check_for_updates(vpkc_update_manager_t *p_manager,
struct vpkc_update_info_t *p_update);
struct vpkc_update_info_t **p_update);
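/*
 * Illustrative caller sketch (hypothetical; assumes the UPDATE_AVAILABLE
 * enumerator of vpkc_update_check_t): with the double-pointer API the library
 * allocates the result, so the caller frees it when done, e.g.
 *
 *   vpkc_update_info_t *update = NULL;
 *   if (vpkc_check_for_updates(manager, &update) == UPDATE_AVAILABLE) {
 *       // ... inspect update->TargetFullRelease ...
 *       vpkc_free_update_info(update);
 *   }
 */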
/**
* Downloads the specified updates to the local app packages directory. Progress is reported back to the caller via an optional callback.

View File

@@ -1,6 +1,5 @@
//! This header provides the C++ API for the Velopack library.
//! This C++ API is a thin wrapper around the C API, providing a more idiomatic C++ interface.
//! You should not mix and match the C and C++ APIs in the same program.
#ifndef VELOPACK_HPP
#define VELOPACK_HPP
@@ -19,181 +18,27 @@
namespace Velopack {
static inline void throw_last_error() {
static inline void throw_last_error()
{
size_t neededSize = vpkc_get_last_error(nullptr, 0);
std::string strError(neededSize, '\0');
vpkc_get_last_error(&strError[0], neededSize);
throw std::runtime_error(strError);
}
static inline std::string to_cppstring(const char* psz) {
return psz == nullptr ? "" : psz;
static inline std::optional<std::string> to_cpp_string(const char* psz)
{
    return psz == nullptr ? std::nullopt : std::optional<std::string>(psz);
}
static inline char* to_cstring(const std::string& str) {
return const_cast<char*>(str.c_str());
static inline char* alloc_c_string(const std::optional<std::string>& str)
{
if (!str.has_value()) { return nullptr; }
return alloc_c_string(str.value());
}
static inline char* to_cstring_opt(const std::optional<std::string>& str) {
return str.has_value() ? to_cstring(str.value()) : nullptr;
}
static inline std::optional<std::string> to_cppstring_opt(const char* psz) {
return psz == nullptr ? std::nullopt : std::optional<std::string>(psz);
}
static inline bool to_cppbool(bool b) { return b; }
static inline bool to_cbool(bool b) { return b; }
static inline uint64_t to_cu64(uint64_t i) { return i; }
static inline uint64_t to_cppu64(uint64_t i) { return i; }
// !! AUTO-GENERATED-START CPP_TYPES
/// VelopackLocator provides some utility functions for locating the current app important paths (eg. path to packages, update binary, and so forth).
struct VelopackLocatorConfig {
/// The root directory of the current app.
std::string RootAppDir;
/// The path to the Update.exe binary.
std::string UpdateExePath;
/// The path to the packages' directory.
std::string PackagesDir;
/// The current app manifest.
std::string ManifestPath;
/// The directory containing the application's user binaries.
std::string CurrentBinaryDir;
/// Whether the current application is portable or installed.
bool IsPortable;
};
/// An individual Velopack asset, could refer to an asset on-disk or in a remote package feed.
struct VelopackAsset {
/// The name or Id of the package containing this release.
std::string PackageId;
/// The version of this release.
std::string Version;
/// The type of asset (eg. "Full" or "Delta").
std::string Type;
/// The filename of the update package containing this release.
std::string FileName;
/// The SHA1 checksum of the update package containing this release.
std::string SHA1;
/// The SHA256 checksum of the update package containing this release.
std::string SHA256;
/// The size in bytes of the update package containing this release.
uint64_t Size;
/// The release notes in markdown format, as passed to Velopack when packaging the release. This may be an empty string.
std::string NotesMarkdown;
/// The release notes in HTML format, transformed from Markdown when packaging the release. This may be an empty string.
std::string NotesHtml;
};
/// Holds information about the current version and pending updates, such as how many there are, and access to release notes.
struct UpdateInfo {
/// The available version that we are updating to.
VelopackAsset TargetFullRelease;
/// True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
/// In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be
/// deleted.
bool IsDowngrade;
};
/// Options to customise the behaviour of UpdateManager.
struct UpdateOptions {
/// Allows UpdateManager to update to a version that's lower than the current version (i.e. downgrading).
/// This could happen if a release has bugs and was retracted from the release feed, or if you're using
/// ExplicitChannel to switch channels to another channel where the latest version on that
/// channel is lower than the current version.
bool AllowVersionDowngrade;
/// **This option should usually be left None**. <br/>
/// Overrides the default channel used to fetch updates.
/// The default channel will be whatever channel was specified on the command line when building this release.
/// For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
/// This allows users to automatically receive updates from the same channel they installed from. This option
/// allows you to explicitly switch channels, for example if the user wished to switch back to the 'stable' channel
/// without having to reinstall the application.
std::optional<std::string> ExplicitChannel;
};
static inline vpkc_locator_config_t to_c(const VelopackLocatorConfig& dto) {
return {
to_cstring(dto.RootAppDir),
to_cstring(dto.UpdateExePath),
to_cstring(dto.PackagesDir),
to_cstring(dto.ManifestPath),
to_cstring(dto.CurrentBinaryDir),
to_cbool(dto.IsPortable),
};
}
static inline VelopackLocatorConfig to_cpp(const vpkc_locator_config_t& dto) {
return {
to_cppstring(dto.RootAppDir),
to_cppstring(dto.UpdateExePath),
to_cppstring(dto.PackagesDir),
to_cppstring(dto.ManifestPath),
to_cppstring(dto.CurrentBinaryDir),
to_cppbool(dto.IsPortable),
};
}
static inline vpkc_asset_t to_c(const VelopackAsset& dto) {
return {
to_cstring(dto.PackageId),
to_cstring(dto.Version),
to_cstring(dto.Type),
to_cstring(dto.FileName),
to_cstring(dto.SHA1),
to_cstring(dto.SHA256),
to_cu64(dto.Size),
to_cstring(dto.NotesMarkdown),
to_cstring(dto.NotesHtml),
};
}
static inline VelopackAsset to_cpp(const vpkc_asset_t& dto) {
return {
to_cppstring(dto.PackageId),
to_cppstring(dto.Version),
to_cppstring(dto.Type),
to_cppstring(dto.FileName),
to_cppstring(dto.SHA1),
to_cppstring(dto.SHA256),
to_cppu64(dto.Size),
to_cppstring(dto.NotesMarkdown),
to_cppstring(dto.NotesHtml),
};
}
static inline vpkc_update_info_t to_c(const UpdateInfo& dto) {
return {
to_c(dto.TargetFullRelease),
to_cbool(dto.IsDowngrade),
};
}
static inline UpdateInfo to_cpp(const vpkc_update_info_t& dto) {
return {
to_cpp(dto.TargetFullRelease),
to_cppbool(dto.IsDowngrade),
};
}
static inline vpkc_update_options_t to_c(const UpdateOptions& dto) {
return {
to_cbool(dto.AllowVersionDowngrade),
to_cstring_opt(dto.ExplicitChannel),
};
}
static inline UpdateOptions to_cpp(const vpkc_update_options_t& dto) {
return {
to_cppbool(dto.AllowVersionDowngrade),
to_cppstring_opt(dto.ExplicitChannel),
};
}
// !! AUTO-GENERATED-END CPP_TYPES
static inline char* allocate_cstring(const std::string& str) {
static inline char* alloc_c_string(const std::string& str)
{
char* result = new char[str.size() + 1]; // +1 for null-terminator
#ifdef _WIN32
strcpy_s(result, str.size() + 1, str.c_str()); // Copy string content
@@ -204,26 +49,415 @@ static inline char* allocate_cstring(const std::string& str) {
return result;
}
static inline void free_cstring(char* str) {
static inline void free_c_string(char* str)
{
delete[] str;
}
static inline char** allocate_cstring_array(const std::vector<std::string>& vec) {
char** result = new char*[vec.size()];
for (size_t i = 0; i < vec.size(); ++i) {
result[i] = allocate_cstring(vec[i]);
static inline char** alloc_c_string_vec(const std::vector<std::string>& dto, size_t* count)
{
if (dto.empty()) {
*count = 0;
return nullptr;
}
return result;
*count = dto.size();
char** arr = new char* [*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_string(dto[i]);
}
return arr;
}
static inline void free_cstring_array(char** arr, size_t size) {
static inline void free_c_string_vec(char** arr, size_t size)
{
for (size_t i = 0; i < size; ++i) {
free_cstring(arr[i]);
free_c_string(arr[i]);
arr[i] = nullptr;
}
delete[] arr;
}
template<typename T>
inline T unwrap(const std::optional<T>& opt, const std::string& message = "Expected value not present") {
if (!opt.has_value()) {
throw std::runtime_error(message);
}
return opt.value();
}
// !! AUTO-GENERATED-START CPP_TYPES
/** VelopackLocator provides some utility functions for locating the current app important paths (eg. path to packages, update binary, and so forth). */
struct VelopackLocatorConfig {
/** The root directory of the current app. */
std::string RootAppDir;
/** The path to the Update.exe binary. */
std::string UpdateExePath;
/** The path to the packages' directory. */
std::string PackagesDir;
/** The current app manifest. */
std::string ManifestPath;
/** The directory containing the application's user binaries. */
std::string CurrentBinaryDir;
/** Whether the current application is portable or installed. */
bool IsPortable;
};
static inline std::optional<VelopackLocatorConfig> to_cpp_VelopackLocatorConfig(const vpkc_locator_config_t* dto) {
if (dto == nullptr) { return std::nullopt; }
return std::optional<VelopackLocatorConfig>({
unwrap(to_cpp_string(dto->RootAppDir), "Required property RootAppDir was null"),
unwrap(to_cpp_string(dto->UpdateExePath), "Required property UpdateExePath was null"),
unwrap(to_cpp_string(dto->PackagesDir), "Required property PackagesDir was null"),
unwrap(to_cpp_string(dto->ManifestPath), "Required property ManifestPath was null"),
unwrap(to_cpp_string(dto->CurrentBinaryDir), "Required property CurrentBinaryDir was null"),
dto->IsPortable,
});
}
static inline std::vector<VelopackLocatorConfig> to_cpp_VelopackLocatorConfig_vec(const vpkc_locator_config_t* const* arr, size_t c) {
if (arr == nullptr || c < 1) { return std::vector<VelopackLocatorConfig>(); }
std::vector<VelopackLocatorConfig> result;
result.reserve(c);
for (size_t i = 0; i < c; ++i) {
auto dto = arr[i];
if (dto == nullptr) { continue; }
result.push_back(unwrap(to_cpp_VelopackLocatorConfig(dto)));
}
return result;
}
static inline vpkc_locator_config_t* alloc_c_VelopackLocatorConfig_ptr(const VelopackLocatorConfig* dto) {
if (dto == nullptr) { return nullptr; }
vpkc_locator_config_t* obj = new vpkc_locator_config_t{};
obj->RootAppDir = alloc_c_string(dto->RootAppDir);
obj->UpdateExePath = alloc_c_string(dto->UpdateExePath);
obj->PackagesDir = alloc_c_string(dto->PackagesDir);
obj->ManifestPath = alloc_c_string(dto->ManifestPath);
obj->CurrentBinaryDir = alloc_c_string(dto->CurrentBinaryDir);
obj->IsPortable = dto->IsPortable;
return obj;
}
static inline vpkc_locator_config_t* alloc_c_VelopackLocatorConfig(const std::optional<VelopackLocatorConfig>& dto) {
if (!dto.has_value()) { return nullptr; }
VelopackLocatorConfig obj = unwrap(dto);
return alloc_c_VelopackLocatorConfig_ptr(&obj);
}
static inline vpkc_locator_config_t** alloc_c_VelopackLocatorConfig_vec(const std::vector<VelopackLocatorConfig>& dto, size_t* count) {
if (dto.empty()) {
*count = 0;
return nullptr;
}
*count = dto.size();
vpkc_locator_config_t** arr = new vpkc_locator_config_t*[*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_VelopackLocatorConfig(dto[i]);
}
return arr;
}
static inline void free_c_VelopackLocatorConfig(vpkc_locator_config_t* obj) {
if (obj == nullptr) { return; }
free_c_string(obj->RootAppDir);
free_c_string(obj->UpdateExePath);
free_c_string(obj->PackagesDir);
free_c_string(obj->ManifestPath);
free_c_string(obj->CurrentBinaryDir);
delete obj;
}
static inline void free_c_VelopackLocatorConfig_vec(vpkc_locator_config_t** arr, size_t count) {
if (arr == nullptr || count < 1) { return; }
for (size_t i = 0; i < count; ++i) {
free_c_VelopackLocatorConfig(arr[i]);
}
delete[] arr;
}
/** An individual Velopack asset, could refer to an asset on-disk or in a remote package feed. */
struct VelopackAsset {
/** The name or Id of the package containing this release. */
std::string PackageId;
/** The version of this release. */
std::string Version;
/** The type of asset (eg. "Full" or "Delta"). */
std::string Type;
/** The filename of the update package containing this release. */
std::string FileName;
/** The SHA1 checksum of the update package containing this release. */
std::string SHA1;
/** The SHA256 checksum of the update package containing this release. */
std::string SHA256;
/** The size in bytes of the update package containing this release. */
uint64_t Size;
/** The release notes in markdown format, as passed to Velopack when packaging the release. This may be an empty string. */
std::string NotesMarkdown;
/** The release notes in HTML format, transformed from Markdown when packaging the release. This may be an empty string. */
std::string NotesHtml;
};
static inline std::optional<VelopackAsset> to_cpp_VelopackAsset(const vpkc_asset_t* dto) {
if (dto == nullptr) { return std::nullopt; }
return std::optional<VelopackAsset>({
unwrap(to_cpp_string(dto->PackageId), "Required property PackageId was null"),
unwrap(to_cpp_string(dto->Version), "Required property Version was null"),
unwrap(to_cpp_string(dto->Type), "Required property Type was null"),
unwrap(to_cpp_string(dto->FileName), "Required property FileName was null"),
unwrap(to_cpp_string(dto->SHA1), "Required property SHA1 was null"),
unwrap(to_cpp_string(dto->SHA256), "Required property SHA256 was null"),
dto->Size,
unwrap(to_cpp_string(dto->NotesMarkdown), "Required property NotesMarkdown was null"),
unwrap(to_cpp_string(dto->NotesHtml), "Required property NotesHtml was null"),
});
}
static inline std::vector<VelopackAsset> to_cpp_VelopackAsset_vec(const vpkc_asset_t* const* arr, size_t c) {
if (arr == nullptr || c < 1) { return std::vector<VelopackAsset>(); }
std::vector<VelopackAsset> result;
result.reserve(c);
for (size_t i = 0; i < c; ++i) {
auto dto = arr[i];
if (dto == nullptr) { continue; }
result.push_back(unwrap(to_cpp_VelopackAsset(dto)));
}
return result;
}
static inline vpkc_asset_t* alloc_c_VelopackAsset_ptr(const VelopackAsset* dto) {
if (dto == nullptr) { return nullptr; }
vpkc_asset_t* obj = new vpkc_asset_t{};
obj->PackageId = alloc_c_string(dto->PackageId);
obj->Version = alloc_c_string(dto->Version);
obj->Type = alloc_c_string(dto->Type);
obj->FileName = alloc_c_string(dto->FileName);
obj->SHA1 = alloc_c_string(dto->SHA1);
obj->SHA256 = alloc_c_string(dto->SHA256);
obj->Size = dto->Size;
obj->NotesMarkdown = alloc_c_string(dto->NotesMarkdown);
obj->NotesHtml = alloc_c_string(dto->NotesHtml);
return obj;
}
static inline vpkc_asset_t* alloc_c_VelopackAsset(const std::optional<VelopackAsset>& dto) {
if (!dto.has_value()) { return nullptr; }
VelopackAsset obj = unwrap(dto);
return alloc_c_VelopackAsset_ptr(&obj);
}
static inline vpkc_asset_t** alloc_c_VelopackAsset_vec(const std::vector<VelopackAsset>& dto, size_t* count) {
if (dto.empty()) {
*count = 0;
return nullptr;
}
*count = dto.size();
vpkc_asset_t** arr = new vpkc_asset_t*[*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_VelopackAsset(dto[i]);
}
return arr;
}
static inline void free_c_VelopackAsset(vpkc_asset_t* obj) {
if (obj == nullptr) { return; }
free_c_string(obj->PackageId);
free_c_string(obj->Version);
free_c_string(obj->Type);
free_c_string(obj->FileName);
free_c_string(obj->SHA1);
free_c_string(obj->SHA256);
free_c_string(obj->NotesMarkdown);
free_c_string(obj->NotesHtml);
delete obj;
}
static inline void free_c_VelopackAsset_vec(vpkc_asset_t** arr, size_t count) {
if (arr == nullptr || count < 1) { return; }
for (size_t i = 0; i < count; ++i) {
free_c_VelopackAsset(arr[i]);
}
delete[] arr;
}
/** Holds information about the current version and pending updates, such as how many there are, and access to release notes. */
struct UpdateInfo {
/** The available version that we are updating to. */
VelopackAsset TargetFullRelease;
/** The base release that this update is based on. This is only available if the update is a delta update. */
std::optional<VelopackAsset> BaseRelease;
/** The list of delta updates that can be applied to the base version to get to the target version. */
std::vector<VelopackAsset> DeltasToTarget;
/**
* True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
* In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be
* deleted.
*/
bool IsDowngrade;
};
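/* Note (illustrative): a delta-capable update is indicated by BaseRelease
 * holding a value and DeltasToTarget being non-empty; otherwise only the
 * TargetFullRelease package applies. */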
static inline std::optional<UpdateInfo> to_cpp_UpdateInfo(const vpkc_update_info_t* dto) {
if (dto == nullptr) { return std::nullopt; }
return std::optional<UpdateInfo>({
unwrap(to_cpp_VelopackAsset(dto->TargetFullRelease), "Required property TargetFullRelease was null"),
to_cpp_VelopackAsset(dto->BaseRelease),
to_cpp_VelopackAsset_vec(dto->DeltasToTarget, dto->DeltasToTargetCount),
dto->IsDowngrade,
});
}
static inline std::vector<UpdateInfo> to_cpp_UpdateInfo_vec(const vpkc_update_info_t* const* arr, size_t c) {
if (arr == nullptr || c < 1) { return std::vector<UpdateInfo>(); }
std::vector<UpdateInfo> result;
result.reserve(c);
for (size_t i = 0; i < c; ++i) {
auto dto = arr[i];
if (dto == nullptr) { continue; }
result.push_back(unwrap(to_cpp_UpdateInfo(dto)));
}
return result;
}
static inline vpkc_update_info_t* alloc_c_UpdateInfo_ptr(const UpdateInfo* dto) {
if (dto == nullptr) { return nullptr; }
vpkc_update_info_t* obj = new vpkc_update_info_t{};
obj->TargetFullRelease = alloc_c_VelopackAsset(dto->TargetFullRelease);
obj->BaseRelease = alloc_c_VelopackAsset(dto->BaseRelease);
obj->DeltasToTarget = alloc_c_VelopackAsset_vec(dto->DeltasToTarget, &obj->DeltasToTargetCount);
obj->IsDowngrade = dto->IsDowngrade;
return obj;
}
static inline vpkc_update_info_t* alloc_c_UpdateInfo(const std::optional<UpdateInfo>& dto) {
if (!dto.has_value()) { return nullptr; }
UpdateInfo obj = unwrap(dto);
return alloc_c_UpdateInfo_ptr(&obj);
}
static inline vpkc_update_info_t** alloc_c_UpdateInfo_vec(const std::vector<UpdateInfo>& dto, size_t* count) {
if (dto.empty()) {
*count = 0;
return nullptr;
}
*count = dto.size();
vpkc_update_info_t** arr = new vpkc_update_info_t*[*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_UpdateInfo(dto[i]);
}
return arr;
}
static inline void free_c_UpdateInfo(vpkc_update_info_t* obj) {
if (obj == nullptr) { return; }
free_c_VelopackAsset(obj->TargetFullRelease);
free_c_VelopackAsset(obj->BaseRelease);
free_c_VelopackAsset_vec(obj->DeltasToTarget, obj->DeltasToTargetCount);
delete obj;
}
static inline void free_c_UpdateInfo_vec(vpkc_update_info_t** arr, size_t count) {
if (arr == nullptr || count < 1) { return; }
for (size_t i = 0; i < count; ++i) {
free_c_UpdateInfo(arr[i]);
}
delete[] arr;
}
/** Options to customise the behaviour of UpdateManager. */
struct UpdateOptions {
/**
* Allows UpdateManager to update to a version that's lower than the current version (i.e. downgrading).
* This could happen if a release has bugs and was retracted from the release feed, or if you're using
* ExplicitChannel to switch channels to another channel where the latest version on that
* channel is lower than the current version.
*/
bool AllowVersionDowngrade;
/**
* **This option should usually be left None**.
* Overrides the default channel used to fetch updates.
* The default channel will be whatever channel was specified on the command line when building this release.
* For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
 * This allows users to automatically receive updates from the same channel they installed from. This option
* allows you to explicitly switch channels, for example if the user wished to switch back to the 'stable' channel
* without having to reinstall the application.
*/
std::optional<std::string> ExplicitChannel;
/**
* Sets the maximum number of deltas to consider before falling back to a full update.
* The default is 10. Set to a negative number (eg. -1) to disable deltas.
*/
int32_t MaximumDeltasBeforeFallback;
};
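/* Illustrative caller sketch (hypothetical values): switch to the 'stable'
 * channel and disable delta updates entirely:
 *
 *   Velopack::UpdateOptions opts{};
 *   opts.AllowVersionDowngrade = true;  // channel switches may move laterally
 *   opts.ExplicitChannel = "stable";
 *   opts.MaximumDeltasBeforeFallback = -1;
 *   Velopack::UpdateManager manager("https://example.com/releases", &opts);
 */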
static inline std::optional<UpdateOptions> to_cpp_UpdateOptions(const vpkc_update_options_t* dto) {
if (dto == nullptr) { return std::nullopt; }
return std::optional<UpdateOptions>({
dto->AllowVersionDowngrade,
to_cpp_string(dto->ExplicitChannel),
dto->MaximumDeltasBeforeFallback,
});
}
static inline std::vector<UpdateOptions> to_cpp_UpdateOptions_vec(const vpkc_update_options_t* const* arr, size_t c) {
if (arr == nullptr || c < 1) { return std::vector<UpdateOptions>(); }
std::vector<UpdateOptions> result;
result.reserve(c);
for (size_t i = 0; i < c; ++i) {
auto dto = arr[i];
if (dto == nullptr) { continue; }
result.push_back(unwrap(to_cpp_UpdateOptions(dto)));
}
return result;
}
static inline vpkc_update_options_t* alloc_c_UpdateOptions_ptr(const UpdateOptions* dto) {
if (dto == nullptr) { return nullptr; }
vpkc_update_options_t* obj = new vpkc_update_options_t{};
obj->AllowVersionDowngrade = dto->AllowVersionDowngrade;
obj->ExplicitChannel = alloc_c_string(dto->ExplicitChannel);
obj->MaximumDeltasBeforeFallback = dto->MaximumDeltasBeforeFallback;
return obj;
}
static inline vpkc_update_options_t* alloc_c_UpdateOptions(const std::optional<UpdateOptions>& dto) {
if (!dto.has_value()) { return nullptr; }
UpdateOptions obj = unwrap(dto);
return alloc_c_UpdateOptions_ptr(&obj);
}
static inline vpkc_update_options_t** alloc_c_UpdateOptions_vec(const std::vector<UpdateOptions>& dto, size_t* count) {
if (dto.empty()) {
*count = 0;
return nullptr;
}
*count = dto.size();
vpkc_update_options_t** arr = new vpkc_update_options_t*[*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_UpdateOptions(dto[i]);
}
return arr;
}
static inline void free_c_UpdateOptions(vpkc_update_options_t* obj) {
if (obj == nullptr) { return; }
free_c_string(obj->ExplicitChannel);
delete obj;
}
static inline void free_c_UpdateOptions_vec(vpkc_update_options_t** arr, size_t count) {
if (arr == nullptr || count < 1) { return; }
for (size_t i = 0; i < count; ++i) {
free_c_UpdateOptions(arr[i]);
}
delete[] arr;
}
// !! AUTO-GENERATED-END CPP_TYPES
/**
* VelopackApp helps you to handle app activation events correctly.
* This should be used as early as possible in your application startup code.
@@ -260,9 +494,10 @@ public:
* Override the command line arguments used by VelopackApp. (by default this is env::args().skip(1))
*/
VelopackApp& SetArgs(const std::vector<std::string>& args) {
char** pArgs = allocate_cstring_array(args);
vpkc_app_set_args(pArgs, args.size());
free_cstring_array(pArgs, args.size());
size_t c;
char** pArgs = alloc_c_string_vec(args, &c);
vpkc_app_set_args(pArgs, c);
free_c_string_vec(pArgs, c);
return *this;
};
@@ -270,8 +505,9 @@ public:
* VelopackLocator provides some utility functions for locating the current app important paths (eg. path to packages, update binary, and so forth).
*/
VelopackApp& SetLocator(const VelopackLocatorConfig& locator) {
vpkc_locator_config_t vpkc_locator = to_c(locator);
vpkc_app_set_locator(&vpkc_locator);
vpkc_locator_config_t* vpkc_locator = alloc_c_VelopackLocatorConfig(locator);
vpkc_app_set_locator(vpkc_locator);
free_c_VelopackLocatorConfig(vpkc_locator);
return *this;
};
@@ -347,7 +583,7 @@ public:
/**
* Progress callback function. Call with values between 0 and 100 inclusive.
*/
typedef std::function<void(size_t)> vpkc_progress_send_t;
typedef std::function<void(int16_t)> vpkc_progress_send_t;
/**
* Abstract class for retrieving release feeds and downloading assets. You should subclass this and
@@ -372,16 +608,16 @@ public:
[](void* userData, const char* releasesName) {
IUpdateSource* source = reinterpret_cast<IUpdateSource*>(userData);
std::string json = source->GetReleaseFeed(releasesName);
return allocate_cstring(json);
return alloc_c_string(json);
},
[](void* userData, char* pszFeed) {
free_cstring(pszFeed);
free_c_string(pszFeed);
},
[](void* userData, const struct vpkc_asset_t *pAsset, const char* pszLocalPath, size_t progressCallbackId) {
IUpdateSource* source = reinterpret_cast<IUpdateSource*>(userData);
VelopackAsset asset = to_cpp(*pAsset);
std::string localPath = to_cppstring(pszLocalPath);
std::function<void(size_t)> progress_callback = [progressCallbackId](size_t progress) {
VelopackAsset asset = to_cpp_VelopackAsset(pAsset).value();
std::string localPath = to_cpp_string(pszLocalPath).value();
std::function<void(int16_t)> progress_callback = [progressCallbackId](int16_t progress) {
vpkc_source_report_progress(progressCallbackId, progress);
};
return source->DownloadReleaseEntry(asset, localPath, progress_callback);
@@ -444,21 +680,12 @@ public:
* @param locator Override the default locator configuration (usually used for testing / mocks).
*/
UpdateManager(const std::string& urlOrPath, const UpdateOptions* options = nullptr, const VelopackLocatorConfig* locator = nullptr) {
vpkc_update_options_t vpkc_options;
vpkc_update_options_t* pOptions = nullptr;
if (options != nullptr) {
vpkc_options = to_c(*options);
pOptions = &vpkc_options;
}
vpkc_locator_config_t vpkc_locator;
vpkc_locator_config_t* pLocator = nullptr;
if (locator != nullptr) {
vpkc_locator = to_c(*locator);
pLocator = &vpkc_locator;
}
if (!vpkc_new_update_manager(urlOrPath.c_str(), pOptions, pLocator, &m_pManager)) {
vpkc_update_options_t* pOptions = alloc_c_UpdateOptions_ptr(options);
vpkc_locator_config_t* pLocator = alloc_c_VelopackLocatorConfig_ptr(locator);
bool result = vpkc_new_update_manager(urlOrPath.c_str(), pOptions, pLocator, &m_pManager);
free_c_UpdateOptions(pOptions);
free_c_VelopackLocatorConfig(pLocator);
if (!result) {
throw_last_error();
}
};
@@ -471,23 +698,14 @@ public:
*/
template <typename T, typename = std::enable_if_t<std::is_base_of_v<IUpdateSource, T>>>
UpdateManager(std::unique_ptr<T> pUpdateSource, const UpdateOptions* options = nullptr, const VelopackLocatorConfig* locator = nullptr) {
vpkc_update_options_t vpkc_options;
vpkc_update_options_t* pOptions = nullptr;
if (options != nullptr) {
vpkc_options = to_c(*options);
pOptions = &vpkc_options;
}
vpkc_locator_config_t vpkc_locator;
vpkc_locator_config_t* pLocator = nullptr;
if (locator != nullptr) {
vpkc_locator = to_c(*locator);
pLocator = &vpkc_locator;
}
vpkc_update_options_t* pOptions = alloc_c_UpdateOptions_ptr(options);
vpkc_locator_config_t* pLocator = alloc_c_VelopackLocatorConfig_ptr(locator);
m_pUpdateSource = std::unique_ptr<IUpdateSource>(static_cast<IUpdateSource*>(pUpdateSource.release()));
vpkc_update_source_t* pSource = m_pUpdateSource->m_pSource;
if (!vpkc_new_update_manager_with_source(pSource, pOptions, pLocator, &m_pManager)) {
bool result = vpkc_new_update_manager_with_source(pSource, pOptions, pLocator, &m_pManager);
free_c_UpdateOptions(pOptions);
free_c_VelopackLocatorConfig(pLocator);
if (!result) {
throw_last_error();
}
};
@@ -496,7 +714,10 @@ public:
* Destructor for UpdateManager.
*/
~UpdateManager() {
vpkc_free_update_manager(m_pManager);
if (m_pManager != nullptr) {
vpkc_free_update_manager(m_pManager);
m_pManager = nullptr;
}
};
/**
@@ -532,10 +753,10 @@ public:
 * You can pass the returned VelopackAsset to WaitExitThenApplyUpdates to apply the update.
*/
std::optional<VelopackAsset> UpdatePendingRestart() noexcept {
vpkc_asset_t asset;
vpkc_asset_t* asset;
if (vpkc_update_pending_restart(m_pManager, &asset)) {
VelopackAsset cpp_asset = to_cpp(asset);
vpkc_free_asset(&asset);
VelopackAsset cpp_asset = to_cpp_VelopackAsset(asset).value();
vpkc_free_asset(asset);
return cpp_asset;
}
return std::nullopt;
@@ -546,7 +767,7 @@ public:
* UpdateInfo object containing the latest available release, and any delta updates that can be applied if they are available.
*/
std::optional<UpdateInfo> CheckForUpdates() {
vpkc_update_info_t update;
vpkc_update_info_t* update;
vpkc_update_check_t result = vpkc_check_for_updates(m_pManager, &update);
switch (result) {
case vpkc_update_check_t::UPDATE_ERROR:
@@ -556,8 +777,8 @@ public:
case vpkc_update_check_t::REMOTE_IS_EMPTY:
return std::nullopt;
case vpkc_update_check_t::UPDATE_AVAILABLE:
UpdateInfo cpp_info = to_cpp(update);
vpkc_free_update_info(&update);
UpdateInfo cpp_info = to_cpp_UpdateInfo(update).value();
vpkc_free_update_info(update);
return cpp_info;
}
return std::nullopt;
@@ -572,8 +793,10 @@ public:
* packages, this method will fall back to downloading the full version of the update.
*/
void DownloadUpdates(const UpdateInfo& update, vpkc_progress_callback_t progress = nullptr, void* pUserData = 0) {
vpkc_update_info_t vpkc_update = to_c(update);
if (!vpkc_download_updates(m_pManager, &vpkc_update, progress, pUserData)) {
vpkc_update_info_t* vpkc_update = alloc_c_UpdateInfo(update);
bool result = vpkc_download_updates(m_pManager, vpkc_update, progress, pUserData);
free_c_UpdateInfo(vpkc_update);
if (!result) {
throw_last_error();
}
};
@@ -593,11 +816,12 @@ public:
* optionally restart your app. The updater will only wait for 60 seconds before giving up.
*/
void WaitExitThenApplyUpdates(const VelopackAsset& asset, bool silent = false, bool restart = true, std::vector<std::string> restartArgs = {}) {
char** pRestartArgs = allocate_cstring_array(restartArgs);
vpkc_asset_t vpkc_asset = to_c(asset);
bool result = vpkc_wait_exit_then_apply_updates(m_pManager, &vpkc_asset, silent, restart, pRestartArgs, restartArgs.size());
free_cstring_array(pRestartArgs, restartArgs.size());
size_t cRestartArgs;
char** pRestartArgs = alloc_c_string_vec(restartArgs, &cRestartArgs);
vpkc_asset_t* vpkc_asset = alloc_c_VelopackAsset(asset);
bool result = vpkc_wait_exit_then_apply_updates(m_pManager, vpkc_asset, silent, restart, pRestartArgs, cRestartArgs);
free_c_string_vec(pRestartArgs, cRestartArgs);
free_c_VelopackAsset(vpkc_asset);
if (!result) {
throw_last_error();
}
@@ -610,11 +834,12 @@ public:
* If waitPid is 0, the updater will not wait for any process to exit before applying updates (Not Recommended).
*/
void UnsafeApplyUpdates(const VelopackAsset& asset, bool silent, uint32_t waitPid, bool restart, std::vector<std::string> restartArgs) {
char** pRestartArgs = allocate_cstring_array(restartArgs);
vpkc_asset_t vpkc_asset = to_c(asset);
bool result = vpkc_unsafe_apply_updates(m_pManager, &vpkc_asset, silent, waitPid, restart, pRestartArgs, restartArgs.size());
free_cstring_array(pRestartArgs, restartArgs.size());
size_t cRestartArgs;
char** pRestartArgs = alloc_c_string_vec(restartArgs, &cRestartArgs);
vpkc_asset_t* vpkc_asset = alloc_c_VelopackAsset(asset);
bool result = vpkc_unsafe_apply_updates(m_pManager, vpkc_asset, silent, waitPid, restart, pRestartArgs, cRestartArgs);
free_c_string_vec(pRestartArgs, cRestartArgs);
free_c_VelopackAsset(vpkc_asset);
if (!result) {
throw_last_error();
}

View File

@@ -42,9 +42,9 @@ impl UpdateSource for CCallbackUpdateSource {
if let Some(cb_get_release_feed) = self.cb_get_release_feed {
let json_cstr_ptr = (cb_get_release_feed)(self.p_user_data, releases_name_cstr.as_ptr());
let json = c_to_string_opt(json_cstr_ptr)
.ok_or(Error::Generic("User vpkc_release_feed_delegate_t returned a null pointer instead of an asset feed".to_string()))?;
let json = c_to_String(json_cstr_ptr).map_err(|_| {
Error::Generic("User vpkc_release_feed_delegate_t returned a null pointer instead of an asset feed".to_string())
})?;
if let Some(cb_free_release_feed) = self.cb_free_release_feed {
(cb_free_release_feed)(self.p_user_data, json_cstr_ptr); // Free the C string returned by the callback
} else {
@@ -60,9 +60,7 @@ impl UpdateSource for CCallbackUpdateSource {
fn download_release_entry(&self, asset: &VelopackAsset, local_file: &str, progress_sender: Option<Sender<i16>>) -> Result<(), Error> {
if let Some(cb_download_release_entry) = self.cb_download_release_entry {
let local_file_cstr = CString::new(local_file).unwrap();
let mut asset_c: vpkc_asset_t = unsafe { std::mem::zeroed() };
let asset_ptr: *mut vpkc_asset_t = &mut asset_c as *mut vpkc_asset_t;
unsafe { allocate_velopackasset(asset.clone(), asset_ptr) };
let asset_ptr = unsafe { allocate_VelopackAsset(asset) };
let progress_callback_id = PROGRESS_ID.fetch_add(1, Ordering::SeqCst);
if let Some(progress_sender) = &progress_sender {
@@ -71,8 +69,7 @@ impl UpdateSource for CCallbackUpdateSource {
}
let success = (cb_download_release_entry)(self.p_user_data, asset_ptr, local_file_cstr.as_ptr(), progress_callback_id);
unsafe { free_velopackasset(asset_ptr) };
unsafe { free_VelopackAsset(asset_ptr) };
if let Some(sender) = PROGRESS_CALLBACKS.write().unwrap().remove(&progress_callback_id) {
let _ = sender.send(100);

View File

@@ -15,16 +15,16 @@ use anyhow::{anyhow, bail};
use libc::{c_char, c_void, size_t};
use log_derive::{logfn, logfn_inputs};
use std::{ffi::CString, ptr};
use velopack::{sources, ApplyWaitMode, Error as VelopackError, UpdateCheck, UpdateManager, VelopackApp};
use velopack::locator::LocationContext;
use velopack::logging::{default_logfile_path, init_logging};
use velopack::{sources, ApplyWaitMode, Error as VelopackError, UpdateCheck, UpdateManager, VelopackApp};
/// Create a new FileSource update source for a given file path.
#[no_mangle]
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_new_source_file(psz_file_path: *const c_char) -> *mut vpkc_update_source_t {
if let Some(update_path) = c_to_string_opt(psz_file_path) {
if let Some(update_path) = c_to_String(psz_file_path).ok() {
UpdateSourceRawPtr::new(Box::new(sources::FileSource::new(update_path)))
} else {
log::error!("psz_file_path is null");
@@ -37,7 +37,7 @@ pub extern "C" fn vpkc_new_source_file(psz_file_path: *const c_char) -> *mut vpk
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_new_source_http_url(psz_http_url: *const c_char) -> *mut vpkc_update_source_t {
if let Some(update_url) = c_to_string_opt(psz_http_url) {
if let Some(update_url) = c_to_String(psz_http_url).ok() {
UpdateSourceRawPtr::new(Box::new(sources::HttpSource::new(update_url)))
} else {
log::error!("psz_http_url is null");
@@ -103,10 +103,10 @@ pub extern "C" fn vpkc_new_update_manager(
p_manager: *mut *mut vpkc_update_manager_t,
) -> bool {
wrap_error(|| {
let update_url = c_to_string_opt(psz_url_or_path).ok_or(anyhow!("URL or path is null"))?;
let update_url = c_to_String(psz_url_or_path)?;
let source = sources::AutoSource::new(&update_url);
let options = c_to_updateoptions_opt(p_options);
let locator = c_to_velopacklocatorconfig_opt(p_locator);
let options = c_to_UpdateOptions(p_options).ok();
let locator = c_to_VelopackLocatorConfig(p_locator).ok();
let manager = UpdateManager::new(source, options, locator)?;
unsafe { *p_manager = UpdateManagerRawPtr::new(manager) };
Ok(())
@@ -128,8 +128,8 @@ pub extern "C" fn vpkc_new_update_manager_with_source(
) -> bool {
wrap_error(|| {
let source = UpdateSourceRawPtr::get_source_clone(p_source).ok_or(anyhow!("pSource must not be null"))?;
let options = c_to_updateoptions_opt(p_options);
let locator = c_to_velopacklocatorconfig_opt(p_locator);
let options = c_to_UpdateOptions(p_options).ok();
let locator = c_to_VelopackLocatorConfig(p_locator).ok();
let manager = UpdateManager::new_boxed(source, options, locator)?;
unsafe { *p_manager = UpdateManagerRawPtr::new(manager) };
Ok(())
@@ -181,11 +181,11 @@ pub extern "C" fn vpkc_is_portable(p_manager: *mut vpkc_update_manager_t) -> boo
#[no_mangle]
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_update_pending_restart(p_manager: *mut vpkc_update_manager_t, p_asset: *mut vpkc_asset_t) -> bool {
pub extern "C" fn vpkc_update_pending_restart(p_manager: *mut vpkc_update_manager_t, p_asset: *mut *mut vpkc_asset_t) -> bool {
match p_manager.to_opaque_ref() {
Some(manager) => match manager.get_update_pending_restart() {
Some(asset) => {
unsafe { allocate_velopackasset(asset, p_asset) };
unsafe { *p_asset = allocate_VelopackAsset(&asset) };
true
}
None => false,
@@ -199,13 +199,14 @@ pub extern "C" fn vpkc_update_pending_restart(p_manager: *mut vpkc_update_manage
#[no_mangle]
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_check_for_updates(p_manager: *mut vpkc_update_manager_t, p_update: *mut vpkc_update_info_t) -> vpkc_update_check_t {
pub extern "C" fn vpkc_check_for_updates(
p_manager: *mut vpkc_update_manager_t,
p_update: *mut *mut vpkc_update_info_t,
) -> vpkc_update_check_t {
match p_manager.to_opaque_ref() {
Some(manager) => match manager.check_for_updates() {
Ok(UpdateCheck::UpdateAvailable(info)) => {
unsafe {
allocate_updateinfo(info, p_update);
}
unsafe { *p_update = allocate_UpdateInfo(&info) };
vpkc_update_check_t::UPDATE_AVAILABLE
}
Ok(UpdateCheck::RemoteIsEmpty) => vpkc_update_check_t::REMOTE_IS_EMPTY,
@@ -243,7 +244,7 @@ pub extern "C" fn vpkc_download_updates(
None => bail!("pManager must not be null"),
};
let update = c_to_updateinfo_opt(p_update).ok_or(anyhow!("pUpdate must not be null"))?;
let update = c_to_UpdateInfo(p_update)?;
if let Some(cb_progress) = cb_progress {
let (progress_sender, progress_receiver) = std::sync::mpsc::channel::<i16>();
@@ -311,15 +312,15 @@ pub extern "C" fn vpkc_wait_exit_then_apply_updates(
None => bail!("pManager must not be null"),
};
let asset = c_to_velopackasset_opt(p_asset).ok_or(anyhow!("pAsset must not be null"))?;
let restart_args = c_to_string_array_opt(p_restart_args, c_restart_args).unwrap_or_default();
let asset = c_to_VelopackAsset(p_asset)?;
let restart_args = c_to_String_vec(p_restart_args, c_restart_args)?;
manager.wait_exit_then_apply_updates(&asset, b_silent, b_restart, &restart_args)?;
Ok(())
})
}
/// This will launch the Velopack updater and optionally wait for a program to exit gracefully.
/// This method is unsafe because it does not necessarily wait for any / the correct process to exit
/// before applying updates. The `vpkc_wait_exit_then_apply_updates` method is recommended for most use cases.
/// If dw_wait_pid is 0, the updater will not wait for any process to exit before applying updates (Not Recommended).
#[no_mangle]
@@ -335,13 +336,9 @@ pub extern "C" fn vpkc_unsafe_apply_updates(
c_restart_args: size_t,
) -> bool {
wrap_error(|| {
let manager = match p_manager.to_opaque_ref() {
Some(manager) => manager,
None => bail!("pManager must not be null"),
};
let asset = c_to_velopackasset_opt(p_asset).ok_or(anyhow!("pAsset must not be null"))?;
let restart_args = c_to_string_array_opt(p_restart_args, c_restart_args).unwrap_or_default();
let manager = p_manager.to_opaque_ref().ok_or(anyhow!("pManager must not be null"))?;
let asset = c_to_VelopackAsset(p_asset)?;
let restart_args = c_to_String_vec(p_restart_args, c_restart_args)?;
let wait_mode = if dw_wait_pid > 0 { ApplyWaitMode::WaitPid(dw_wait_pid) } else { ApplyWaitMode::NoWait };
manager.unsafe_apply_updates(&asset, b_silent, wait_mode, b_restart, &restart_args)?;
Ok(())
@@ -361,7 +358,7 @@ pub extern "C" fn vpkc_free_update_manager(p_manager: *mut vpkc_update_manager_t
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_free_update_info(p_update_info: *mut vpkc_update_info_t) {
unsafe { free_updateinfo(p_update_info) };
unsafe { free_UpdateInfo(p_update_info) };
}
/// Frees a vpkc_asset_t instance.
@@ -369,7 +366,7 @@ pub extern "C" fn vpkc_free_update_info(p_update_info: *mut vpkc_update_info_t)
#[logfn(Trace)]
#[logfn_inputs(Trace)]
pub extern "C" fn vpkc_free_asset(p_asset: *mut vpkc_asset_t) {
unsafe { free_velopackasset(p_asset) };
unsafe { free_VelopackAsset(p_asset) };
}
/// VelopackApp helps you to handle app activation events correctly.
@@ -439,14 +436,14 @@ pub extern "C" fn vpkc_app_run(p_user_data: *mut c_void) {
hook(p_user_data, c_string.as_ptr());
});
}
// init logging
let log_file = if let Some(locator) = &app_options.locator {
default_logfile_path(locator)
} else {
default_logfile_path(LocationContext::FromCurrentExe)
};
init_logging("lib-cpp", Some(&log_file), false, false, Some(create_shared_logger()));
app.run();
}
@@ -463,7 +460,7 @@ pub extern "C" fn vpkc_app_set_auto_apply_on_startup(b_auto_apply: bool) {
#[no_mangle]
pub extern "C" fn vpkc_app_set_args(p_args: *mut *mut c_char, c_args: size_t) {
update_app_options(|opt| {
opt.args = c_to_string_array_opt(p_args, c_args);
opt.args = c_to_String_vec(p_args, c_args).ok();
});
}
@@ -471,7 +468,7 @@ pub extern "C" fn vpkc_app_set_args(p_args: *mut *mut c_char, c_args: size_t) {
#[no_mangle]
pub extern "C" fn vpkc_app_set_locator(p_locator: *mut vpkc_locator_config_t) {
update_app_options(|opt| {
opt.locator = c_to_velopacklocatorconfig_opt(p_locator);
opt.locator = c_to_VelopackLocatorConfig(p_locator).ok();
});
}
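Both setters delegate to update_app_options, which is defined elsewhere in this crate; the shape is presumably a mutate-in-place callback over a process-global options value. A heavily hedged sketch of that pattern (AppOptions and APP_OPTIONS are stand-in names, not the real identifiers):

use std::sync::Mutex;

struct AppOptions {
    args: Option<Vec<String>>,
    locator: Option<VelopackLocatorConfig>,
}

static APP_OPTIONS: Mutex<Option<AppOptions>> = Mutex::new(None); // assumption: some global store

fn update_app_options<F: FnOnce(&mut AppOptions)>(f: F) {
    let mut guard = APP_OPTIONS.lock().unwrap();
    let opts = guard.get_or_insert_with(|| AppOptions { args: None, locator: None });
    f(opts);
}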

View File

@@ -1,6 +1,8 @@
use anyhow::{bail, Result};
use libc::{c_char, c_void, size_t};
use std::ffi::{CStr, CString};
use std::path::PathBuf;
use std::mem::size_of;
use velopack::{locator::VelopackLocatorConfig, UpdateInfo, UpdateOptions, VelopackAsset};
/// The result of a call to check for updates. This can indicate that an update is available, or that an error occurred.
@@ -47,73 +49,53 @@ pub type vpkc_download_asset_delegate_t = Option<
) -> bool,
>;
pub fn c_to_string_opt(psz: *const c_char) -> Option<String> {
pub fn c_to_String(psz: *const c_char) -> Result<String> {
if psz.is_null() {
return None;
bail!("Null pointer: String must be set.");
}
let cstr = unsafe { CStr::from_ptr(psz) };
Some(String::from_utf8_lossy(cstr.to_bytes()).to_string())
Ok(String::from_utf8_lossy(cstr.to_bytes()).to_string())
}
pub fn c_to_string(psz: *const c_char) -> String {
c_to_string_opt(psz).unwrap_or_default()
pub fn c_to_String_vec(p_args: *mut *mut c_char, c_args: size_t) -> Result<Vec<String>> {
if p_args.is_null() || c_args == 0 {
return Ok(Vec::new());
}
let mut args = Vec::with_capacity(c_args);
for i in 0..c_args {
let arg = c_to_String(unsafe { *p_args.add(i) })?;
args.push(arg);
}
Ok(args)
}
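Because the strings are copied out eagerly, the caller retains ownership of the input array. A small round-trip sketch that builds a C-style argv with CString (the flag names are illustrative):

use std::ffi::CString;
use libc::c_char;

let owned: Vec<CString> = vec![CString::new("--flag").unwrap(), CString::new("value").unwrap()];
let mut ptrs: Vec<*mut c_char> = owned.into_iter().map(CString::into_raw).collect();
let parsed = c_to_String_vec(ptrs.as_mut_ptr(), ptrs.len()).unwrap();
assert_eq!(parsed, vec!["--flag".to_string(), "value".to_string()]);
for p in ptrs { unsafe { free_String(p) }; } // reclaim the raw CStrings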
pub fn c_to_pathbuf(psz: *const c_char) -> PathBuf {
PathBuf::from(c_to_string(psz))
pub fn c_to_PathBuf(psz: *const c_char) -> Result<PathBuf> {
c_to_String(psz).map(PathBuf::from)
}
pub fn string_to_cstr(s: &str) -> *mut c_char {
let cstr = CString::new(s).unwrap();
pub fn allocate_String<'a, T: Into<Option<&'a String>>>(s: T) -> *mut c_char {
let s = s.into();
if s.is_none() {
return std::ptr::null_mut();
}
let s = s.unwrap();
let cstr = CString::new(s.clone()).unwrap();
cstr.into_raw()
}
pub fn free_cstr(psz: *mut c_char) {
pub fn allocate_PathBuf(p: &PathBuf) -> *mut c_char {
let st = p.to_string_lossy().to_string();
allocate_String(&st)
}
pub unsafe fn free_String(psz: *mut c_char) {
if !psz.is_null() {
let _ = unsafe { CString::from_raw(psz) };
drop(CString::from_raw(psz));
}
}
pub fn allocate_string(s: String, psz: *mut *mut c_char) {
if psz.is_null() {
return;
}
unsafe { *psz = string_to_cstr(&s) };
}
pub fn allocate_string_opt(s: Option<String>, psz: *mut *mut c_char) {
if let Some(s) = s {
allocate_string(s, psz);
}
}
pub unsafe fn free_string(psz: *mut *mut c_char) {
if !psz.is_null() {
free_cstr(*psz);
}
}
pub fn allocate_pathbuf(p: PathBuf, psz: *mut *mut c_char) {
allocate_string(p.to_string_lossy().to_string(), psz);
}
pub unsafe fn free_pathbuf(psz: *mut *mut c_char) {
free_string(psz);
}
pub fn c_to_string_array_opt(p_args: *mut *mut c_char, c_args: size_t) -> Option<Vec<String>> {
if p_args.is_null() || c_args == 0 {
return None;
}
let mut args = Vec::with_capacity(c_args);
for i in 0..c_args {
if let Some(arg) = c_to_string_opt(unsafe { *p_args.add(i) }) {
args.push(arg);
}
}
Some(args)
pub unsafe fn free_PathBuf(psz: *mut c_char) {
free_String(psz);
}
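The string helpers pair up the same way as the struct helpers. A minimal allocate/read/free cycle for a single value:

let s = String::from("hello");
let p = allocate_String(&s);                  // NUL-terminated heap copy via CString
assert_eq!(c_to_String(p).unwrap(), "hello");
unsafe { free_String(p) };                    // CString::from_raw, not libc::free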
pub fn return_cstr(psz: *mut c_char, c: size_t, s: &str) -> size_t {
@@ -150,44 +132,90 @@ pub struct vpkc_locator_config_t {
}
#[rustfmt::skip]
pub fn c_to_velopacklocatorconfig(obj: &vpkc_locator_config_t) -> VelopackLocatorConfig {
VelopackLocatorConfig {
RootAppDir: c_to_pathbuf(obj.RootAppDir),
UpdateExePath: c_to_pathbuf(obj.UpdateExePath),
PackagesDir: c_to_pathbuf(obj.PackagesDir),
ManifestPath: c_to_pathbuf(obj.ManifestPath),
CurrentBinaryDir: c_to_pathbuf(obj.CurrentBinaryDir),
pub fn c_to_VelopackLocatorConfig(obj: *mut vpkc_locator_config_t) -> Result<VelopackLocatorConfig> {
if obj.is_null() { bail!("Null pointer: VelopackLocatorConfig must be set."); }
let obj = unsafe { &*obj };
let result = VelopackLocatorConfig {
RootAppDir: c_to_PathBuf(obj.RootAppDir)?,
UpdateExePath: c_to_PathBuf(obj.UpdateExePath)?,
PackagesDir: c_to_PathBuf(obj.PackagesDir)?,
ManifestPath: c_to_PathBuf(obj.ManifestPath)?,
CurrentBinaryDir: c_to_PathBuf(obj.CurrentBinaryDir)?,
IsPortable: obj.IsPortable,
};
Ok(result)
}
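Unlike the old *_opt converters, the c_to_* functions now fail loudly on a null pointer instead of silently defaulting, so callers get a diagnosable error. For example:

let res = c_to_VelopackLocatorConfig(std::ptr::null_mut());
assert!(res.is_err()); // "Null pointer: VelopackLocatorConfig must be set."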
#[rustfmt::skip]
pub fn c_to_VelopackLocatorConfig_vec(obj: *mut *mut vpkc_locator_config_t, count: size_t) -> Result<Vec<VelopackLocatorConfig>> {
if obj.is_null() || count == 0 { return Ok(Vec::new()); }
let mut assets = Vec::with_capacity(count as usize);
for i in 0..count {
let ptr = unsafe { *obj.add(i as usize) };
assets.push(c_to_VelopackLocatorConfig(ptr)?);
}
Ok(assets)
}
#[rustfmt::skip]
pub fn c_to_velopacklocatorconfig_opt(obj: *mut vpkc_locator_config_t) -> Option<VelopackLocatorConfig> {
if obj.is_null() { return None; }
Some(c_to_velopacklocatorconfig(unsafe { &*obj }))
}
#[rustfmt::skip]
pub unsafe fn allocate_velopacklocatorconfig(dto: VelopackLocatorConfig, obj: *mut vpkc_locator_config_t) {
if obj.is_null() { return; }
pub unsafe fn allocate_VelopackLocatorConfig<'a, T: Into<Option<&'a VelopackLocatorConfig>>>(dto: T) -> *mut vpkc_locator_config_t {
let dto = dto.into();
if dto.is_none() {
return std::ptr::null_mut();
}
log::debug!("vpkc_locator_config_t allocated");
allocate_pathbuf(dto.RootAppDir, &mut (*obj).RootAppDir);
allocate_pathbuf(dto.UpdateExePath, &mut (*obj).UpdateExePath);
allocate_pathbuf(dto.PackagesDir, &mut (*obj).PackagesDir);
allocate_pathbuf(dto.ManifestPath, &mut (*obj).ManifestPath);
allocate_pathbuf(dto.CurrentBinaryDir, &mut (*obj).CurrentBinaryDir);
let dto = dto.unwrap();
let obj = libc::malloc(size_of::<vpkc_locator_config_t>()) as *mut vpkc_locator_config_t;
(*obj).RootAppDir = allocate_PathBuf(&dto.RootAppDir);
(*obj).UpdateExePath = allocate_PathBuf(&dto.UpdateExePath);
(*obj).PackagesDir = allocate_PathBuf(&dto.PackagesDir);
(*obj).ManifestPath = allocate_PathBuf(&dto.ManifestPath);
(*obj).CurrentBinaryDir = allocate_PathBuf(&dto.CurrentBinaryDir);
(*obj).IsPortable = dto.IsPortable;
obj
}
#[rustfmt::skip]
pub unsafe fn free_velopacklocatorconfig(obj: *mut vpkc_locator_config_t) {
pub unsafe fn allocate_VelopackLocatorConfig_vec(dto: &Vec<VelopackLocatorConfig>, count: *mut size_t) -> *mut *mut vpkc_locator_config_t {
if dto.is_empty() {
*count = 0;
return std::ptr::null_mut();
}
log::debug!("vpkc_locator_config_t vector allocated");
let count_value = dto.len() as size_t;
*count = count_value;
let mut assets = Vec::with_capacity(count_value as usize);
for i in 0..count_value {
let ptr = allocate_VelopackLocatorConfig(&dto[i as usize]);
assets.push(ptr);
}
let ptr = assets.as_mut_ptr();
std::mem::forget(assets);
ptr
}
#[rustfmt::skip]
pub unsafe fn free_VelopackLocatorConfig(obj: *mut vpkc_locator_config_t) {
if obj.is_null() { return; }
free_PathBuf((*obj).RootAppDir);
free_PathBuf((*obj).UpdateExePath);
free_PathBuf((*obj).PackagesDir);
free_PathBuf((*obj).ManifestPath);
free_PathBuf((*obj).CurrentBinaryDir);
libc::free(obj as *mut c_void);
log::debug!("vpkc_locator_config_t freed");
free_pathbuf(&mut (*obj).RootAppDir);
free_pathbuf(&mut (*obj).UpdateExePath);
free_pathbuf(&mut (*obj).PackagesDir);
free_pathbuf(&mut (*obj).ManifestPath);
free_pathbuf(&mut (*obj).CurrentBinaryDir);
}
#[rustfmt::skip]
pub unsafe fn free_VelopackLocatorConfig_vec(obj: *mut *mut vpkc_locator_config_t, count: size_t) {
if obj.is_null() || count == 0 { return; }
let vec = Vec::from_raw_parts(obj, count as usize, count as usize);
for i in 0..count {
let ptr = *vec.get_unchecked(i as usize);
free_VelopackLocatorConfig(ptr);
}
log::debug!("vpkc_locator_config_t vector freed");
}
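Putting the locator pieces together, a vector of configs can round-trip through the C representation as below (the paths are purely illustrative):

use std::path::PathBuf;

let configs = vec![VelopackLocatorConfig {
    RootAppDir: PathBuf::from("/opt/app"),
    UpdateExePath: PathBuf::from("/opt/app/Update"),
    PackagesDir: PathBuf::from("/opt/app/packages"),
    ManifestPath: PathBuf::from("/opt/app/manifest"),
    CurrentBinaryDir: PathBuf::from("/opt/app/current"),
    IsPortable: false,
}];
let mut count: size_t = 0;
let arr = unsafe { allocate_VelopackLocatorConfig_vec(&configs, &mut count) };
let back = c_to_VelopackLocatorConfig_vec(arr, count).unwrap();
assert_eq!(back.len(), configs.len());
unsafe { free_VelopackLocatorConfig_vec(arr, count) };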
#[rustfmt::skip]
@@ -215,53 +243,99 @@ pub struct vpkc_asset_t {
}
#[rustfmt::skip]
pub fn c_to_velopackasset(obj: &vpkc_asset_t) -> VelopackAsset {
VelopackAsset {
PackageId: c_to_string(obj.PackageId),
Version: c_to_string(obj.Version),
Type: c_to_string(obj.Type),
FileName: c_to_string(obj.FileName),
SHA1: c_to_string(obj.SHA1),
SHA256: c_to_string(obj.SHA256),
pub fn c_to_VelopackAsset(obj: *mut vpkc_asset_t) -> Result<VelopackAsset> {
if obj.is_null() { bail!("Null pointer: VelopackAsset must be set."); }
let obj = unsafe { &*obj };
let result = VelopackAsset {
PackageId: c_to_String(obj.PackageId)?,
Version: c_to_String(obj.Version)?,
Type: c_to_String(obj.Type)?,
FileName: c_to_String(obj.FileName)?,
SHA1: c_to_String(obj.SHA1)?,
SHA256: c_to_String(obj.SHA256)?,
Size: obj.Size,
NotesMarkdown: c_to_string(obj.NotesMarkdown),
NotesHtml: c_to_string(obj.NotesHtml),
NotesMarkdown: c_to_String(obj.NotesMarkdown)?,
NotesHtml: c_to_String(obj.NotesHtml)?,
};
Ok(result)
}
#[rustfmt::skip]
pub fn c_to_VelopackAsset_vec(obj: *mut *mut vpkc_asset_t, count: size_t) -> Result<Vec<VelopackAsset>> {
if obj.is_null() || count == 0 { return Ok(Vec::new()); }
let mut assets = Vec::with_capacity(count as usize);
for i in 0..count {
let ptr = unsafe { *obj.add(i as usize) };
assets.push(c_to_VelopackAsset(ptr)?);
}
Ok(assets)
}
#[rustfmt::skip]
pub fn c_to_velopackasset_opt(obj: *mut vpkc_asset_t) -> Option<VelopackAsset> {
if obj.is_null() { return None; }
Some(c_to_velopackasset(unsafe { &*obj }))
}
#[rustfmt::skip]
pub unsafe fn allocate_velopackasset(dto: VelopackAsset, obj: *mut vpkc_asset_t) {
if obj.is_null() { return; }
pub unsafe fn allocate_VelopackAsset<'a, T: Into<Option<&'a VelopackAsset>>>(dto: T) -> *mut vpkc_asset_t {
let dto = dto.into();
if dto.is_none() {
return std::ptr::null_mut();
}
log::debug!("vpkc_asset_t allocated");
allocate_string(dto.PackageId, &mut (*obj).PackageId);
allocate_string(dto.Version, &mut (*obj).Version);
allocate_string(dto.Type, &mut (*obj).Type);
allocate_string(dto.FileName, &mut (*obj).FileName);
allocate_string(dto.SHA1, &mut (*obj).SHA1);
allocate_string(dto.SHA256, &mut (*obj).SHA256);
let dto = dto.unwrap();
let obj = libc::malloc(size_of::<vpkc_asset_t>()) as *mut vpkc_asset_t;
(*obj).PackageId = allocate_String(&dto.PackageId);
(*obj).Version = allocate_String(&dto.Version);
(*obj).Type = allocate_String(&dto.Type);
(*obj).FileName = allocate_String(&dto.FileName);
(*obj).SHA1 = allocate_String(&dto.SHA1);
(*obj).SHA256 = allocate_String(&dto.SHA256);
(*obj).Size = dto.Size;
allocate_string(dto.NotesMarkdown, &mut (*obj).NotesMarkdown);
allocate_string(dto.NotesHtml, &mut (*obj).NotesHtml);
(*obj).NotesMarkdown = allocate_String(&dto.NotesMarkdown);
(*obj).NotesHtml = allocate_String(&dto.NotesHtml);
obj
}
#[rustfmt::skip]
pub unsafe fn free_velopackasset(obj: *mut vpkc_asset_t) {
pub unsafe fn allocate_VelopackAsset_vec(dto: &Vec<VelopackAsset>, count: *mut size_t) -> *mut *mut vpkc_asset_t {
if dto.is_empty() {
*count = 0;
return std::ptr::null_mut();
}
log::debug!("vpkc_asset_t vector allocated");
let count_value = dto.len() as size_t;
*count = count_value;
let mut assets = Vec::with_capacity(count_value as usize);
for i in 0..count_value {
let ptr = allocate_VelopackAsset(&dto[i as usize]);
assets.push(ptr);
}
let ptr = assets.as_mut_ptr();
std::mem::forget(assets);
ptr
}
#[rustfmt::skip]
pub unsafe fn free_VelopackAsset(obj: *mut vpkc_asset_t) {
if obj.is_null() { return; }
free_String((*obj).PackageId);
free_String((*obj).Version);
free_String((*obj).Type);
free_String((*obj).FileName);
free_String((*obj).SHA1);
free_String((*obj).SHA256);
free_String((*obj).NotesMarkdown);
free_String((*obj).NotesHtml);
libc::free(obj as *mut c_void);
log::debug!("vpkc_asset_t freed");
free_string(&mut (*obj).PackageId);
free_string(&mut (*obj).Version);
free_string(&mut (*obj).Type);
free_string(&mut (*obj).FileName);
free_string(&mut (*obj).SHA1);
free_string(&mut (*obj).SHA256);
free_string(&mut (*obj).NotesMarkdown);
free_string(&mut (*obj).NotesHtml);
}
#[rustfmt::skip]
pub unsafe fn free_VelopackAsset_vec(obj: *mut *mut vpkc_asset_t, count: size_t) {
if obj.is_null() || count == 0 { return; }
let vec = Vec::from_raw_parts(obj, count as usize, count as usize);
for i in 0..count {
let ptr = *vec.get_unchecked(i as usize);
free_VelopackAsset(ptr);
}
log::debug!("vpkc_asset_t vector freed");
}
#[rustfmt::skip]
@@ -269,7 +343,13 @@ pub unsafe fn free_velopackasset(obj: *mut vpkc_asset_t) {
/// Holds information about the current version and pending updates, such as how many there are, and access to release notes.
pub struct vpkc_update_info_t {
/// The available version that we are updating to.
pub TargetFullRelease: vpkc_asset_t,
pub TargetFullRelease: *mut vpkc_asset_t,
/// The base release that this update is based on. This is only available if the update is a delta update.
pub BaseRelease: *mut vpkc_asset_t,
/// The list of delta updates that can be applied to the base version to get to the target version.
pub DeltasToTarget: *mut *mut vpkc_asset_t,
/// The number of elements in the DeltasToTarget array.
pub DeltasToTargetCount: size_t,
/// True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
/// In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be
/// deleted.
@@ -277,32 +357,84 @@ pub struct vpkc_update_info_t {
}
#[rustfmt::skip]
pub fn c_to_updateinfo(obj: &vpkc_update_info_t) -> UpdateInfo {
UpdateInfo {
TargetFullRelease: c_to_velopackasset(&obj.TargetFullRelease),
pub fn c_to_UpdateInfo(obj: *mut vpkc_update_info_t) -> Result<UpdateInfo> {
if obj.is_null() { bail!("Null pointer: UpdateInfo must be set."); }
let obj = unsafe { &*obj };
let result = UpdateInfo {
TargetFullRelease: c_to_VelopackAsset(obj.TargetFullRelease)?,
BaseRelease: c_to_VelopackAsset(obj.BaseRelease).ok(),
DeltasToTarget: c_to_VelopackAsset_vec(obj.DeltasToTarget, obj.DeltasToTargetCount)?,
IsDowngrade: obj.IsDowngrade,
};
Ok(result)
}
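Note the asymmetry above: TargetFullRelease is mandatory (the ? turns a null into an error), while BaseRelease uses .ok() so a null pointer simply becomes None for full, non-delta updates. A sketch, where target is a hypothetical VelopackAsset:

let mut c_info = vpkc_update_info_t {
    TargetFullRelease: unsafe { allocate_VelopackAsset(&target) },
    BaseRelease: std::ptr::null_mut(),    // no base release -> None
    DeltasToTarget: std::ptr::null_mut(), // empty list -> vec![]
    DeltasToTargetCount: 0,
    IsDowngrade: false,
};
let parsed = c_to_UpdateInfo(&mut c_info).unwrap();
assert!(parsed.BaseRelease.is_none());
assert!(parsed.DeltasToTarget.is_empty());
unsafe { free_VelopackAsset(c_info.TargetFullRelease) }; // avoid leaking the copy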
#[rustfmt::skip]
pub fn c_to_UpdateInfo_vec(obj: *mut *mut vpkc_update_info_t, count: size_t) -> Result<Vec<UpdateInfo>> {
if obj.is_null() || count == 0 { return Ok(Vec::new()); }
let mut assets = Vec::with_capacity(count as usize);
for i in 0..count {
let ptr = unsafe { *obj.add(i as usize) };
assets.push(c_to_UpdateInfo(ptr)?);
}
Ok(assets)
}
#[rustfmt::skip]
pub fn c_to_updateinfo_opt(obj: *mut vpkc_update_info_t) -> Option<UpdateInfo> {
if obj.is_null() { return None; }
Some(c_to_updateinfo(unsafe { &*obj }))
}
#[rustfmt::skip]
pub unsafe fn allocate_updateinfo(dto: UpdateInfo, obj: *mut vpkc_update_info_t) {
if obj.is_null() { return; }
pub unsafe fn allocate_UpdateInfo<'a, T: Into<Option<&'a UpdateInfo>>>(dto: T) -> *mut vpkc_update_info_t {
let dto = dto.into();
if dto.is_none() {
return std::ptr::null_mut();
}
log::debug!("vpkc_update_info_t allocated");
allocate_velopackasset(dto.TargetFullRelease, &mut (*obj).TargetFullRelease);
let dto = dto.unwrap();
let obj = libc::malloc(size_of::<vpkc_update_info_t>()) as *mut vpkc_update_info_t;
(*obj).TargetFullRelease = allocate_VelopackAsset(&dto.TargetFullRelease);
(*obj).BaseRelease = allocate_VelopackAsset(&dto.BaseRelease);
(*obj).DeltasToTarget = allocate_VelopackAsset_vec(&dto.DeltasToTarget, &mut (*obj).DeltasToTargetCount);
(*obj).IsDowngrade = dto.IsDowngrade;
obj
}
#[rustfmt::skip]
pub unsafe fn free_updateinfo(obj: *mut vpkc_update_info_t) {
pub unsafe fn allocate_UpdateInfo_vec(dto: &Vec<UpdateInfo>, count: *mut size_t) -> *mut *mut vpkc_update_info_t {
if dto.is_empty() {
*count = 0;
return std::ptr::null_mut();
}
log::debug!("vpkc_update_info_t vector allocated");
let count_value = dto.len() as size_t;
*count = count_value;
let mut assets = Vec::with_capacity(count_value as usize);
for i in 0..count_value {
let ptr = allocate_UpdateInfo(&dto[i as usize]);
assets.push(ptr);
}
let ptr = assets.as_mut_ptr();
std::mem::forget(assets);
ptr
}
#[rustfmt::skip]
pub unsafe fn free_UpdateInfo(obj: *mut vpkc_update_info_t) {
if obj.is_null() { return; }
free_VelopackAsset((*obj).TargetFullRelease);
free_VelopackAsset((*obj).BaseRelease);
free_VelopackAsset_vec((*obj).DeltasToTarget, (*obj).DeltasToTargetCount);
libc::free(obj as *mut c_void);
log::debug!("vpkc_update_info_t freed");
free_velopackasset(&mut (*obj).TargetFullRelease);
}
#[rustfmt::skip]
pub unsafe fn free_UpdateInfo_vec(obj: *mut *mut vpkc_update_info_t, count: size_t) {
if obj.is_null() || count == 0 { return; }
let vec = Vec::from_raw_parts(obj, count as usize, count as usize);
for i in 0..count {
let ptr = *vec.get_unchecked(i as usize);
free_UpdateInfo(ptr);
}
log::debug!("vpkc_update_info_t vector freed");
}
#[rustfmt::skip]
@@ -314,7 +446,7 @@ pub struct vpkc_update_options_t {
/// ExplicitChannel to switch channels to another channel where the latest version on that
/// channel is lower than the current version.
pub AllowVersionDowngrade: bool,
/// **This option should usually be left None**. <br/>
/// **This option should usually be left None**.
/// Overrides the default channel used to fetch updates.
/// The default channel will be whatever channel was specified on the command line when building this release.
/// For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
@@ -322,34 +454,86 @@ pub struct vpkc_update_options_t {
/// allows you to explicitly switch channels, for example if the user wished to switch back to the 'stable' channel
/// without having to reinstall the application.
pub ExplicitChannel: *mut c_char,
/// Sets the maximum number of deltas to consider before falling back to a full update.
/// The default is 10. Set to a negative number (e.g. -1) to disable deltas.
pub MaximumDeltasBeforeFallback: i32,
}
#[rustfmt::skip]
pub fn c_to_updateoptions(obj: &vpkc_update_options_t) -> UpdateOptions {
UpdateOptions {
pub fn c_to_UpdateOptions(obj: *mut vpkc_update_options_t) -> Result<UpdateOptions> {
if obj.is_null() { bail!("Null pointer: UpdateOptions must be set."); }
let obj = unsafe { &*obj };
let result = UpdateOptions {
AllowVersionDowngrade: obj.AllowVersionDowngrade,
ExplicitChannel: c_to_string_opt(obj.ExplicitChannel),
ExplicitChannel: c_to_String(obj.ExplicitChannel).ok(),
MaximumDeltasBeforeFallback: obj.MaximumDeltasBeforeFallback,
};
Ok(result)
}
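The same null-tolerant rule applies to ExplicitChannel: a null pointer selects the default channel rather than raising an error. For example:

let mut c_opts = vpkc_update_options_t {
    AllowVersionDowngrade: false,
    ExplicitChannel: std::ptr::null_mut(), // null -> None -> default channel
    MaximumDeltasBeforeFallback: 10,
};
let opts = c_to_UpdateOptions(&mut c_opts).unwrap();
assert!(opts.ExplicitChannel.is_none());
assert_eq!(opts.MaximumDeltasBeforeFallback, 10);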
#[rustfmt::skip]
pub fn c_to_UpdateOptions_vec(obj: *mut *mut vpkc_update_options_t, count: size_t) -> Result<Vec<UpdateOptions>> {
if obj.is_null() || count == 0 { return Ok(Vec::new()); }
let mut assets = Vec::with_capacity(count as usize);
for i in 0..count {
let ptr = unsafe { *obj.add(i as usize) };
assets.push(c_to_UpdateOptions(ptr)?);
}
Ok(assets)
}
#[rustfmt::skip]
pub fn c_to_updateoptions_opt(obj: *mut vpkc_update_options_t) -> Option<UpdateOptions> {
if obj.is_null() { return None; }
Some(c_to_updateoptions(unsafe { &*obj }))
}
#[rustfmt::skip]
pub unsafe fn allocate_updateoptions(dto: UpdateOptions, obj: *mut vpkc_update_options_t) {
if obj.is_null() { return; }
pub unsafe fn allocate_UpdateOptions<'a, T: Into<Option<&'a UpdateOptions>>>(dto: T) -> *mut vpkc_update_options_t {
let dto = dto.into();
if dto.is_none() {
return std::ptr::null_mut();
}
log::debug!("vpkc_update_options_t allocated");
let dto = dto.unwrap();
let obj = libc::malloc(size_of::<vpkc_update_options_t>()) as *mut vpkc_update_options_t;
(*obj).AllowVersionDowngrade = dto.AllowVersionDowngrade;
allocate_string_opt(dto.ExplicitChannel, &mut (*obj).ExplicitChannel);
(*obj).ExplicitChannel = allocate_String(&dto.ExplicitChannel);
(*obj).MaximumDeltasBeforeFallback = dto.MaximumDeltasBeforeFallback;
obj
}
#[rustfmt::skip]
pub unsafe fn free_updateoptions(obj: *mut vpkc_update_options_t) {
pub unsafe fn allocate_UpdateOptions_vec(dto: &Vec<UpdateOptions>, count: *mut size_t) -> *mut *mut vpkc_update_options_t {
if dto.is_empty() {
*count = 0;
return std::ptr::null_mut();
}
log::debug!("vpkc_update_options_t vector allocated");
let count_value = dto.len() as size_t;
*count = count_value;
let mut assets = Vec::with_capacity(count_value as usize);
for i in 0..count_value {
let ptr = allocate_UpdateOptions(&dto[i as usize]);
assets.push(ptr);
}
let ptr = assets.as_mut_ptr();
std::mem::forget(assets);
ptr
}
#[rustfmt::skip]
pub unsafe fn free_UpdateOptions(obj: *mut vpkc_update_options_t) {
if obj.is_null() { return; }
free_String((*obj).ExplicitChannel);
libc::free(obj as *mut c_void);
log::debug!("vpkc_update_options_t freed");
free_string(&mut (*obj).ExplicitChannel);
}
#[rustfmt::skip]
pub unsafe fn free_UpdateOptions_vec(obj: *mut *mut vpkc_update_options_t, count: size_t) {
if obj.is_null() || count == 0 { return; }
let vec = Vec::from_raw_parts(obj, count as usize, count as usize);
for i in 0..count {
let ptr = *vec.get_unchecked(i as usize);
free_UpdateOptions(ptr);
}
log::debug!("vpkc_update_options_t vector freed");
}
// !! AUTO-GENERATED-END RUST_TYPES

View File

@@ -1,61 +0,0 @@
using System.Text;
public class IndentStringBuilder
{
private StringBuilder _sb = new();
int _indent = 0;
public void AppendLine()
{
_sb.AppendLine();
}
public void AppendLine(string text)
{
AppendIndent();
_sb.AppendLine(text);
}
public void AppendDocComment(string comment)
{
if (comment != null) {
foreach (var line in comment.ReplaceLineEndings("\n").Split('\n')) {
AppendLine($"/// {line}");
}
}
}
private void AppendIndent()
{
_sb.Append(' ', _indent * 4);
}
public IDisposable Indent()
{
_indent++;
return new IndentDisposable(this);
}
private void RemoveIndent()
{
_indent--;
}
public override string ToString()
{
return _sb.ToString();
}
private class IndentDisposable(IndentStringBuilder isb) : IDisposable
{
private bool _disposed = false;
public void Dispose()
{
if (!_disposed) {
isb.RemoveIndent();
_disposed = true;
}
}
}
}

View File

@@ -1,4 +1,5 @@
using System.Reflection;
using HandlebarsDotNet;
var scriptsDir = Assembly.GetEntryAssembly()!
.GetCustomAttributes<AssemblyMetadataAttribute>()
@@ -6,6 +7,7 @@ var scriptsDir = Assembly.GetEntryAssembly()!
var librustDir = Path.Combine(scriptsDir, "..", "..", "lib-rust", "src");
var libcppDir = Path.Combine(scriptsDir, "..");
var templatesDir = Path.Combine(scriptsDir, "Templates");
var files = Directory.EnumerateFiles(librustDir, "*.rs", SearchOption.AllDirectories);
string[] desiredStructs = [
@@ -15,13 +17,6 @@ string[] desiredStructs = [
"VelopackLocatorConfig",
];
Dictionary<string, string> basic_libc_names = new() {
{ "VelopackAsset", "vpkc_asset_t" },
{ "UpdateInfo", "vpkc_update_info_t" },
{ "UpdateOptions", "vpkc_update_options_t" },
{ "VelopackLocatorConfig", "vpkc_locator_config_t" },
};
List<RustStruct> availableStructs = new();
string[] searchStrings = desiredStructs.Select(s => "struct " + s + " {").ToArray();
@@ -46,60 +41,131 @@ if (desiredStructs.Length != availableStructs.Count) {
return -1;
}
// rust bridge code
// string rustCppLib = Path.Combine(libcppDir, "src", "lib.rs");
Handlebars.RegisterHelper("indent", (writer, context, args) => {
var comment = (string) context[(string) args[0]];
var indent = (string) args[1];
writer.WriteSafeString(comment.PrefixEveryLine(indent));
writer.WriteSafeString("\n");
});
var types = new List<TypeMap>() {
TypeMap.RustStruct("VelopackAsset", "vpkc_asset_t"),
TypeMap.RustStruct("UpdateInfo", "vpkc_update_info_t"),
TypeMap.RustStruct("UpdateOptions", "vpkc_update_options_t"),
TypeMap.RustStruct("VelopackLocatorConfig", "vpkc_locator_config_t"),
TypeMap.SystemType("String", "char", "string", "c_char"),
TypeMap.SystemType("PathBuf", "char", "string", "c_char"),
TypeMap.Primitive("bool", "bool"),
TypeMap.Primitive("i32", "int32_t"),
TypeMap.Primitive("i64", "int64_t"),
TypeMap.Primitive("u32", "uint32_t"),
TypeMap.Primitive("u64", "uint64_t"),
}.ToDictionary(v => v.rustType, v => v);
var handlebarData = availableStructs.Select(s => new RustStruct_Struct {
rust_comment = s.DocComment.ToRustComment(),
cpp_comment = s.DocComment.ToCppComment(),
struct_rust_name = s.Name,
struct_c_name = types[s.Name].interopType,
fields = s.Fields.Select(f => {
var isString = types[f.Type].rustType == "PathBuf" || types[f.Type].rustType == "String";
var field = new RustStruct_Field {
rust_comment = f.DocComment.ToRustComment(),
cpp_comment = f.DocComment.ToCppComment(),
field_name = f.Name,
field_optional = f.Optional,
field_vector = f.Vec,
field_rust_type = f.Type,
field_c_type = types[f.Type].interopType,
field_cpp_type = types[f.Type].cppType,
field_system = types[f.Type].system,
field_primitive = types[f.Type].primitive,
field_normal = !f.Vec && !types[f.Type].primitive,
};
return field;
}).ToArray(),
}).ToArray();
string rustTypes = Path.Combine(libcppDir, "src", "types.rs");
//string rustCppMap = Path.Combine(libcppDir, "src", "map.rs");
var rustCTypesTemplate = Handlebars.Compile(File.ReadAllText(Path.Combine(templatesDir, "rust_types.hbs")));
var rustCTypes = rustCTypesTemplate(handlebarData);
string rustCppInclude = Path.Combine(libcppDir, "include", "Velopack.hpp");
//string rustBridgeC = Path.Combine(libcppDir, "src", "bridge.cc");
//Console.WriteLine("Generating bridge dtos");
//var sbBridgeDto = new IndentStringBuilder();
//foreach(var rs in availableStructs) {
// Templates.WriteBridgeDto(desiredStructs, sbBridgeDto, rs);
//}
//Console.WriteLine("Generating bridge to core mappings");
//var sbBridgeMapping = new IndentStringBuilder();
//foreach(var rs in availableStructs) {
// Templates.WriteBridgeToCoreMapping(desiredStructs, sbBridgeMapping, rs);
//}
// Console.WriteLine("Generating C types");
// var cTypes = new IndentStringBuilder();
// cTypes.AppendLine();
// foreach (var rs in availableStructs) {
// Templates.WriteBasicC(basic_libc_names, cTypes, rs);
// }
Console.WriteLine("Generating C++ types");
var cppTypes = new IndentStringBuilder();
cppTypes.AppendLine();
foreach (var rs in availableStructs) {
Templates.WriteCPlusPlus(basic_libc_names, cppTypes, rs);
}
foreach (var rs in availableStructs) {
Templates.WriteC2CPPMapping(basic_libc_names, cppTypes, rs);
}
Console.WriteLine("Generating Rust-C types");
var rustCTypes = new IndentStringBuilder();
foreach (var rs in availableStructs) {
Templates.WriteRustCRepr(basic_libc_names, rustCTypes, rs);
}
//Console.WriteLine("Generating C to bridge mappings");
//var cToBridgeMapping = new IndentStringBuilder();
//foreach (var rs in availableStructs) {
// Templates.WriteCBridgeMapping(basic_libc_names, cToBridgeMapping, rs);
//}
var cppTypesTemplate = Handlebars.Compile(File.ReadAllText(Path.Combine(templatesDir, "cpp_mapping.hbs")));
var cppTypes = cppTypesTemplate(handlebarData);
Console.WriteLine("Writing all to file");
//Util.ReplaceTextInFile(rustCppLib, "BRIDGE_DTOS", sbBridgeDto.ToString());
//Util.ReplaceTextInFile(rustCppMap, "CORE_MAPPING", sbBridgeMapping.ToString());
Util.ReplaceTextInFile(rustTypes, "RUST_TYPES", rustCTypes.ToString());
// Util.ReplaceTextInFile(rustCppInclude, "C_TYPES", cTypes.ToString());
Util.ReplaceTextInFile(rustCppInclude, "CPP_TYPES", cppTypes.ToString());
//Util.ReplaceTextInFile(rustBridgeC, "BRIDGE_MAPPING", cToBridgeMapping.ToString());
Util.ReplaceTextInFile(rustTypes, "RUST_TYPES", rustCTypes.ToString().ReplaceLineEndings("\n"));
Util.ReplaceTextInFile(rustCppInclude, "CPP_TYPES", cppTypes.ToString().ReplaceLineEndings("\n"));
return 0;
class TypeMap
{
public string rustType;
public string cType;
public string cppType;
public string interopType;
public bool primitive;
public bool system;
public static TypeMap RustStruct(string rustName, string cType)
{
return new TypeMap() {
rustType = rustName,
cType = cType,
cppType = rustName,
interopType = cType,
primitive = false,
system = false,
};
}
public static TypeMap Primitive(string rustName, string cType)
{
return new TypeMap() {
rustType = rustName,
cType = cType,
cppType = cType,
interopType = rustName,
primitive = true,
system = false,
};
}
public static TypeMap SystemType(string rustName, string cType, string cppType, string interopType)
{
return new TypeMap() {
rustType = rustName,
cType = cType,
cppType = cppType,
interopType = interopType,
primitive = false,
system = true,
};
}
}
class RustStruct_Struct
{
public string struct_c_name;
public string struct_rust_name;
public string rust_comment;
public string cpp_comment;
public RustStruct_Field[] fields;
}
class RustStruct_Field
{
public string field_name;
public string field_c_type;
public string field_cpp_type;
public string field_rust_type;
public bool field_primitive;
public bool field_optional;
public bool field_vector;
public bool field_system;
public bool field_normal;
public string rust_comment;
public string cpp_comment;
}

View File

@@ -1,7 +1,7 @@
using System.Text.RegularExpressions;
using Superpower;
using Superpower.Parsers;
using Superpower.Model;
using Superpower.Parsers;
using Superpower.Tokenizers;
public class RustField
@@ -10,6 +10,7 @@ public class RustField
public string Name { get; set; }
public string Type { get; set; }
public bool Optional { get; set; }
public bool Vec { get; set; }
}
public class RustStruct
@@ -80,7 +81,7 @@ public static class StructParser
{
return
(from nested in SkipNestedBraces() select Unit.Value) // handle recursive braces
.Or(from nonBrace in Token.Matching<RustToken>(kind => kind != RustToken.OpenBrace && kind != RustToken.CloseBrace,"non-brace").AtLeastOnce() select Unit.Value).Many()
.Or(from nonBrace in Token.Matching<RustToken>(kind => kind != RustToken.OpenBrace && kind != RustToken.CloseBrace, "non-brace").AtLeastOnce() select Unit.Value).Many()
.Select(_ => Unit.Value);
}
@@ -92,7 +93,7 @@ public static class StructParser
private static readonly TokenListParser<RustToken, string> TypeParser =
from rest in Token.Matching<RustToken>(kind => kind != RustToken.Comma, "Expected tokens before ','").AtLeastOnce()
from end in Token.EqualTo(RustToken.Comma)
select string.Join(" ", rest.Select(t => t.ToStringValue()));
private static readonly TokenListParser<RustToken, RustField> FieldDefinition =
@@ -103,8 +104,7 @@ public static class StructParser
from fieldName in Token.EqualTo(RustToken.Identifier).Select(t => t.ToStringValue())
from colon in Token.EqualTo(RustToken.Colon)
from fieldType in TypeParser
select new RustField
{
select new RustField {
DocComment = docComments,
Name = fieldName,
Type = fieldType.Trim()
@@ -124,8 +124,7 @@ public static class StructParser
from structKeyword in Token.EqualTo(RustToken.KeywordStruct)
from structName in Token.EqualTo(RustToken.Identifier).Select(t => t.ToStringValue())
from structBody in StructBody
select new RustStruct
{
select new RustStruct {
DocComment = docComments,
Name = structName,
Fields = structBody
@@ -133,7 +132,7 @@ public static class StructParser
private static readonly TokenListParser<RustToken, RustStruct> TopLevelItem =
(from impl in ImplBlock
select (RustStruct)null)
select (RustStruct) null)
.Or(
from structDef in StructDefinition
select structDef
@@ -145,27 +144,30 @@ public static class StructParser
var parser = TopLevelItem.Many();
var result = parser(tokens);
if (!result.HasValue)
{
if (!result.HasValue) {
throw new Exception(result.ToString());
}
var structs = result.Value.Where(s => s != null).ToArray();
foreach(var s in structs)
{
foreach(var f in s.Fields)
{
foreach (var s in structs) {
foreach (var f in s.Fields) {
var match = Regex.Match(f.Type, @"Option<(.*)>");
// If the field type is an Option, extract the inner type and set Optional to true
if (match.Success)
{
if (match.Success) {
f.Type = match.Groups[1].Value;
f.Optional = true;
}
var match2 = Regex.Match(f.Type, @"Vec<(.*)>");
// If the field type is a Vec, extract the inner type and set Vec to true
if (match2.Success) {
f.Type = match2.Groups[1].Value;
f.Vec = true;
}
}
}
return structs;
}
}
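To make the Option/Vec unwrapping concrete, these are the three field shapes the regex pass distinguishes in the lib-rust structs (an illustrative Rust sketch, not an actual Velopack type):

#[allow(non_snake_case)]
pub struct ExampleStruct {
    pub Plain: String,            // Optional = false, Vec = false
    pub Maybe: Option<String>,    // Optional = true, inner type "String"
    pub Many: Vec<VelopackAsset>, // Vec = true, inner type "VelopackAsset"
}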

View File

@@ -1,406 +0,0 @@
public static class Templates
{
private static string GetBasicCType(Dictionary<string, string> nameMap, string rustType)
{
switch (rustType) {
case "PathBuf":
case "String":
return "char*";
case "bool":
return "bool";
case "i32":
return "int64_t";
case "i64":
return "int64_t";
case "u32":
return "uint32_t";
case "u64":
return "uint64_t";
default:
if (nameMap.TryGetValue(rustType, out var type)) {
return type;
}
throw new NotSupportedException("Unsupported type for basic-c: " + rustType);
}
}
private static string GetBasicCTypeInRust(Dictionary<string, string> nameMap, string rustType)
{
switch (rustType) {
case "PathBuf":
case "String":
return "*mut c_char";
case "bool":
return "bool";
case "i32":
return "i32";
case "i64":
return "i64";
case "u32":
return "u32";
case "u64":
return "u64";
default:
if (nameMap.TryGetValue(rustType, out var type)) {
return type;
}
throw new NotSupportedException("Unsupported type for rust-c: " + rustType);
}
}
public static void WriteRustCRepr(Dictionary<string, string> nameMap, IndentStringBuilder sb, RustStruct rs)
{
var cName = nameMap[rs.Name];
sb.AppendLine("#[rustfmt::skip]");
sb.AppendLine($"#[repr(C)]");
sb.AppendDocComment(rs.DocComment);
sb.AppendLine($"pub struct {cName} {{");
using (sb.Indent()) {
foreach (var field in rs.Fields) {
sb.AppendDocComment(field.DocComment);
sb.AppendLine($"pub {field.Name}: {GetBasicCTypeInRust(nameMap, field.Type)},");
}
}
sb.AppendLine("}");
sb.AppendLine();
sb.AppendLine("#[rustfmt::skip]");
sb.AppendLine($"pub fn c_to_{rs.Name.ToLower()}(obj: &{cName}) -> {rs.Name} {{");
using (sb.Indent()) {
// sb.AppendLine($"let obj = unsafe {{ &*obj }};");
sb.AppendLine($"{rs.Name} {{");
using (sb.Indent()) {
foreach (var field in rs.Fields) {
if (field.Optional || field.Type == "PathBuf" || field.Type == "String" || nameMap.ContainsKey(field.Type)) {
sb.AppendLine($"{field.Name}: c_to_{field.Type.ToLower()}{(field.Optional ? "_opt": "")}({(nameMap.ContainsKey(field.Type) ? "&" : "")}obj.{field.Name}),");
} else {
sb.AppendLine($"{field.Name}: obj.{field.Name},");
}
}
}
sb.AppendLine("}");
}
sb.AppendLine("}");
sb.AppendLine();
sb.AppendLine("#[rustfmt::skip]");
sb.AppendLine($"pub fn c_to_{rs.Name.ToLower()}_opt(obj: *mut {cName}) -> Option<{rs.Name}> {{");
using (sb.Indent()) {
sb.AppendLine("if obj.is_null() { return None; }");
sb.AppendLine($"Some(c_to_{rs.Name.ToLower()}(unsafe {{ &*obj }}))");
}
sb.AppendLine("}");
sb.AppendLine();
sb.AppendLine("#[rustfmt::skip]");
sb.AppendLine($"pub unsafe fn allocate_{rs.Name.ToLower()}(dto: {rs.Name}, obj: *mut {cName}) {{");
using (sb.Indent()) {
sb.AppendLine("if obj.is_null() { return; }");
sb.AppendLine($"log::debug!(\"{cName} allocated\");");
foreach (var field in rs.Fields) {
if (field.Optional || field.Type == "PathBuf" || field.Type == "String" || nameMap.ContainsKey(field.Type)) {
sb.AppendLine($"allocate_{field.Type.ToLower()}{(field.Optional ? "_opt": "")}(dto.{field.Name}, &mut (*obj).{field.Name});");
} else {
sb.AppendLine($"(*obj).{field.Name} = dto.{field.Name};");
}
}
}
sb.AppendLine("}");
sb.AppendLine();
sb.AppendLine("#[rustfmt::skip]");
sb.AppendLine($"pub unsafe fn free_{rs.Name.ToLower()}(obj: *mut {cName}) {{");
using (sb.Indent()) {
sb.AppendLine("if obj.is_null() { return; }");
sb.AppendLine($"log::debug!(\"{cName} freed\");");
foreach (var field in rs.Fields) {
if (field.Optional || field.Type == "PathBuf" || field.Type == "String" || nameMap.ContainsKey(field.Type)) {
sb.AppendLine($"free_{field.Type.ToLower()}(&mut (*obj).{field.Name});");
}
}
}
sb.AppendLine("}");
sb.AppendLine();
}
private static string GetCPlusPlusType(string[] coreTypes, string rustType, bool optional)
{
string type = rustType switch {
"PathBuf" => "std::string",
"String" => "std::string",
"bool" => "bool",
"i32" => "int64_t",
"i64" => "int64_t",
"u32" => "uint32_t",
"u64" => "uint64_t",
_ => coreTypes.Contains(rustType) ? rustType : throw new NotSupportedException("Unsupported type for c-plus-plus: " + rustType),
};
return optional ? "std::optional<" + type + ">" : type;
}
public static void WriteCBridgeMapping(Dictionary<string, string> nameMap, IndentStringBuilder sb, RustStruct rs)
{
var cName = nameMap[rs.Name];
sb.AppendLine($"static inline {rs.Name}Dto to_bridge({cName}* pDto) {{");
using (sb.Indent()) {
sb.AppendLine($"if (pDto == nullptr) {{ return {{}}; }}");
sb.AppendLine($"return {{");
using (sb.Indent()) {
foreach (var field in rs.Fields) {
string suffix = field.Optional ? "_opt" : "";
string type = field.Type == "PathBuf" ? "string" : field.Type.ToLower();
if (nameMap.ContainsKey(field.Type)) {
sb.AppendLine($"to_bridge{suffix}(&pDto->{field.Name}),");
} else if (type == "string") {
sb.AppendLine($"to_bridge{type}{suffix}(pDto->{field.Name}),");
} else {
sb.AppendLine($"pDto->{field.Name},");
}
}
}
sb.AppendLine("};");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"static inline {rs.Name}DtoOption to_bridge_opt({cName}* pDto) {{");
using (sb.Indent()) {
sb.AppendLine($"{rs.Name}DtoOption opt;");
sb.AppendLine($"if (pDto == nullptr) {{");
using (sb.Indent()) {
sb.AppendLine($"opt.has_data = false;");
sb.AppendLine($"return opt;");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"opt.has_data = true;");
sb.AppendLine($"opt.data = to_bridge(pDto);");
sb.AppendLine($"return opt;");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"static inline void allocate_{rs.Name.ToLower()}({rs.Name}Dto bridgeDto, {cName}* pDto) {{");
using (sb.Indent()) {
sb.AppendLine($"if (pDto == nullptr) {{ return; }}");
foreach (var field in rs.Fields) {
string type = field.Type == "PathBuf" ? "string" : field.Type.ToLower();
string suffix = field.Optional ? "_opt" : "";
sb.AppendLine(
nameMap.ContainsKey(field.Type) || type == "string"
? $"allocate_{type}{suffix}(bridgeDto.{field.Name}, &pDto->{field.Name});"
: $"pDto->{field.Name} = bridgeDto.{field.Name};");
}
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"static inline void free_{rs.Name.ToLower()}({cName}* pDto) {{");
using (sb.Indent()) {
sb.AppendLine($"if (pDto == nullptr) {{ return; }}");
foreach (var field in rs.Fields) {
string type = field.Type == "PathBuf" ? "string" : field.Type.ToLower();
if (nameMap.ContainsKey(field.Type)) {
sb.AppendLine($"free_{type}(&pDto->{field.Name});");
} else if (type == "string") {
sb.AppendLine($"free(pDto->{field.Name});");
}
}
}
sb.AppendLine($"}}");
sb.AppendLine();
}
public static void WriteBasicC(Dictionary<string, string> nameMap, IndentStringBuilder sb, RustStruct rs)
{
sb.AppendDocComment(rs.DocComment);
sb.AppendLine($"typedef struct {nameMap[rs.Name]} {{");
foreach (var field in rs.Fields) {
using (sb.Indent()) {
sb.AppendDocComment(field.DocComment);
sb.AppendLine($"{GetBasicCType(nameMap, field.Type)} {field.Name};");
}
}
sb.AppendLine($"}} {nameMap[rs.Name]};");
sb.AppendLine();
}
public static void WriteC2CPPMapping(Dictionary<string, string> nameMap, IndentStringBuilder sb, RustStruct rs)
{
sb.AppendLine($"static inline {nameMap[rs.Name]} to_c(const {rs.Name}& dto) {{");
using (sb.Indent()) {
sb.AppendLine("return {");
using (sb.Indent()) {
foreach (var field in rs.Fields) {
string suffix = field.Optional ? "_opt" : "";
string type = field.Type == "PathBuf" ? "string" : field.Type.ToLower();
sb.AppendLine(
nameMap.ContainsKey(field.Type)
? $"to_c{suffix}(dto.{field.Name}),"
: $"to_c{type}{suffix}(dto.{field.Name}),");
}
}
sb.AppendLine("};");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"static inline {rs.Name} to_cpp(const {nameMap[rs.Name]}& dto) {{");
using (sb.Indent()) {
sb.AppendLine("return {");
using (sb.Indent()) {
foreach (var field in rs.Fields) {
string suffix = field.Optional ? "_opt" : "";
string type = field.Type == "PathBuf" ? "string" : field.Type.ToLower();
sb.AppendLine(
nameMap.ContainsKey(field.Type)
? $"to_cpp{suffix}(dto.{field.Name}),"
: $"to_cpp{type}{suffix}(dto.{field.Name}),");
}
}
sb.AppendLine("};");
}
sb.AppendLine($"}}");
sb.AppendLine();
}
public static void WriteCPlusPlus(Dictionary<string, string> nameMap, IndentStringBuilder sb, RustStruct rs)
{
var coreTypes = nameMap.Keys.ToArray();
sb.AppendDocComment(rs.DocComment);
sb.AppendLine($"struct {rs.Name} {{");
foreach (var field in rs.Fields) {
using (sb.Indent()) {
sb.AppendDocComment(field.DocComment);
sb.AppendLine($"{GetCPlusPlusType(coreTypes, field.Type, field.Optional)} {field.Name};");
}
}
sb.AppendLine($"}};");
sb.AppendLine();
}
public static void WriteBridgeDto(string[] coreTypes, IndentStringBuilder sb, RustStruct rs)
{
Func<string, string> nameMapper = (str) =>
coreTypes.Contains(str) ? str + "Dto" : str;
using (sb.Indent()) {
sb.AppendLine($"#[derive(Default)]");
sb.AppendLine($"pub struct {nameMapper(rs.Name)} {{");
foreach (var field in rs.Fields) {
string type = field.Type;
if (type == "PathBuf") {
type = "String";
}
using (sb.Indent()) {
if (field.Optional) {
sb.AppendLine($"pub {field.Name}: {nameMapper(type)}Option,");
} else {
sb.AppendLine($"pub {field.Name}: {nameMapper(type)},");
}
}
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"#[derive(Default)]");
sb.AppendLine($"pub struct {nameMapper(rs.Name)}Option {{");
using (sb.Indent()) {
sb.AppendLine($"pub data: {nameMapper(rs.Name)},");
sb.AppendLine($"pub has_data: bool,");
}
sb.AppendLine($"}}");
sb.AppendLine();
}
}
public static void WriteBridgeToCoreMapping(string[] coreTypes, IndentStringBuilder sb, RustStruct rs)
{
Func<string, string> nameMapper = (str) => coreTypes.Contains(str) ? str + "Dto" : str;
sb.AppendLine($"pub fn {rs.Name.ToLower()}_to_core(dto: &{nameMapper(rs.Name)}) -> {rs.Name} {{");
;
using (sb.Indent()) {
sb.AppendLine($"{rs.Name} {{");
foreach (var field in rs.Fields) {
using (sb.Indent()) {
if (field.Optional) {
sb.AppendLine(
$"{field.Name}: if dto.{field.Name}.has_data {{ Some({field.Type.ToLower()}_to_core(&dto.{field.Name}.data)) }} else {{ None }},");
} else {
sb.AppendLine($"{field.Name}: {field.Type.ToLower()}_to_core(&dto.{field.Name}),");
}
}
}
sb.AppendLine($"}}");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"pub fn {rs.Name.ToLower()}_to_bridge(dto: &{rs.Name}) -> {nameMapper(rs.Name)} {{");
using (sb.Indent()) {
sb.AppendLine($"{nameMapper(rs.Name)} {{");
foreach (var field in rs.Fields) {
using (sb.Indent()) {
if (field.Optional) {
sb.AppendLine(
$"{field.Name}: {nameMapper(field.Type)}Option {{ data: {field.Type.ToLower()}_to_bridge(&dto.{field.Name}.clone().unwrap_or_default()), has_data: dto.{field.Name}.is_some() }},");
} else {
sb.AppendLine($"{field.Name}: {field.Type.ToLower()}_to_bridge(&dto.{field.Name}),");
}
}
}
sb.AppendLine($"}}");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"pub fn {rs.Name.ToLower()}_to_core_option(dto: &{nameMapper(rs.Name)}Option) -> Option<{rs.Name}> {{");
;
using (sb.Indent()) {
sb.AppendLine($"if dto.has_data {{ Some({rs.Name.ToLower()}_to_core(&dto.data)) }} else {{ None }}");
}
sb.AppendLine($"}}");
sb.AppendLine();
sb.AppendLine($"pub fn {rs.Name.ToLower()}_to_bridge_option(dto: &Option<{rs.Name}>) -> {nameMapper(rs.Name)}Option {{");
;
using (sb.Indent()) {
sb.AppendLine($"match dto {{");
using (sb.Indent()) {
sb.AppendLine($"Some(dto) => {nameMapper(rs.Name)}Option {{ data: {rs.Name.ToLower()}_to_bridge(dto), has_data: true }},");
sb.AppendLine($"None => {nameMapper(rs.Name)}Option {{ data: Default::default(), has_data: false }},");
}
sb.AppendLine($"}}");
}
sb.AppendLine($"}}");
sb.AppendLine();
}
}

View File

@@ -0,0 +1,83 @@
{{#each this}}
{{cpp_comment}}
struct {{struct_rust_name}} {
{{#each fields}}
{{#indent "cpp_comment" " "}}
{{#unless field_vector}}{{#if field_optional}}std::optional<{{/if~}}
{{~#if field_system~}}std::{{~/if~}}{{field_cpp_type}}
{{~#if field_optional}}>{{/if}} {{field_name}};{{~/unless~}}
{{#if field_vector}}std::vector<{{field_cpp_type}}> {{field_name}};{{/if}}
{{/each}}
};
static inline std::optional<{{struct_rust_name}}> to_cpp_{{struct_rust_name}}(const {{struct_c_name}}* dto) {
if (dto == nullptr) { return std::nullopt; }
return std::optional<{{struct_rust_name}}>({
{{#each fields}}
{{#if field_primitive}}dto->{{field_name}},{{/if~}}
{{#if field_normal}}{{#unless field_optional}}unwrap({{/unless}}to_cpp_{{field_cpp_type}}(dto->{{field_name}}){{~#unless field_optional}}, "Required property {{field_name}} was null"){{/unless}},{{/if~}}
{{#if field_vector}}to_cpp_{{field_cpp_type}}_vec(dto->{{field_name}}, dto->{{field_name}}Count),{{/if}}
{{/each}}
});
}
static inline std::vector<{{struct_rust_name}}> to_cpp_{{struct_rust_name}}_vec(const {{struct_c_name}}* const* arr, size_t c) {
if (arr == nullptr || c < 1) { return std::vector<{{struct_rust_name}}>(); }
std::vector<{{struct_rust_name}}> result;
result.reserve(c);
for (size_t i = 0; i < c; ++i) {
auto dto = arr[i];
if (dto == nullptr) { continue; }
result.push_back(unwrap(to_cpp_{{struct_rust_name}}(dto)));
}
return result;
}
static inline {{struct_c_name}}* alloc_c_{{struct_rust_name}}_ptr(const {{struct_rust_name}}* dto) {
if (dto == nullptr) { return nullptr; }
{{struct_c_name}}* obj = new {{struct_c_name}}{};
{{#each fields}}
{{#if field_primitive}}obj->{{field_name}} = dto->{{field_name}};{{/if~}}
{{#if field_normal}}obj->{{field_name}} = alloc_c_{{field_cpp_type}}(dto->{{field_name}});{{/if~}}
{{#if field_vector}}obj->{{field_name}} = alloc_c_{{field_cpp_type}}_vec(dto->{{field_name}}, &obj->{{field_name}}Count);{{/if}}
{{/each}}
return obj;
}
static inline {{struct_c_name}}* alloc_c_{{struct_rust_name}}(const std::optional<{{struct_rust_name}}>& dto) {
if (!dto.has_value()) { return nullptr; }
{{struct_rust_name}} obj = unwrap(dto);
return alloc_c_{{struct_rust_name}}_ptr(&obj);
}
static inline {{struct_c_name}}** alloc_c_{{struct_rust_name}}_vec(const std::vector<{{struct_rust_name}}>& dto, size_t* count) {
if (dto.empty()) {
*count = 0;
return nullptr;
}
*count = dto.size();
{{struct_c_name}}** arr = new {{struct_c_name}}*[*count];
for (size_t i = 0; i < *count; ++i) {
arr[i] = alloc_c_{{struct_rust_name}}(dto[i]);
}
return arr;
}
static inline void free_c_{{struct_rust_name}}({{struct_c_name}}* obj) {
if (obj == nullptr) { return; }
{{#each fields}}
{{#if field_normal}}free_c_{{field_cpp_type}}(obj->{{field_name}});{{/if~}}
{{#if field_vector}}free_c_{{field_cpp_type}}_vec(obj->{{field_name}}, obj->{{field_name}}Count);{{/if}}
{{/each}}
delete obj;
}
static inline void free_c_{{struct_rust_name}}_vec({{struct_c_name}}** arr, size_t count) {
if (arr == nullptr || count < 1) { return; }
for (size_t i = 0; i < count; ++i) {
free_c_{{struct_rust_name}}(arr[i]);
}
delete[] arr;
}
{{/each}}

View File

@@ -0,0 +1,99 @@
{{#each this}}
#[rustfmt::skip]
#[repr(C)]
{{rust_comment}}
pub struct {{struct_c_name}} {
{{#each fields}}
{{#indent "rust_comment" " "}}
pub {{field_name}}: {{#unless field_primitive}}*mut {{/unless}}{{~#if field_vector}}*mut {{/if}}{{field_c_type}},
{{#if field_vector}}
/// The number of elements in the {{field_name}} array.
pub {{field_name}}Count: size_t,
{{/if}}
{{/each}}
}
#[rustfmt::skip]
pub fn c_to_{{struct_rust_name}}(obj: *mut {{struct_c_name}}) -> Result<{{struct_rust_name}}> {
if obj.is_null() { bail!("Null pointer: {{struct_rust_name}} must be set."); }
let obj = unsafe { &*obj };
let result = {{struct_rust_name}} {
{{#each fields}}
{{#if field_normal}}{{field_name}}: c_to_{{field_rust_type}}(obj.{{field_name}}){{#if field_optional}}.ok(){{else}}?{{/if}},{{/if~}}
{{#if field_vector}}{{field_name}}: c_to_{{field_rust_type}}_vec(obj.{{field_name}}, obj.{{field_name}}Count)?,{{/if~}}
{{#if field_primitive}}{{field_name}}: obj.{{field_name}},{{/if}}
{{/each}}
};
Ok(result)
}
#[rustfmt::skip]
pub fn c_to_{{struct_rust_name}}_vec(obj: *mut *mut {{struct_c_name}}, count: size_t) -> Result<Vec<{{struct_rust_name}}>> {
if obj.is_null() || count == 0 { return Ok(Vec::new()); }
let mut assets = Vec::with_capacity(count as usize);
for i in 0..count {
let ptr = unsafe { *obj.add(i as usize) };
assets.push(c_to_{{struct_rust_name}}(ptr)?);
}
Ok(assets)
}
#[rustfmt::skip]
pub unsafe fn allocate_{{struct_rust_name}}<'a, T: Into<Option<&'a {{struct_rust_name}}>>>(dto: T) -> *mut {{struct_c_name}} {
let dto = dto.into();
if dto.is_none() {
return std::ptr::null_mut();
}
log::debug!("{{struct_c_name}} allocated");
let dto = dto.unwrap();
let obj = libc::malloc(size_of::<{{struct_c_name}}>()) as *mut {{struct_c_name}};
{{#each fields}}
{{#if field_normal}}(*obj).{{field_name}} = allocate_{{field_rust_type}}(&dto.{{field_name}});{{/if~}}
{{#if field_vector}}(*obj).{{field_name}} = allocate_{{field_rust_type}}_vec(&dto.{{field_name}}, &mut (*obj).{{field_name}}Count);{{/if~}}
{{#if field_primitive}}(*obj).{{field_name}} = dto.{{field_name}};{{/if}}
{{/each}}
obj
}
#[rustfmt::skip]
pub unsafe fn allocate_{{struct_rust_name}}_vec(dto: &Vec<{{struct_rust_name}}>, count: *mut size_t) -> *mut *mut {{struct_c_name}} {
if dto.is_empty() {
*count = 0;
return std::ptr::null_mut();
}
log::debug!("{{struct_c_name}} vector allocated");
let count_value = dto.len() as size_t;
*count = count_value;
let mut assets = Vec::with_capacity(count_value as usize);
for i in 0..count_value {
let ptr = allocate_{{struct_rust_name}}(&dto[i as usize]);
assets.push(ptr);
}
let ptr = assets.as_mut_ptr();
std::mem::forget(assets);
ptr
}
#[rustfmt::skip]
pub unsafe fn free_{{struct_rust_name}}(obj: *mut {{struct_c_name}}) {
if obj.is_null() { return; }
{{#each fields}}
{{#if field_vector}}free_{{field_rust_type}}_vec((*obj).{{field_name}}, (*obj).{{field_name}}Count);{{/if~}}
{{#if field_normal}}free_{{field_rust_type}}((*obj).{{field_name}});{{/if}}
{{/each}}
libc::free(obj as *mut c_void);
log::debug!("{{struct_c_name}} freed");
}
#[rustfmt::skip]
pub unsafe fn free_{{struct_rust_name}}_vec(obj: *mut *mut {{struct_c_name}}, count: size_t) {
if obj.is_null() || count == 0 { return; }
let vec = Vec::from_raw_parts(obj, count as usize, count as usize);
for i in 0..count {
let ptr = *vec.get_unchecked(i as usize);
free_{{struct_rust_name}}(ptr);
}
log::debug!("{{struct_c_name}} vector freed");
}
{{/each}}

View File

@@ -4,9 +4,9 @@
{
var body = File.ReadAllText(path);
ReplaceTextBetween(ref body, placeholderName, text);
File.WriteAllText(path, body);
File.WriteAllText(path, body.ReplaceLineEndings("\n"));
}
public static void ReplaceTextBetween(ref string body, string placeholderName, string text)
{
var start = $"// !! AUTO-GENERATED-START {placeholderName}";
@@ -24,4 +24,25 @@
body = body.Remove(startIndex, endIndex - startIndex);
body = body.Insert(startIndex, text.TrimEnd());
}
public static string PrefixEveryLine(this string text, string prefix)
{
if (string.IsNullOrEmpty(prefix)) { return text; }
var lines = text.ReplaceLineEndings("\n").Split(['\n']).Select(l => prefix + l);
return String.Join("\n", lines);
}
public static string ToRustComment(this string text)
{
return text.PrefixEveryLine("/// ");
}
public static string ToCppComment(this string text)
{
if (text.Contains("\n")) {
return "/**\n" + text.PrefixEveryLine(" * ") + "\n */";
} else {
return $"/** {text} */";
}
}
}

View File

@@ -4,11 +4,12 @@
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<Nullable>disable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Superpower" Version="3.0.0" />
<PackageReference Include="Handlebars.Net" Version="2.1.6" />
</ItemGroup>
<ItemGroup>

View File

@@ -35,12 +35,14 @@ namespace Velopack.Sources
protected virtual string? AccessToken { get; }
/// <summary>
/// The Bearer token used in the request.
/// The Bearer token or other type of Authorization header used to authenticate against the API.
/// </summary>
protected virtual string? Authorization => string.IsNullOrWhiteSpace(AccessToken) ? null : "Bearer " + AccessToken;
protected abstract (string Name, string Value) Authorization { get; }
/// <inheritdoc />
public GitBase(string repoUrl, string? accessToken, bool prerelease, IFileDownloader? downloader = null)
/// <summary>
/// Base constructor.
/// </summary>
protected GitBase(string repoUrl, string? accessToken, bool prerelease, IFileDownloader? downloader = null)
{
RepoUri = new Uri(repoUrl.TrimEnd('/'));
AccessToken = accessToken;
@@ -55,7 +57,12 @@ namespace Velopack.Sources
// this might be a browser url or an api url (depending on whether we have an AccessToken or not)
// https://docs.github.com/en/rest/reference/releases#get-a-release-asset
var assetUrl = GetAssetUrlFromName(githubEntry.Release, releaseEntry.FileName);
return Downloader.DownloadFile(assetUrl, localFile, progress, Authorization, "application/octet-stream", cancelToken: cancelToken);
return Downloader.DownloadFile(assetUrl, localFile, progress,
new Dictionary<string, string> {
[Authorization.Name] = Authorization.Value,
["Accept"] = "application/octet-stream"
},
cancelToken: cancelToken);
}
throw new ArgumentException($"Expected releaseEntry to be {nameof(GitBaseAsset)} but got {releaseEntry.GetType().Name}.");
@@ -84,7 +91,12 @@ namespace Velopack.Sources
logger.Trace(ex.ToString());
continue;
}
var releaseBytes = await Downloader.DownloadBytes(assetUrl, Authorization, "application/octet-stream").ConfigureAwait(false);
var releaseBytes = await Downloader.DownloadBytes(assetUrl,
new Dictionary<string, string> {
[Authorization.Name] = Authorization.Value,
["Accept"] = "application/octet-stream"
}
).ConfigureAwait(false);
var txt = CoreUtil.RemoveByteOrderMarkerIfPresent(releaseBytes);
var feed = VelopackAssetFeed.FromJson(txt);
foreach (var f in feed.Assets) {
@@ -111,7 +123,7 @@ namespace Velopack.Sources
protected abstract string GetAssetUrlFromName(T release, string assetName);
/// <summary>
/// Provides a wrapper around <see cref="ReleaseEntry"/> which also contains a Git Release.
/// Provides a wrapper around <see cref="VelopackAsset"/> which also contains a Git Release.
/// </summary>
protected internal record GitBaseAsset : VelopackAsset
{

View File

@@ -81,11 +81,10 @@ namespace Velopack.Sources
: base(repoUrl, accessToken, prerelease, downloader)
{
}
/// <summary>
/// The authorization token used in the request.
/// Overwrite it to token
/// </summary>
protected override string? Authorization => string.IsNullOrWhiteSpace(AccessToken) ? null : "token " + AccessToken;
/// <inheritdoc cref="Authorization"/>
protected override (string Name, string Value) Authorization => ("Authorization", $"token {AccessToken}");
/// <inheritdoc />
protected override async Task<GiteaRelease[]> GetReleases(bool includePrereleases)
{
@@ -97,7 +96,12 @@ namespace Velopack.Sources
var releasesPath = $"repos{RepoUri.AbsolutePath}/releases?limit={perPage}&page={page}&draft=false";
var baseUri = GetApiBaseUrl(RepoUri);
var getReleasesUri = new Uri(baseUri, releasesPath);
var response = await Downloader.DownloadString(getReleasesUri.ToString(), Authorization, "application/json").ConfigureAwait(false);
var response = await Downloader.DownloadString(getReleasesUri.ToString(),
new Dictionary<string, string> {
[Authorization.Name] = Authorization.Value,
["Accept"] = "application/json"
}
).ConfigureAwait(false);
var releases = CompiledJson.DeserializeGiteaReleaseList(response);
if (releases == null) return new GiteaRelease[0];
return releases.OrderByDescending(d => d.PublishedAt).Where(x => includePrereleases || !x.Prerelease).ToArray();

View File

@@ -87,6 +87,9 @@ namespace Velopack.Sources
{
}
/// <inheritdoc cref="Authorization"/>
protected override (string Name, string Value) Authorization => ("Authorization", $"Bearer {AccessToken}");
/// <inheritdoc />
protected override async Task<GithubRelease[]> GetReleases(bool includePrereleases)
{
@@ -96,7 +99,12 @@ namespace Velopack.Sources
var releasesPath = $"repos{RepoUri.AbsolutePath}/releases?per_page={perPage}&page={page}";
var baseUri = GetApiBaseUrl(RepoUri);
var getReleasesUri = new Uri(baseUri, releasesPath);
var response = await Downloader.DownloadString(getReleasesUri.ToString(), Authorization, "application/vnd.github.v3+json").ConfigureAwait(false);
var response = await Downloader.DownloadString(getReleasesUri.ToString(),
new Dictionary<string, string> {
[Authorization.Name] = Authorization.Value,
["Accept"] = "application/vnd.github.v3+json"
}
).ConfigureAwait(false);
var releases = CompiledJson.DeserializeGithubReleaseList(response);
if (releases == null) return Array.Empty<GithubRelease>();
return releases.OrderByDescending(d => d.PublishedAt).Where(x => includePrereleases || !x.Prerelease).ToArray();

View File

@@ -1,4 +1,5 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Velopack.Util;
@@ -98,6 +99,9 @@ namespace Velopack.Sources
/// </summary>
public class GitlabSource : GitBase<GitlabRelease>
{
/// <inheritdoc cref="Authorization"/>
protected override (string Name, string Value) Authorization => ("PRIVATE-TOKEN", AccessToken ?? string.Empty);
/// <inheritdoc cref="GitlabSource" />
/// <param name="repoUrl">
/// The URL of the GitLab repository to download releases from
@@ -156,7 +160,11 @@ namespace Velopack.Sources
var releasesPath = $"{RepoUri.AbsolutePath}/releases?per_page={perPage}&page={page}";
var baseUri = new Uri("https://gitlab.com");
var getReleasesUri = new Uri(baseUri, releasesPath);
var response = await Downloader.DownloadString(getReleasesUri.ToString(), Authorization).ConfigureAwait(false);
var response = await Downloader.DownloadString(getReleasesUri.ToString(),
new Dictionary<string, string> {
[Authorization.Name] = Authorization.Value,
["Accept"] = "application/json"
}).ConfigureAwait(false);
var releases = CompiledJson.DeserializeGitlabReleaseList(response);
if (releases == null) return new GitlabRelease[0];
return releases.OrderByDescending(d => d.ReleasedAt).Where(x => includePrereleases || !x.UpcomingRelease).ToArray();

View File

@@ -1,4 +1,5 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Net.Http;
@@ -8,7 +9,6 @@ using System.Threading.Tasks;
namespace Velopack.Sources
{
/// <inheritdoc cref="IFileDownloader"/>
public class HttpClientFileDownloader : IFileDownloader
{
@@ -18,9 +18,9 @@ namespace Velopack.Sources
public static ProductInfoHeaderValue UserAgent => new("Velopack", VelopackRuntimeInfo.VelopackNugetVersion.ToFullString());
/// <inheritdoc />
public virtual async Task DownloadFile(string url, string targetFile, Action<int> progress, string? authorization, string? accept, double timeout, CancellationToken cancelToken = default)
public virtual async Task DownloadFile(string url, string targetFile, Action<int> progress, IDictionary<string, string>? headers, double timeout, CancellationToken cancelToken = default)
{
using var client = CreateHttpClient(authorization, accept, timeout);
using var client = CreateHttpClient(headers, timeout);
try {
using (var fs = File.Open(targetFile, FileMode.Create)) {
@@ -36,9 +36,9 @@ namespace Velopack.Sources
}
/// <inheritdoc />
public virtual async Task<byte[]> DownloadBytes(string url, string? authorization, string? accept, double timeout)
public virtual async Task<byte[]> DownloadBytes(string url, IDictionary<string, string>? headers, double timeout)
{
using var client = CreateHttpClient(authorization, accept, timeout);
using var client = CreateHttpClient(headers, timeout);
try {
return await client.GetByteArrayAsync(url).ConfigureAwait(false);
@@ -50,9 +50,9 @@ namespace Velopack.Sources
}
/// <inheritdoc />
public virtual async Task<string> DownloadString(string url, string? authorization, string? accept, double timeout)
public virtual async Task<string> DownloadString(string url, IDictionary<string, string>? headers, double timeout)
{
using var client = CreateHttpClient(authorization, accept, timeout);
using var client = CreateHttpClient(headers, timeout);
try {
return await client.GetStringAsync(url).ConfigureAwait(false);
@@ -71,7 +71,7 @@ namespace Velopack.Sources
{
// https://stackoverflow.com/a/46497896/184746
// Get the http headers first to examine the content length
using var response = await client.GetAsync(requestUri, HttpCompletionOption.ResponseHeadersRead).ConfigureAwait(false);
using var response = await client.GetAsync(requestUri, HttpCompletionOption.ResponseHeadersRead, cancelToken).ConfigureAwait(false);
response.EnsureSuccessStatusCode();
var contentLength = response.Content.Headers.ContentLength;
@@ -123,16 +123,15 @@ namespace Velopack.Sources
/// <summary>
/// Creates a new <see cref="HttpClient"/> for every request.
/// </summary>
protected virtual HttpClient CreateHttpClient(string? authorization, string? accept, double timeout = 30)
protected virtual HttpClient CreateHttpClient(IDictionary<string, string>? headers, double timeout)
{
var client = new HttpClient(CreateHttpClientHandler());
client.DefaultRequestHeaders.UserAgent.Add(UserAgent);
if (authorization != null)
client.DefaultRequestHeaders.Add("Authorization", authorization);
if (accept != null)
client.DefaultRequestHeaders.Add("Accept", accept);
foreach (var header in headers ?? new Dictionary<string, string>())
{
client.DefaultRequestHeaders.Add(header.Key, header.Value);
}
client.Timeout = TimeSpan.FromMinutes(timeout);
return client;
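Note: callers now pass a single header dictionary instead of the separate authorization/accept strings. A minimal usage sketch against the new DownloadString signature shown above; the URL and token are placeholders.

    using System.Collections.Generic;
    using Velopack.Sources;

    var downloader = new HttpClientFileDownloader();
    var headers = new Dictionary<string, string> {
        ["Authorization"] = "Bearer <token>",   // placeholder token
        ["Accept"] = "application/json",
    };
    // One dictionary carries what used to be the 'authorization' and 'accept' parameters.
    string json = await downloader.DownloadString("https://example.com/releases.json", headers, timeout: 5);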

View File

@@ -1,4 +1,5 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
@@ -14,31 +15,26 @@ namespace Velopack.Sources
/// </summary>
/// <param name="url">The url which will be downloaded.</param>
/// <param name="targetFile">
/// The local path where the file will be stored.
/// If a file exists at this path, it will be overwritten.</param>
/// The local path where the file will be stored.
/// If a file exists at this path, it will be overwritten.</param>
/// <param name="progress">
/// A delegate for reporting download progress, with expected values from 0-100.
/// </param>
/// <param name="authorization">
/// Text to be sent in the 'Authorization' header of the request.
/// </param>
/// <param name="accept">
/// Text to be sent in the 'Accept' header of the request.
/// A delegate for reporting download progress, with expected values from 0-100.
/// </param>
/// <param name="headers">Headers that can be passed to Http Downloader, e.g. Accept or Authorization.</param>
/// <param name="timeout">
/// The maximum time in minutes to wait for the download to complete.
/// The maximum time in minutes to wait for the download to complete.
/// </param>
/// <param name="cancelToken">Optional token to cancel the request.</param>
Task DownloadFile(string url, string targetFile, Action<int> progress, string? authorization = null, string? accept = null, double timeout = 30, CancellationToken cancelToken = default);
Task DownloadFile(string url, string targetFile, Action<int> progress, IDictionary<string, string>? headers = null, double timeout = 30, CancellationToken cancelToken = default);
/// <summary>
/// Returns a byte array containing the contents of the file at the specified url
/// </summary>
Task<byte[]> DownloadBytes(string url, string? authorization = null, string? accept = null, double timeout = 30);
Task<byte[]> DownloadBytes(string url, IDictionary<string, string>? headers = null, double timeout = 30);
/// <summary>
/// Returns a string containing the contents of the specified url
/// </summary>
Task<string> DownloadString(string url, string? authorization = null, string? accept = null, double timeout = 30);
Task<string> DownloadString(string url, IDictionary<string, string>? headers = null, double timeout = 30);
}
}
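Note: a minimal sketch of a third-party IFileDownloader built against the new header-based signatures above, delegating to HttpClient. The class name is hypothetical and progress reporting is deliberately simplified.

    using System;
    using System.Collections.Generic;
    using System.IO;
    using System.Net.Http;
    using System.Threading;
    using System.Threading.Tasks;
    using Velopack.Sources;

    public class SimpleDownloader : IFileDownloader
    {
        private static HttpClient Create(IDictionary<string, string>? headers, double timeout)
        {
            var client = new HttpClient { Timeout = TimeSpan.FromMinutes(timeout) };
            foreach (var h in headers ?? new Dictionary<string, string>())
                client.DefaultRequestHeaders.Add(h.Key, h.Value);
            return client;
        }

        public async Task DownloadFile(string url, string targetFile, Action<int> progress,
            IDictionary<string, string>? headers = null, double timeout = 30, CancellationToken cancelToken = default)
        {
            using var client = Create(headers, timeout);
            using var stream = await client.GetStreamAsync(url);
            using var fs = File.Open(targetFile, FileMode.Create);
            await stream.CopyToAsync(fs, 81920, cancelToken);
            progress(100); // no granular progress in this sketch
        }

        public async Task<byte[]> DownloadBytes(string url, IDictionary<string, string>? headers = null, double timeout = 30)
        {
            using var client = Create(headers, timeout);
            return await client.GetByteArrayAsync(url);
        }

        public async Task<string> DownloadString(string url, IDictionary<string, string>? headers = null, double timeout = 30)
        {
            using var client = Create(headers, timeout);
            return await client.GetStringAsync(url);
        }
    }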

View File

@@ -27,7 +27,7 @@ namespace Velopack.Sources
/// metadata from this package may be provided to the remote server (such as package id,
/// or cpu architecture) to ensure that the correct package is downloaded for this user.
/// </param>
/// <returns>An array of <see cref="ReleaseEntry"/> objects that are available for download
/// <returns>An array of <see cref="VelopackAsset"/> objects that are available for download
/// and are applicable to this user.</returns>
Task<VelopackAssetFeed> GetReleaseFeed(IVelopackLogger logger, string? appId, string channel, Guid? stagingId = null, VelopackAsset? latestLocalRelease = null);

View File

@@ -1,11 +1,12 @@
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using NuGet.Versioning;
using Velopack.Compression;
using Velopack.Exceptions;
using Velopack.Locators;
using Velopack.Logging;
@@ -68,6 +69,9 @@ namespace Velopack
/// <summary> If true, UpdateManager should return the latest asset in the feed, even if that version is lower than the current version. </summary>
protected bool ShouldAllowVersionDowngrade { get; }
/// <summary> Sets the maximum number of deltas to consider before falling back to a full update. </summary>
protected int MaximumDeltasBeforeFallback { get; }
/// <summary>
/// Creates a new UpdateManager instance using the specified URL or file path to the releases feed, and the specified channel name.
/// </summary>
@@ -95,6 +99,7 @@ namespace Velopack
Log = Locator.Log;
Channel = options?.ExplicitChannel ?? DefaultChannel;
ShouldAllowVersionDowngrade = options?.AllowVersionDowngrade ?? false;
MaximumDeltasBeforeFallback = options?.MaximumDeltasBeforeFallback ?? 10;
}
/// <inheritdoc cref="CheckForUpdatesAsync()"/>
@@ -155,7 +160,7 @@ namespace Velopack
/// <summary>
/// Given a feed of releases, and the latest local full release, and the latest remote full release, this method will return a delta
/// update strategy to be used by <see cref="DownloadUpdatesAsync(UpdateInfo, Action{int}?, bool, CancellationToken)"/>.
/// update strategy to be used by <see cref="DownloadUpdatesAsync(UpdateInfo, Action{int}?, CancellationToken)"/>.
/// </summary>
protected virtual UpdateInfo CreateDeltaUpdateStrategy(VelopackAsset[] feed, VelopackAsset? latestLocalFull, VelopackAsset latestRemoteFull)
{
@@ -168,7 +173,6 @@ namespace Velopack
}
EnsureInstalled();
var installedVer = CurrentVersion!;
var matchingRemoteDelta = feed.Where(r => r.Type == VelopackAssetType.Delta && r.Version == latestRemoteFull.Version).FirstOrDefault();
if (matchingRemoteDelta == null) {
@@ -184,10 +188,10 @@ namespace Velopack
return new UpdateInfo(latestRemoteFull, false, latestLocalFull, deltas);
}
/// <inheritdoc cref="DownloadUpdatesAsync(UpdateInfo, Action{int}, bool, CancellationToken)"/>
public void DownloadUpdates(UpdateInfo updates, Action<int>? progress = null, bool ignoreDeltas = false)
/// <inheritdoc cref="DownloadUpdatesAsync(UpdateInfo, Action{int}, CancellationToken)"/>
public void DownloadUpdates(UpdateInfo updates, Action<int>? progress = null)
{
DownloadUpdatesAsync(updates, progress, ignoreDeltas)
DownloadUpdatesAsync(updates, progress)
.ConfigureAwait(false).GetAwaiter().GetResult();
}
@@ -199,10 +203,8 @@ namespace Velopack
/// </summary>
/// <param name="updates">The updates to download. Should be retrieved from <see cref="CheckForUpdates"/>.</param>
/// <param name="progress">The progress callback. Will be called with values from 0-100.</param>
/// <param name="ignoreDeltas">Whether to attempt downloading delta's or skip to full package download.</param>
/// <param name="cancelToken">An optional cancellation token if you wish to stop this operation.</param>
public virtual async Task DownloadUpdatesAsync(
UpdateInfo updates, Action<int>? progress = null, bool ignoreDeltas = false, CancellationToken cancelToken = default)
public virtual async Task DownloadUpdatesAsync(UpdateInfo updates, Action<int>? progress = null, CancellationToken cancelToken = default)
{
progress ??= (_ => { });
@@ -231,66 +233,30 @@ namespace Velopack
EnsureInstalled();
using var _mut = await AcquireUpdateLock().ConfigureAwait(false);
var appTempDir = Locator.AppTempDir!;
var completeFile = Locator.GetLocalPackagePath(targetRelease);
var incompleteFile = completeFile + ".partial";
// if the package already exists on disk, we can skip the download.
if (File.Exists(completeFile)) {
Log.Info($"Package already exists on disk: '{completeFile}', nothing to do.");
return;
}
try {
// if the package already exists on disk, we can skip the download.
if (File.Exists(completeFile)) {
Log.Info($"Package already exists on disk: '{completeFile}', verifying checksum...");
try {
VerifyPackageChecksum(targetRelease, completeFile);
Log.Info("Package checksum verified, skipping download.");
return;
} catch (ChecksumFailedException ex) {
Log.Warn(ex, $"Checksum failed for file '{completeFile}'. Deleting and starting over.");
}
}
var deltasSize = updates.DeltasToTarget.Sum(x => x.Size);
var deltasCount = updates.DeltasToTarget.Length;
var deltasCount = updates.DeltasToTarget.Count();
try {
if (updates.BaseRelease?.FileName != null && deltasCount > 0) {
if (ignoreDeltas) {
Log.Info("Ignoring delta updates (ignoreDeltas parameter)");
if (deltasCount > MaximumDeltasBeforeFallback || deltasSize > targetRelease.Size) {
Log.Info(
$"There are too many delta's ({deltasCount} > {MaximumDeltasBeforeFallback}) or the sum of their size ({deltasSize} > {targetRelease.Size}) is too large. " +
$"Only full update will be available.");
} else {
if (deltasCount > 10 || deltasSize > targetRelease.Size) {
Log.Info(
$"There are too many delta's ({deltasCount} > 10) or the sum of their size ({deltasSize} > {targetRelease.Size}) is too large. " +
$"Only full update will be available.");
} else {
using var _1 = TempUtil.GetTempDirectory(out var deltaStagingDir, appTempDir);
string basePackagePath = Locator.GetLocalPackagePath(updates.BaseRelease);
if (!File.Exists(basePackagePath))
throw new Exception($"Unable to find base package {basePackagePath} for delta update.");
EasyZip.ExtractZipToDirectory(Log, basePackagePath, deltaStagingDir);
reportProgress(10);
await DownloadAndApplyDeltaUpdates(
deltaStagingDir,
updates,
x => reportProgress(CoreUtil.CalculateProgress(x, 10, 80)),
cancelToken)
.ConfigureAwait(false);
reportProgress(80);
Log.Info("Delta updates completed, creating final update package.");
File.Delete(incompleteFile);
await EasyZip.CreateZipFromDirectoryAsync(
Log,
incompleteFile,
deltaStagingDir,
x => reportProgress(CoreUtil.CalculateProgress(x, 80, 100)),
cancelToken: cancelToken).ConfigureAwait(false);
File.Delete(completeFile);
File.Move(incompleteFile, completeFile);
Log.Info("Delta release preparations complete. Package moved to: " + completeFile);
reportProgress(100);
return; // success!
}
await DownloadAndApplyDeltaUpdates(updates, incompleteFile, progress, cancelToken).ConfigureAwait(false);
IoUtil.MoveFile(incompleteFile, completeFile, true);
Log.Info("Delta update download complete. Package moved to: " + completeFile);
return; // success!
}
}
} catch (Exception ex) when (!VelopackRuntimeInfo.InUnitTestRunner) {
@@ -336,26 +302,24 @@ namespace Velopack
/// Given an update object containing one or more delta packages, downloads the deltas and applies
/// them to the latest local full package to reconstruct the target full package.
/// </summary>
/// <param name="extractedBasePackage">A folder containing the application files to apply the deltas to.</param>
/// <param name="updates">An update object containing one or more deltas.</param>
/// <param name="targetFile">The reconstructed full update after all deltas are applied.</param>
/// <param name="progress">A callback reporting delta application progress (from 0-100).</param>
/// <param name="cancelToken">A token to use to cancel the request.</param>
protected virtual async Task DownloadAndApplyDeltaUpdates(string extractedBasePackage, UpdateInfo updates, Action<int> progress,
protected virtual async Task DownloadAndApplyDeltaUpdates(UpdateInfo updates, string targetFile, Action<int> progress,
CancellationToken cancelToken)
{
var releasesToDownload = updates.DeltasToTarget.OrderBy(d => d.Version).ToArray();
var appTempDir = Locator.AppTempDir!;
var updateExe = Locator.UpdateExePath!;
// downloading accounts for 0%-50% of progress
// downloading accounts for 0%-70% of progress
double current = 0;
double toIncrement = 100.0 / releasesToDownload.Length;
await releasesToDownload.ForEachAsync(
async x => {
var targetFile = Locator.GetLocalPackagePath(x);
double component = 0;
Log.Debug($"Downloading delta version {x.Version}");
Log.Info($"Downloading delta {x.Version}");
await Source.DownloadReleaseEntry(
Log,
x,
@@ -365,7 +329,7 @@ namespace Velopack
current -= component;
component = toIncrement / 100.0 * p;
var progressOfStep = (int) Math.Round(current += component);
progress(CoreUtil.CalculateProgress(progressOfStep, 0, 50));
progress(CoreUtil.CalculateProgress(progressOfStep, 0, 70));
}
},
cancelToken).ConfigureAwait(false);
@@ -374,23 +338,35 @@ namespace Velopack
Log.Debug($"Download complete for delta version {x.Version}");
}).ConfigureAwait(false);
Log.Info("All delta packages downloaded and verified, applying them to the base now. The delta staging dir is: " + extractedBasePackage);
Log.Info("All delta packages downloaded and verified.");
Log.Info($"Applying {releasesToDownload.Length} patches to {updates.BaseRelease?.FileName}.");
// applying deltas accounts for 50%-100% of progress
double progressStepSize = 100d / releasesToDownload.Length;
var builder = new DeltaUpdateExe(Log, appTempDir, updateExe);
for (var i = 0; i < releasesToDownload.Length; i++) {
cancelToken.ThrowIfCancellationRequested();
var rel = releasesToDownload[i];
double baseProgress = i * progressStepSize;
var packageFile = Locator.GetLocalPackagePath(rel);
builder.ApplyDeltaPackageFast(
extractedBasePackage,
packageFile,
x => {
var progressOfStep = (int) (baseProgress + (progressStepSize * (x / 100d)));
progress(CoreUtil.CalculateProgress(progressOfStep, 50, 100));
});
// applying deltas accounts for 70%-100% of progress
var baseFile = Locator.GetLocalPackagePath(updates.BaseRelease!);
var args = new List<string> {
"patch",
"--old",
baseFile,
"--output",
targetFile,
};
foreach (var x in releasesToDownload) {
args.Add("--delta");
args.Add(Locator.GetLocalPackagePath(x));
}
var psi = new ProcessStartInfo(updateExe);
psi.AppendArgumentListSafe(args, out _);
psi.CreateNoWindow = true;
var p = psi.StartRedirectOutputToILogger(Log, VelopackLogLevel.Debug);
if (!p.WaitForExit((int) TimeSpan.FromMinutes(5).TotalMilliseconds)) {
p.Kill();
throw new TimeoutException("patch process timed out (5min).");
}
if (p.ExitCode != 0) {
throw new Exception($"patch process failed with exit code {p.ExitCode}.");
}
progress(100);
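Note: with the ignoreDeltas parameter removed, delta behaviour is configured once via the options object (MaximumDeltasBeforeFallback) and the manager decides between delta and full downloads. A minimal usage sketch follows, assuming the UpdateOptions type shown in the next file, a placeholder feed URL, and the ApplyUpdatesAndRestart overload that accepts a VelopackAsset.

    // Placeholder URL; the options shape matches the UpdateOptions diff below.
    var options = new UpdateOptions {
        ExplicitChannel = "beta",
        AllowVersionDowngrade = false,
        MaximumDeltasBeforeFallback = 5, // fall back to a full package after 5 deltas
    };
    var um = new UpdateManager("https://example.com/releases", options);

    var updates = await um.CheckForUpdatesAsync();
    if (updates != null) {
        await um.DownloadUpdatesAsync(updates, p => Console.WriteLine($"{p}%"));
        um.ApplyUpdatesAndRestart(updates.TargetFullRelease);
    }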

View File

@@ -22,5 +22,11 @@
/// without having to reinstall the application.
/// </summary>
public string? ExplicitChannel { get; set; }
/// <summary>
/// Sets the maximum number of deltas to consider before falling back to a full update.
/// The default is 10. Set to a negative number to disable deltas.
/// </summary>
public int? MaximumDeltasBeforeFallback { get; set; }
}
}

View File

@@ -9,7 +9,7 @@ using System.Threading.Tasks;
using Velopack.Logging;
using Velopack.Util;
namespace Velopack.Compression
namespace Velopack.Util
{
internal static class EasyZip
{

View File

@@ -15,7 +15,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="NuGet.Versioning" Version="6.13.2" />
<PackageReference Include="NuGet.Versioning" Version="6.14.0" />
</ItemGroup>
<ItemGroup Condition=" $(TargetFramework.StartsWith('net4')) ">

View File

@@ -24,7 +24,7 @@
"typescript": "^5.3.3"
},
"engines": {
"node": ">=18.0.0 <=22.14.0"
"node": ">=18.0.0 <=22.16.0"
}
},
"node_modules/@ampproject/remapping": {
@@ -1186,9 +1186,9 @@
}
},
"node_modules/@types/node": {
"version": "22.14.0",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.14.0.tgz",
"integrity": "sha512-Kmpl+z84ILoG+3T/zQFyAJsU6EPTmOCj8/2+83fSN6djd6I4o7uOuGIH6vq3PrjY5BGitSbFuMN18j3iknubbA==",
"version": "22.15.21",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.21.tgz",
"integrity": "sha512-EV/37Td6c+MgKAbkcLG6vqZ2zEYHD7bvSrzqqs2RIhbA6w3x+Dqz8MZM3sP6kGTeLrdoOgKZe+Xja7tUB2DNkQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -4161,9 +4161,9 @@
}
},
"node_modules/ts-jest": {
"version": "29.3.1",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.3.1.tgz",
"integrity": "sha512-FT2PIRtZABwl6+ZCry8IY7JZ3xMuppsEV9qFVHOVe8jDzggwUZ9TsM4chyJxL9yi6LvkqcZYU3LmapEE454zBQ==",
"version": "29.3.4",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.3.4.tgz",
"integrity": "sha512-Iqbrm8IXOmV+ggWHOTEbjwyCf2xZlUMv5npExksXohL+tk8va4Fjhb+X2+Rt9NBmgO7bJ8WpnMLOwih/DnMlFA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -4174,8 +4174,8 @@
"json5": "^2.2.3",
"lodash.memoize": "^4.1.2",
"make-error": "^1.3.6",
"semver": "^7.7.1",
"type-fest": "^4.38.0",
"semver": "^7.7.2",
"type-fest": "^4.41.0",
"yargs-parser": "^21.1.1"
},
"bin": {
@@ -4211,9 +4211,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
"version": "7.7.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz",
"integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==",
"version": "7.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
"integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"dev": true,
"license": "ISC",
"bin": {
@@ -4224,9 +4224,9 @@
}
},
"node_modules/ts-jest/node_modules/type-fest": {
"version": "4.39.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.39.1.tgz",
"integrity": "sha512-uW9qzd66uyHYxwyVBYiwS4Oi0qZyUqwjU+Oevr6ZogYiXt99EOYtwvzMSLw1c3lYo2HzJsep/NB23iEVEgjG/w==",
"version": "4.41.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz",
"integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==",
"dev": true,
"license": "(MIT OR CC0-1.0)",
"engines": {
@@ -4304,9 +4304,9 @@
}
},
"node_modules/typescript": {
"version": "5.8.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz",
"integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==",
"version": "5.8.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz",
"integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
"dev": true,
"license": "Apache-2.0",
"bin": {

View File

@@ -36,7 +36,7 @@
"author": "Velopack Ltd, Caelan Sayler",
"license": "MIT",
"engines": {
"node": ">=18.0.0 <=22.14.0"
"node": ">=18.0.0 <=22.16.0"
},
"files": [
"lib/**/*.ts",

View File

@@ -9,6 +9,14 @@ export type UpdateInfo = {
* The available version that we are updating to.
*/
TargetFullRelease: VelopackAsset,
/**
* The base release that this update is based on. This is only available if the update is a delta update.
*/
BaseRelease: VelopackAsset | null,
/**
* The list of delta updates that can be applied to the base version to get to the target version.
*/
DeltasToTarget: Array<VelopackAsset>,
/**
* True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
* In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be

View File

@@ -12,7 +12,7 @@ export type UpdateOptions = {
*/
AllowVersionDowngrade: boolean,
/**
* **This option should usually be left None**. <br/>
* **This option should usually be left None**.
* Overrides the default channel used to fetch updates.
* The default channel will be whatever channel was specified on the command line when building this release.
* For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
@@ -20,4 +20,9 @@ AllowVersionDowngrade: boolean,
* allows you to explicitly switch channels, for example if the user wished to switch back to the 'stable' channel
* without having to reinstall the application.
*/
ExplicitChannel: string | null, };
ExplicitChannel: string | null,
/**
* Sets the maximum number of deltas to consider before falling back to a full update.
* The default is 10. Set to a negative number (e.g. -1) to disable deltas.
*/
MaximumDeltasBeforeFallback: number, };

View File

@@ -22,6 +22,7 @@ test("UpdateManager detects local update", async () => {
const options: UpdateOptions = {
ExplicitChannel: "beta",
AllowVersionDowngrade: false,
MaximumDeltasBeforeFallback: 10,
};
const um = new UpdateManager(tmpDir, options, locator);
@@ -54,6 +55,7 @@ test("UpdateManager downloads full update", async () => {
const options: UpdateOptions = {
ExplicitChannel: "beta",
AllowVersionDowngrade: false,
MaximumDeltasBeforeFallback: 10,
};
const um = new UpdateManager(feedDir, options, locator);

View File

@@ -14,15 +14,14 @@ edition.workspace = true
rust-version.workspace = true
[features]
default = ["zstd"]
delta = ["zstd"]
default = []
async = ["async-std"]
typescript = ["ts-rs"]
file-logging = ["log-panics", "simplelog", "file-rotate", "time"]
public-utils = []
[package.metadata.docs.rs]
features = ["async", "delta"]
features = ["async"]
[lib]
name = "velopack"
@@ -55,9 +54,6 @@ uuid.workspace = true
# typescript
ts-rs = { workspace = true, optional = true }
# delta packages
zstd = { workspace = true, optional = true }
# async
async-std = { workspace = true, optional = true }

View File

@@ -1,48 +0,0 @@
use std::{fs, io, path::Path};
use crate::Error;
/// Applies a zstd patch to a single file by loading the patch as a dictionary.
pub fn zstd_patch_single<P1: AsRef<Path>, P2: AsRef<Path>, P3: AsRef<Path>>(old_file: P1, patch_file: P2, output_file: P3) -> Result<(), Error> {
let old_file = old_file.as_ref();
let patch_file = patch_file.as_ref();
let output_file = output_file.as_ref();
if !old_file.exists() {
return Err(Error::FileNotFound(old_file.to_string_lossy().to_string()));
}
if !patch_file.exists() {
return Err(Error::FileNotFound(patch_file.to_string_lossy().to_string()));
}
let dict = fs::read(old_file)?;
info!("Loading Dictionary (Size: {})", dict.len());
let patch = fs::OpenOptions::new().read(true).open(patch_file)?;
let patch_reader = io::BufReader::new(patch);
let mut decoder = zstd::Decoder::with_dictionary(patch_reader, &dict)?;
let window_log = fio_highbit64(dict.len() as u64) + 1;
if window_log >= 27 {
info!("Large File detected. Overriding windowLog to {}", window_log);
decoder.window_log_max(window_log)?;
}
info!("Decoder loaded. Beginning patch...");
let mut output = fs::OpenOptions::new().write(true).create(true).truncate(true).open(output_file)?;
io::copy(&mut decoder, &mut output)?;
info!("Patch applied successfully.");
Ok(())
}
fn fio_highbit64(v: u64) -> u32 {
let mut count: u32 = 0;
let mut v = v;
v >>= 1;
while v > 0 {
v >>= 1;
count += 1;
}
return count;
}

View File

@@ -78,8 +78,6 @@
#![warn(missing_docs)]
macro_rules! maybe_pub {
($($mod:ident),*) => {
$(
@@ -118,7 +116,6 @@ macro_rules! maybe_pub_os {
};
}
mod app;
pub use app::*;
@@ -131,7 +128,7 @@ pub mod locator;
/// Sources are abstractions for custom update sources (eg. url, local file, github releases, etc).
pub mod sources;
maybe_pub!(download, bundle, delta, constants, lockfile, logging, misc);
maybe_pub!(download, bundle, constants, lockfile, logging, misc);
maybe_pub_os!(process, "process_win.rs", "process_unix.rs");
#[macro_use]
@@ -153,6 +150,10 @@ pub enum Error {
FileNotFound(String),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Checksum did not match for {0} (expected {1}, actual {2})")]
ChecksumInvalid(String, String, String),
#[error("Size did not match for {0} (expected {1}, actual {2})")]
SizeInvalid(String, u64, u64),
#[error("Zip error: {0}")]
Zip(#[from] zip::result::ZipError),
#[error("Network error: {0}")]

View File

@@ -545,7 +545,7 @@ pub fn find_latest_full_package(packages_dir: &PathBuf) -> Option<(PathBuf, Mani
info!("Attempting to auto-detect package in: {}", packages_dir);
let mut package: Option<(PathBuf, Manifest)> = None;
let search_glob = format!("{}/*.nupkg", packages_dir);
let search_glob = format!("{}/*-full.nupkg", packages_dir);
if let Ok(paths) = glob::glob(search_glob.as_str()) {
for path in paths.into_iter().flatten() {
trace!("Checking package: '{}'", path.to_string_lossy());

View File

@@ -76,12 +76,18 @@ pub fn default_logfile_path<L: TryInto<VelopackLocator>>(locator: L) -> PathBuf
/// It can only be called once per process, and should be called early in the process lifecycle.
/// Future calls to this function will fail.
#[cfg(feature = "file-logging")]
pub fn init_logging(process_name: &str, file: Option<&PathBuf>, console: bool, verbose: bool, custom_log_cb: Option<Box<dyn SharedLogger>>) {
pub fn init_logging(
process_name: &str,
file: Option<&PathBuf>,
console: bool,
verbose: bool,
custom_log_cb: Option<Box<dyn SharedLogger>>,
) {
let mut loggers: Vec<Box<dyn SharedLogger>> = Vec::new();
if let Some(cb) = custom_log_cb {
loggers.push(cb);
}
let color_choice = ColorChoice::Never;
if console {
let console_level = if verbose { LevelFilter::Debug } else { LevelFilter::Info };
@@ -105,6 +111,12 @@ pub fn init_logging(process_name: &str, file: Option<&PathBuf>, console: bool, v
}
}
/// Initialize a Trace / Console logger for the current process.
#[cfg(feature = "file-logging")]
pub fn trace_logger() {
TermLogger::init(LevelFilter::Trace, get_config(None), TerminalMode::Mixed, ColorChoice::Never).unwrap();
}
#[cfg(feature = "file-logging")]
fn get_config(process_name: Option<&str>) -> Config {
let mut c = ConfigBuilder::default();

View File

@@ -1,9 +1,7 @@
use std::process::exit;
#[cfg(target_os = "windows")]
use std::{fs, sync::mpsc::Sender};
use semver::Version;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::{fs, process::exit, sync::mpsc::Sender};
#[cfg(feature = "async")]
use async_std::channel::Sender as AsyncSender;
@@ -11,9 +9,10 @@ use async_std::channel::Sender as AsyncSender;
use async_std::task::JoinHandle;
use crate::{
bundle::Manifest,
constants,
locator::{self, LocationContext, VelopackLocator, VelopackLocatorConfig},
misc, process,
misc,
sources::UpdateSource,
Error,
};
@@ -80,12 +79,26 @@ pub struct VelopackAsset {
pub struct UpdateInfo {
/// The available version that we are updating to.
pub TargetFullRelease: VelopackAsset,
/// The base release that this update is based on. This is only available if the update is a delta update.
pub BaseRelease: Option<VelopackAsset>,
/// The list of delta updates that can be applied to the base version to get to the target version.
pub DeltasToTarget: Vec<VelopackAsset>,
/// True if the update is a version downgrade or lateral move (such as when switching channels to the same version number).
/// In this case, only full updates are allowed, and any local packages on disk newer than the downloaded version will be
/// deleted.
pub IsDowngrade: bool,
}
impl UpdateInfo {
pub(crate) fn new_full(target: VelopackAsset, is_downgrade: bool) -> UpdateInfo {
UpdateInfo { TargetFullRelease: target, BaseRelease: None, DeltasToTarget: Vec::new(), IsDowngrade: is_downgrade }
}
pub(crate) fn new_delta(target: VelopackAsset, base: VelopackAsset, deltas: Vec<VelopackAsset>) -> UpdateInfo {
UpdateInfo { TargetFullRelease: target, BaseRelease: Some(base), DeltasToTarget: deltas, IsDowngrade: false }
}
}
impl AsRef<VelopackAsset> for UpdateInfo {
fn as_ref(&self) -> &VelopackAsset {
&self.TargetFullRelease
@@ -109,7 +122,7 @@ pub struct UpdateOptions {
/// ExplicitChannel to switch channels to another channel where the latest version on that
/// channel is lower than the current version.
pub AllowVersionDowngrade: bool,
/// **This option should usually be left None**. <br/>
/// **This option should usually be left None**.
/// Overrides the default channel used to fetch updates.
/// The default channel will be whatever channel was specified on the command line when building this release.
/// For example, if the current release was packaged with '--channel beta', then the default channel will be 'beta'.
@@ -117,6 +130,9 @@ pub struct UpdateOptions {
/// allows you to explicitly switch channels, for example if the user wished to switch back to the 'stable' channel
/// without having to reinstall the application.
pub ExplicitChannel: Option<String>,
/// Sets the maximum number of deltas to consider before falling back to a full update.
/// The default is 10. Set to a negative number (e.g. -1) to disable deltas.
pub MaximumDeltasBeforeFallback: i32,
}
/// Provides functionality for checking for updates, downloading updates, and applying updates to the current application.
@@ -175,7 +191,11 @@ impl UpdateManager {
} else {
locator::auto_locate_app_manifest(LocationContext::FromCurrentExe)?
};
Ok(UpdateManager { options: options.unwrap_or_default(), source, locator })
let mut options = options.unwrap_or_default();
if options.MaximumDeltasBeforeFallback == 0 {
options.MaximumDeltasBeforeFallback = 10;
}
Ok(UpdateManager { options, source, locator })
}
fn get_practical_channel(&self) -> String {
@@ -216,25 +236,27 @@ impl UpdateManager {
/// VelopackAsset can be applied by calling apply_updates_and_restart or wait_exit_then_apply_updates.
pub fn get_update_pending_restart(&self) -> Option<VelopackAsset> {
let packages_dir = self.locator.get_packages_dir();
if let Some((path, manifest)) = locator::find_latest_full_package(&packages_dir) {
if manifest.version > self.locator.get_manifest_version() {
let (sha1, sha256) = misc::calculate_sha1_sha256(&path).unwrap_or_default();
return Some(VelopackAsset {
PackageId: manifest.id,
Version: manifest.version.to_string(),
Type: "Full".to_string(),
FileName: path.file_name().unwrap().to_string_lossy().to_string(),
SHA1: sha1,
SHA256: sha256,
Size: path.metadata().map(|m| m.len()).unwrap_or(0),
NotesMarkdown: manifest.release_notes,
NotesHtml: manifest.release_notes_html,
});
}
if let Some((path, manifest)) = locator::find_latest_full_package(&packages_dir) {
if manifest.version > self.locator.get_manifest_version() {
return Some(self.local_manifest_to_asset(&manifest, &path));
}
}
None
}
fn local_manifest_to_asset(&self, manifest: &Manifest, path: &PathBuf) -> VelopackAsset {
let (sha1, sha256) = misc::calculate_sha1_sha256(path).unwrap_or_default();
VelopackAsset {
PackageId: manifest.id.clone(),
Version: manifest.version.to_string(),
Type: "Full".to_string(),
FileName: path.file_name().unwrap().to_string_lossy().to_string(),
SHA1: sha1,
SHA256: sha256,
Size: path.metadata().map(|m| m.len()).unwrap_or(0),
NotesMarkdown: manifest.release_notes.clone(),
NotesHtml: manifest.release_notes_html.clone(),
}
}
/// Get a list of available remote releases from the package source.
pub fn get_release_feed(&self) -> Result<VelopackAssetFeed, Error> {
let channel = self.get_practical_channel();
@@ -265,9 +287,9 @@ impl UpdateManager {
return Ok(UpdateCheck::RemoteIsEmpty);
}
let mut latest: Option<VelopackAsset> = None;
let mut latest: Option<&VelopackAsset> = None;
let mut latest_version: Version = Version::parse("0.0.0")?;
for asset in assets {
for asset in &assets {
if let Ok(sv) = Version::parse(&asset.Version) {
if asset.Type.eq_ignore_ascii_case("Full") {
debug!("Found full release: {} ({}).", asset.FileName, sv.to_string());
@@ -290,21 +312,66 @@ impl UpdateManager {
if remote_version > app_version {
info!("Found newer remote release available ({} -> {}).", app_version, remote_version);
Ok(UpdateCheck::UpdateAvailable(UpdateInfo { TargetFullRelease: remote_asset, IsDowngrade: false }))
Ok(UpdateCheck::UpdateAvailable(self.create_delta_update_strategy(&assets, (remote_asset, remote_version))))
} else if remote_version < app_version && allow_downgrade {
info!("Found older remote release available and downgrade is enabled ({} -> {}).", app_version, remote_version);
Ok(UpdateCheck::UpdateAvailable(UpdateInfo { TargetFullRelease: remote_asset, IsDowngrade: true }))
Ok(UpdateCheck::UpdateAvailable(UpdateInfo::new_full(remote_asset.clone(), true)))
} else if remote_version == app_version && allow_downgrade && is_non_default_channel {
info!(
"Latest remote release is the same version of a different channel, and downgrade is enabled ({} -> {}, {} -> {}).",
app_version, remote_version, app_channel, practical_channel
);
Ok(UpdateCheck::UpdateAvailable(UpdateInfo { TargetFullRelease: remote_asset, IsDowngrade: true }))
Ok(UpdateCheck::UpdateAvailable(UpdateInfo::new_full(remote_asset.clone(), true)))
} else {
Ok(UpdateCheck::NoUpdateAvailable)
}
}
fn create_delta_update_strategy(
&self,
velopack_asset_feed: &Vec<VelopackAsset>,
latest_remote: (&VelopackAsset, Version),
) -> UpdateInfo {
let packages_dir = self.locator.get_packages_dir();
let latest_local = locator::find_latest_full_package(&packages_dir);
if latest_local.is_none() {
info!("There is no local/base package available for this update, so delta updates will be disabled.");
return UpdateInfo::new_full(latest_remote.0.clone(), false);
}
let (latest_local_path, latest_local_manifest) = latest_local.unwrap();
let local_asset = self.local_manifest_to_asset(&latest_local_manifest, &latest_local_path);
let assets_and_versions: Vec<(&VelopackAsset, Version)> =
velopack_asset_feed.iter().filter_map(|asset| Version::parse(&asset.Version).ok().map(|ver| (asset, ver))).collect();
let matching_latest_delta =
assets_and_versions.iter().find(|(asset, version)| asset.Type.eq_ignore_ascii_case("Delta") && version == &latest_remote.1);
if matching_latest_delta.is_none() {
info!("No matching delta update found for release {}, so deltas will be disabled.", latest_remote.1);
return UpdateInfo::new_full(latest_remote.0.clone(), false);
}
let mut remotes_greater_than_local = assets_and_versions
.iter()
.filter(|(asset, _version)| asset.Type.eq_ignore_ascii_case("Delta"))
.filter(|(_asset, version)| version > &latest_local_manifest.version && version <= &latest_remote.1)
.collect::<Vec<_>>();
remotes_greater_than_local.sort_by(|a, b| a.1.cmp(&b.1));
let remotes_greater_than_local = remotes_greater_than_local.iter().map(|obj| obj.0.clone()).collect::<Vec<VelopackAsset>>();
info!(
"Found {} delta updates between {} and {}.",
remotes_greater_than_local.len(),
latest_local_manifest.version,
latest_remote.1
);
UpdateInfo::new_delta(latest_remote.0.clone(), local_asset, remotes_greater_than_local)
}
/// Checks for updates, returning None if there are none available. If there are updates available, this method will return an
/// UpdateInfo object containing the latest available release, and any delta updates that can be applied if they are available.
#[cfg(feature = "async")]
@@ -326,7 +393,7 @@ impl UpdateManager {
fs::create_dir_all(packages_dir)?;
let final_target_file = packages_dir.join(name);
let partial_file = packages_dir.join(format!("{}.partial", name));
let partial_file = final_target_file.with_extension("partial");
if final_target_file.exists() {
info!("Package already exists on disk, skipping download: '{}'", final_target_file.to_string_lossy());
@@ -335,10 +402,10 @@ impl UpdateManager {
let old_nupkg_pattern = format!("{}/*.nupkg", packages_dir.to_string_lossy());
let old_partial_pattern = format!("{}/*.partial", packages_dir.to_string_lossy());
let delta_pattern = format!("{}/*-delta.nupkg", packages_dir.to_string_lossy());
let mut to_delete = Vec::new();
fn find_files_to_delete(pattern: &str, to_delete: &mut Vec<String>) {
info!("Searching for packages to clean: '{}'", pattern);
match glob::glob(pattern) {
Ok(paths) => {
for path in paths.into_iter().flatten() {
@@ -354,12 +421,26 @@ impl UpdateManager {
find_files_to_delete(&old_nupkg_pattern, &mut to_delete);
find_files_to_delete(&old_partial_pattern, &mut to_delete);
self.source.download_release_entry(&update.TargetFullRelease, &partial_file.to_string_lossy(), progress)?;
info!("Successfully placed file: '{}'", partial_file.to_string_lossy());
if update.BaseRelease.is_some() && !update.DeltasToTarget.is_empty() {
info!("Beginning delta update process.");
if let Err(e) = self.download_and_apply_delta_updates(update, &partial_file, progress.clone()) {
error!("Error downloading delta updates: {}", e);
info!("Falling back to full update...");
self.source.download_release_entry(&update.TargetFullRelease, &partial_file.to_string_lossy(), progress)?;
self.verify_package_checksum(&partial_file, &update.TargetFullRelease)?;
info!("Successfully downloaded file: '{}'", partial_file.to_string_lossy());
}
} else {
self.source.download_release_entry(&update.TargetFullRelease, &partial_file.to_string_lossy(), progress)?;
self.verify_package_checksum(&partial_file, &update.TargetFullRelease)?;
info!("Successfully downloaded file: '{}'", partial_file.to_string_lossy());
}
info!("Renaming partial file to final target: '{}'", final_target_file.to_string_lossy());
fs::rename(&partial_file, &final_target_file)?;
find_files_to_delete(&delta_pattern, &mut to_delete);
// extract new Update.exe on Windows only
#[cfg(target_os = "windows")]
match crate::bundle::load_bundle_from_file(&final_target_file) {
@@ -383,6 +464,74 @@ impl UpdateManager {
Ok(())
}
fn download_and_apply_delta_updates(
&self,
update: &UpdateInfo,
target_file: &PathBuf,
progress: Option<Sender<i16>>,
) -> Result<(), Error> {
let packages_dir = self.locator.get_packages_dir();
let base_release_path = packages_dir.join(&update.BaseRelease.as_ref().unwrap().FileName);
let base_release_path = base_release_path.to_string_lossy().to_string();
let output_path = target_file.to_string_lossy().to_string();
let mut args: Vec<String> =
["patch", "--old", &base_release_path, "--output", &output_path].iter().map(|s| s.to_string()).collect();
for (i, delta) in update.DeltasToTarget.iter().enumerate() {
let delta_file = packages_dir.join(&delta.FileName);
let partial_file = delta_file.with_extension("partial");
info!("Downloading delta package: '{}'", &delta.FileName);
self.source.download_release_entry(&delta, &partial_file.to_string_lossy(), None)?;
self.verify_package_checksum(&partial_file, delta)?;
fs::rename(&partial_file, &delta_file)?;
debug!("Successfully downloaded file: '{}'", &delta.FileName);
if let Some(progress) = &progress {
let _ = progress.send(((i as f64 / update.DeltasToTarget.len() as f64) * 70.0) as i16);
}
args.push("--delta".to_string());
args.push(delta_file.to_string_lossy().to_string());
}
info!("Applying {} patches to {}.", update.DeltasToTarget.len(), output_path);
if let Some(progress) = &progress {
let _ = progress.send(70);
}
let output = std::process::Command::new(self.locator.get_update_path()).args(args).output()?;
if output.status.success() {
info!("Successfully applied delta updates.");
} else {
let error_message = String::from_utf8_lossy(&output.stderr);
error!("Error applying delta updates: {}", error_message);
return Err(Error::Generic(error_message.to_string()));
}
if let Some(progress) = &progress {
let _ = progress.send(100);
}
Ok(())
}
fn verify_package_checksum(&self, file: &PathBuf, asset: &VelopackAsset) -> Result<(), Error> {
let file_size = file.metadata()?.len();
if file_size != asset.Size {
error!("File size mismatch for file '{}': expected {}, got {}", file.to_string_lossy(), asset.Size, file_size);
return Err(Error::SizeInvalid(file.to_string_lossy().to_string(), asset.Size, file_size));
}
let (sha1, _) = misc::calculate_sha1_sha256(file)?;
if !sha1.eq_ignore_ascii_case(&asset.SHA1) {
error!("SHA1 checksum mismatch for file '{}': expected '{}', got '{}'", file.to_string_lossy(), asset.SHA1, sha1);
return Err(Error::ChecksumInvalid(file.to_string_lossy().to_string(), asset.SHA1.clone(), sha1));
}
Ok(())
}
/// Downloads the specified updates to the local app packages directory. Progress is reported back to the caller via an optional Sender.
/// This function will acquire a global update lock, so it may fail if another update operation is already in progress.
/// - If the update contains delta packages and the delta feature is enabled
@@ -523,7 +672,7 @@ impl UpdateManager {
}
let update_path = self.locator.get_update_path();
process::run_process(&update_path, args, update_path.parent(), false, None)?;
crate::process::run_process(&update_path, args, update_path.parent(), false, None)?;
Ok(())
}
}

View File

@@ -53,7 +53,6 @@ pub fn run_process(
}
pub fn wait_for_process_exit_with_timeout(process: Child, dur: Duration) -> IoResult<Option<u32>> {
let mut status = process.wait_timeout(dur)?;
if status.is_none() {
return Err(IoError::new(IoErrorKind::TimedOut, "Process timed out"));

View File

@@ -10,7 +10,7 @@ using Task = System.Threading.Tasks.Task;
namespace Velopack.Build;
public class MSBuildLogger(TaskLoggingHelper loggingHelper) : ILogger, IFancyConsole, IFancyConsoleProgress
public class MSBuildLogger(TaskLoggingHelper loggingHelper) : ILogger
{
private TaskLoggingHelper LoggingHelper { get; } = loggingHelper;
@@ -19,31 +19,6 @@ public class MSBuildLogger(TaskLoggingHelper loggingHelper) : ILogger, IFancyCon
throw new NotImplementedException();
}
public async Task ExecuteProgressAsync(Func<IFancyConsoleProgress, Task> action)
{
await action(this).ConfigureAwait(false);
}
public async Task RunTask(string name, Func<Action<int>, Task> fn)
{
try {
await fn(x => { }).ConfigureAwait(false);
} catch (Exception ex) {
this.LogError(ex, "Error running task {taskName}", name);
throw;
}
}
public async Task<T> RunTask<T>(string name, Func<Action<int>, Task<T>> fn)
{
try {
return await fn(x => { }).ConfigureAwait(false);
} catch (Exception ex) {
this.LogError(ex, "Error running task {taskName}", name);
throw;
}
}
public bool IsEnabled(LogLevel logLevel)
{
return logLevel switch {
@@ -60,6 +35,7 @@ public class MSBuildLogger(TaskLoggingHelper loggingHelper) : ILogger, IFancyCon
if (exception != null) {
message += " " + exception.Message;
}
switch (logLevel) {
case LogLevel.Trace:
LoggingHelper.LogMessage(MessageImportance.Low, message);
@@ -79,27 +55,4 @@ public class MSBuildLogger(TaskLoggingHelper loggingHelper) : ILogger, IFancyCon
break;
}
}
public void WriteTable(string tableName, IEnumerable<IEnumerable<string>> rows, bool hasHeaderRow = true)
{
LoggingHelper.LogMessage(tableName);
foreach (var row in rows) {
LoggingHelper.LogMessage(" " + String.Join(" ", row));
}
}
public System.Threading.Tasks.Task<bool> PromptYesNo(string prompt, bool? defaultValue = null, TimeSpan? timeout = null)
{
return Task.FromResult(true);
}
public void WriteLine(string text = "")
{
Log(LogLevel.Information, 0, null, null, (object? state, Exception? exception) => text);
}
public string EscapeMarkup(string text)
{
return text;
}
}
}

View File

@@ -4,6 +4,7 @@ using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Build.Framework;
using Velopack.Core;
using Velopack.Packaging;
using Velopack.Packaging.Unix.Commands;
using Velopack.Packaging.Windows.Commands;
@@ -111,6 +112,7 @@ public class PackTask : MSBuildAsyncTask
{
//System.Diagnostics.Debugger.Launch();
try {
var console = new LoggerConsole(Logger);
HelperFile.ClearSearchPaths();
var searchPath = Path.GetFullPath(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location)!, "..", "..", "vendor"));
HelperFile.AddSearchPath(searchPath);
@@ -138,15 +140,15 @@ public class PackTask : MSBuildAsyncTask
#pragma warning restore CS0618 // Type or member is obsolete
}
var runner = new WindowsPackCommandRunner(Logger, Logger);
var runner = new WindowsPackCommandRunner(Logger, console);
await runner.Run(options).ConfigureAwait(false);
} else if (VelopackRuntimeInfo.IsOSX) {
var options = this.ToOsxPackOptions();
var runner = new OsxPackCommandRunner(Logger, Logger);
var runner = new OsxPackCommandRunner(Logger, console);
await runner.Run(options).ConfigureAwait(false);
} else if (VelopackRuntimeInfo.IsLinux) {
var options = this.ToLinuxPackOptions();
var runner = new LinuxPackCommandRunner(Logger, Logger);
var runner = new LinuxPackCommandRunner(Logger, console);
await runner.Run(options).ConfigureAwait(false);
} else {
throw new NotSupportedException("Unsupported OS platform: " + VelopackRuntimeInfo.SystemOs.GetOsLongName());

View File

@@ -3,6 +3,7 @@ using System.Threading;
using System.Threading.Tasks;
using Microsoft.Build.Framework;
using Microsoft.Extensions.Logging;
using Velopack.Core;
using Velopack.Flow;
namespace Velopack.Build;
@@ -44,7 +45,8 @@ public class PublishTask : MSBuildAsyncTask
AllowInteractiveLogin = false,
};
var client = new VelopackFlowServiceClient(options, Logger, Logger);
var console = new LoggerConsole(Logger);
var client = new VelopackFlowServiceClient(options, Logger, console);
if (!await client.LoginAsync(loginOptions, false, cancellationToken).ConfigureAwait(false)) {
Logger.LogWarning("Not logged into Velopack Flow service, skipping publish. Please run vpk login.");
return true;

View File

@@ -2,6 +2,7 @@
using System.IO;
using Riok.Mapperly.Abstractions;
using Velopack.Packaging;
using Velopack.Packaging.Compression;
using Velopack.Packaging.Unix.Commands;
using Velopack.Packaging.Windows.Commands;

View File

@@ -22,9 +22,9 @@
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Build.Utilities.Core" Version="17.13.9" />
<PackageReference Include="Microsoft.Build.Utilities.Core" Version="17.14.8" />
<PackageReference Include="Microsoft.IO.Redist" Version="6.1.3" Condition="'$(TargetFramework)' == 'net472'" />
<PackageReference Include="Riok.Mapperly" Version="4.2.0" />
<PackageReference Include="Riok.Mapperly" Version="4.2.1" />
</ItemGroup>
<ItemGroup>

View File

@@ -2,7 +2,7 @@
using System.Runtime.InteropServices;
using System.Runtime.Versioning;
namespace Velopack.Packaging.Unix;
namespace Velopack.Core;
public class Chmod
{

View File

@@ -0,0 +1,55 @@
using Microsoft.Extensions.Logging;
using Velopack.Core.Abstractions;
namespace Velopack.Core;
public class LoggerConsole(ILogger log) : IFancyConsole, IFancyConsoleProgress
{
public async Task ExecuteProgressAsync(Func<IFancyConsoleProgress, Task> action)
{
await action(this).ConfigureAwait(false);
}
public async Task RunTask(string name, Func<Action<int>, Task> fn)
{
try {
await fn(x => { }).ConfigureAwait(false);
} catch (Exception ex) {
log.LogError(ex, "Error running task {taskName}", name);
throw;
}
}
public async Task<T> RunTask<T>(string name, Func<Action<int>, Task<T>> fn)
{
try {
return await fn(x => { }).ConfigureAwait(false);
} catch (Exception ex) {
log.LogError(ex, "Error running task {taskName}", name);
throw;
}
}
public void WriteTable(string tableName, IEnumerable<IEnumerable<string>> rows, bool hasHeaderRow = true)
{
log.LogInformation(tableName);
foreach (var row in rows) {
log.LogInformation(" " + String.Join(" ", row));
}
}
public Task<bool> PromptYesNo(string prompt, bool? defaultValue = null, TimeSpan? timeout = null)
{
return Task.FromResult(true);
}
public void WriteLine(string text = "")
{
log.LogInformation(text);
}
public string EscapeMarkup(string text)
{
return text;
}
}
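Note: a minimal sketch of wiring the new LoggerConsole adapter into a pack command runner, mirroring the PackTask change above. The logger factory and the 'options' instance are placeholders, and AddConsole assumes the Microsoft.Extensions.Logging.Console package.

    using Microsoft.Extensions.Logging;
    using Velopack.Core;
    using Velopack.Packaging.Windows.Commands;

    using var factory = LoggerFactory.Create(b => b.AddConsole());
    ILogger logger = factory.CreateLogger("vpk");
    var console = new LoggerConsole(logger);    // adapts ILogger to IFancyConsole
    var runner = new WindowsPackCommandRunner(logger, console);
    await runner.Run(options);                  // 'options' built elsewhere (placeholder)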

View File

@@ -13,7 +13,7 @@ using System.Threading.Tasks;
using NuGet.Versioning;
using Velopack.Util;
namespace Velopack
namespace Velopack.Core
{
/// <summary>
/// Describes the requested release notes text format.
@@ -146,7 +146,7 @@ namespace Velopack
/// <summary>
/// Create a new instance of <see cref="ReleaseEntry"/>.
/// </summary>
protected internal ReleaseEntry(string sha1, string filename, long filesize, string baseUrl = null, string query = null, float? stagingPercentage = null)
public ReleaseEntry(string sha1, string filename, long filesize, string baseUrl = null, string query = null, float? stagingPercentage = null)
{
Contract.Requires(sha1 != null && sha1.Length == 40);
Contract.Requires(filename != null);

View File

@@ -7,7 +7,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="AWSSDK.S3" Version="3.7.416.5" />
<PackageReference Include="AWSSDK.S3" Version="4.0.0.4" />
<PackageReference Include="Azure.Storage.Blobs" Version="12.24.0" />
<PackageReference Include="Gitea.Net.API" Version="25.3.5" />
<PackageReference Include="Octokit" Version="14.0.0" />

View File

@@ -15,8 +15,14 @@ public class HmacAuthHttpClientHandler(HttpMessageHandler innerHandler) : Delega
var key = keyParts[1];
var nonce = Guid.NewGuid().ToString();
string requestUri = "";
if (request.RequestUri is { } reqUri) {
requestUri = $"{reqUri.Host}{reqUri.PathAndQuery}";
}
var secondsSinceEpoch = HmacHelper.GetSecondsSinceEpoch();
var signature = HmacHelper.BuildSignature(hashedId, request.Method.Method, request.RequestUri?.AbsoluteUri ?? "", secondsSinceEpoch, nonce);
var signature = HmacHelper.BuildSignature(hashedId, request.Method.Method, requestUri, secondsSinceEpoch, nonce);
var secret = HmacHelper.Calculate(Convert.FromBase64String(key), signature);
request.Headers.Authorization = BuildHeader(hashedId, secret, nonce, secondsSinceEpoch);
}

View File

@@ -9,10 +9,10 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Identity.Client" Version="4.70.1" />
<PackageReference Include="Microsoft.Identity.Client.Broker" Version="4.70.1" />
<PackageReference Include="Microsoft.Identity.Client.Extensions.Msal" Version="4.70.1" />
<PackageReference Include="Microsoft.Extensions.Http" Version="9.0.3" />
<PackageReference Include="Microsoft.Identity.Client" Version="4.72.0" />
<PackageReference Include="Microsoft.Identity.Client.Broker" Version="4.72.0" />
<PackageReference Include="Microsoft.Identity.Client.Extensions.Msal" Version="4.72.0" />
<PackageReference Include="Microsoft.Extensions.Http" Version="9.0.5" />
<PackageReference Include="Microsoft.AspNet.WebApi.Client" Version="6.0.0" Aliases="HttpFormatting" />
</ItemGroup>

View File

@@ -13,6 +13,7 @@ using Velopack.NuGet;
using Velopack.Packaging;
using Velopack.Util;
using System.Net;
using Velopack.Packaging.Compression;
#if !NET6_0_OR_GREATER
using System.Net.Http;

View File

@@ -1,6 +1,5 @@
using ICSharpCode.SharpZipLib.Tar;
using Microsoft.Extensions.Logging;
using Velopack.Compression;
using Velopack.Core;
using Velopack.Util;

View File

@@ -2,6 +2,7 @@
using Microsoft.Extensions.Logging;
using Velopack.Core;
using Velopack.Core.Abstractions;
using Velopack.Packaging.Compression;
using Velopack.Util;
namespace Velopack.Packaging.Unix.Commands;

View File

@@ -1,4 +1,5 @@
using Velopack.Packaging.Abstractions;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Unix.Commands;

View File

@@ -1,4 +1,5 @@
using Velopack.Packaging.Abstractions;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Unix.Commands;

View File

@@ -7,7 +7,6 @@ using Markdig;
using MarkdigExtensions.RtfRenderer;
using Microsoft.Extensions.Logging;
using NuGet.Versioning;
using Velopack.Compression;
using Velopack.Core;
using Velopack.Core.Abstractions;
using Velopack.NuGet;

View File

@@ -1,4 +1,6 @@
namespace Velopack.Packaging.Windows.Commands;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Windows.Commands;
public class WindowsReleasifyOptions : WindowsSigningOptions
{

View File

@@ -1,4 +1,6 @@
namespace Velopack.Packaging.Abstractions;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Abstractions;
public interface IPackOptions : INugetPackCommand, IPlatformOptions
{

View File

@@ -1,5 +1,6 @@
using Microsoft.Extensions.Logging;
using Velopack.Core.Abstractions;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Commands;

View File

@@ -1,4 +1,5 @@
using Velopack.Core;
using Velopack.Packaging.Compression;
namespace Velopack.Packaging.Commands;

View File

@@ -1,7 +1,7 @@
using Microsoft.Extensions.Logging;
using Velopack.Compression;
using Velopack.Core;
using Velopack.Core.Abstractions;
using Velopack.Packaging.Compression;
using Velopack.Util;
namespace Velopack.Packaging.Commands;

View File

@@ -1,10 +1,8 @@
#nullable disable
using System;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using System.IO.Compression;
namespace Velopack.Compression
namespace Velopack.Packaging.Compression
{
[ExcludeFromCodeCoverage]
internal sealed class BZip2Stream : Stream

View File

@@ -1,13 +1,10 @@
#nullable disable
using System;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using System.IO.Compression;
using System.Threading;
// Adapted from https://github.com/LogosBible/bsdiff.net/blob/master/src/bsdiff/BinaryPatchUtility.cs
namespace Velopack.Compression
namespace Velopack.Packaging.Compression
{
/*
The original bsdiff.c source code (http://www.daemonology.net/bsdiff/) is

View File

@@ -1,8 +1,7 @@
using Microsoft.Extensions.Logging;
using Velopack.Compression;
using Velopack.Core;
namespace Velopack.Packaging;
namespace Velopack.Packaging.Compression;
public class DeltaEmbedded
{

View File

@@ -1,4 +1,4 @@
namespace Velopack.Packaging;
namespace Velopack.Packaging.Compression;
public enum DeltaMode
{

View File

@@ -1,14 +1,12 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
#nullable enable
using System.Text;
using System.Text.RegularExpressions;
using Velopack.Core;
using Velopack.Exceptions;
using Velopack.Logging;
using Velopack.Util;
namespace Velopack.Compression
namespace Velopack.Packaging.Compression
{
internal abstract class DeltaPackage
{

View File

@@ -1,12 +1,11 @@
using System.IO.MemoryMappedFiles;
using System.Text;
using Microsoft.Extensions.Logging;
using Velopack.Compression;
using Velopack.Core;
using Velopack.Packaging.Exceptions;
using Velopack.Util;
namespace Velopack.Packaging;
namespace Velopack.Packaging.Compression;
public class DeltaPackageBuilder
{

Some files were not shown because too many files have changed in this diff.