chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

1
vendor/tar/.cargo-checksum.json vendored Normal file
View File

@@ -0,0 +1 @@
{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74",".cargo_vcs_info.json":"dccfdfe441ac4913753d6c5b79fb46687167eb2335f5c8f23e607538ba68e69d",".github/dependabot.yml":"d04c9b0253b2bbae886b59a11399ea260397b460cd9f5712d692d1c85f8ec090",".github/workflows/ci.yml":"9d4329099f185445c6ed9e3b0d1b5e00718e08845141546af070901f7fe7947c","Cargo.lock":"1a047203abdbd61eb1e704e8463d618c691d0d97917228092121c0b570328328","Cargo.toml":"0e3ed0433a1bddcdf9ced37cea1b8f9527526b630f61d0c8af666c946c56950b","Cargo.toml.orig":"db1073c79b48e52ec01cd19982db9e3adb0613fe7644cfe4cfad62127821d2de","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"8ca6b96cea9e67c6c5c63f452c31bd396db8bd2406231fdea5d48ef462b48077","README.md":"71079f1a0962c2cf288058f38d24735bddabd1427ac2dee72ec18cc5ae4bceed","examples/extract_file.rs":"5958faad05822704905a6b2f2d8029ee5b0466f7073d5dce31e3361571973736","examples/list.rs":"36e412205eaffea8ab7f39be4173594b74e36acb369e091362b1975ee4a7a14b","examples/raw_list.rs":"0a735576ac354457d6d5a4d395d044fae99bf67a7c69960ca784a6f6a1743651","examples/write.rs":"419ac3e4155035e32b52cd8e6ae987a2d99cf82f60abbfb315c2a2c4f8e8fd19","src/archive.rs":"03d42748660f72c1cfa8a90bb839d5574214b97d5d6bea5272f34c39517246db","src/builder.rs":"5abdf9f57923ef4ddcbac6cbd4aae333464dab52db42b8d37c18acc7d0bc58cd","src/entry.rs":"8fab886ea45b82da13d1aa73dd817ee34b048bb1ff93245bce76bc7dab3fa38a","src/entry_type.rs":"bdc7655918b97492f9f5204e9c4eee4e2c3268da398c7786342e6f993a6469c7","src/error.rs":"a20813fbc52f1f2e3a79654f62de6001759f6504a06acee5b0819d4865398587","src/header.rs":"5859b3506d42d2117aad6b08eaf37f6fb0ed2143ba7c405dd5bf917452469693","src/lib.rs":"e8b8b1557abe3726d124c7b56d1c64a35bea057119e8baeda6651389a0649441","src/pax.rs":"b4e5081a777f6400ed003be4a222f7d2ba7338af5b61dc5c05c817420057526d","tests/all.rs":"6632dce0d4df4ee2ad46bc919b7c280b5dab79631511516ef6fe93920589a257","tests/entry.rs":"3951a295d757f77d22
f1ae77c575cff9394c56858d253be861fb019080d9aa67","tests/header/mod.rs":"edd0ff4b318d60080706abeffa426bb13fcab308938a3ae1aa7c262e826218c3"},"package":"22692a6476a21fa75fdfc11d452fda482af402c008cdbaf3476414e122040973"}

2
vendor/tar/.cargo/config.toml vendored Normal file
View File

@@ -0,0 +1,2 @@
[alias]
xtask = "run --manifest-path xtask/Cargo.toml --"

6
vendor/tar/.cargo_vcs_info.json vendored Normal file
View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "096e3d16d14fefac51bc4d94d616b21546ea8bcc"
},
"path_in_vcs": ""
}

8
vendor/tar/.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,8 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: daily
time: "08:00"
open-pull-requests-limit: 10

139
vendor/tar/.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,139 @@
name: CI
on:
push:
branches: [master, main]
pull_request:
branches: [master, main]
env:
CARGO_TERM_COLOR: always
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
test:
name: Test (${{ matrix.build }})
runs-on: ${{ matrix.os }}
timeout-minutes: 20
strategy:
fail-fast: false
matrix:
build: [stable, beta, nightly, macos, windows]
include:
- build: stable
os: ubuntu-latest
rust: stable
- build: beta
os: ubuntu-latest
rust: beta
- build: nightly
os: ubuntu-latest
rust: nightly
- build: macos
os: macos-latest
rust: stable
- build: windows
os: windows-latest
rust: stable
steps:
- uses: actions/checkout@v6
- name: Install Rust
run: rustup update ${{ matrix.rust }} --no-self-update && rustup default ${{ matrix.rust }}
shell: bash
- run: cargo test
- run: cargo test --no-default-features
- name: Run cargo test with root
run: sudo -E $(which cargo) test
if: ${{ matrix.os == 'ubuntu-latest' }}
wasm:
name: Wasm
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-emscripten
- run: cargo build --target=wasm32-unknown-emscripten
rustfmt:
name: Rustfmt
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- run: cargo fmt -- --check
semver-checks:
name: Semver Checks
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v6
- uses: obi1kenobi/cargo-semver-checks-action@v2
with:
# Pinned until cargo-semver-checks supports rustdoc format v57 (Rust 1.93+)
rust-toolchain: "1.92.0"
# FIXME: failed on https://github.com/alexcrichton/tar-rs/pull/443, needs
# investigation.
# revdep:
# name: Reverse deps
# runs-on: ubuntu-24.04
# timeout-minutes: 30
# steps:
# - uses: actions/checkout@v6
# - uses: dtolnay/rust-toolchain@stable
# - uses: Swatinem/rust-cache@v2
# with:
# workspaces: xtask
# - name: Test reverse dependencies
# run: cargo xtask revdep-test
# - name: Verify tests catch regressions
# run: cargo xtask revdep-test --self-test
publish_docs:
name: Publish Documentation
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
contents: write
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- name: Build documentation
run: cargo doc --no-deps --all-features
- name: Publish documentation
run: |
cd target/doc
git init
git add .
git -c user.name='ci' -c user.email='ci' commit -m init
git push -f -q https://git:${{ secrets.github_token }}@github.com/${{ github.repository }} HEAD:gh-pages
if: github.event_name == 'push' && github.event.ref == 'refs/heads/master'
# Sentinel job for required checks - configure this job name in
# repository settings as the single required status check.
required-checks:
if: always()
needs: [test, wasm, rustfmt, semver-checks, publish_docs]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- run: exit 1
if: >-
needs.test.result != 'success' ||
needs.wasm.result != 'success' ||
needs.rustfmt.result != 'success' ||
needs.semver-checks.result != 'success' ||
needs.publish_docs.result != 'success'

652
vendor/tar/Cargo.lock generated vendored Normal file
View File

@@ -0,0 +1,652 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "anyhow"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]]
name = "astral-tokio-tar"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5"
dependencies = [
"filetime",
"futures-core",
"libc",
"portable-atomic",
"rustc-hash",
"tokio",
"tokio-stream",
"xattr",
]
[[package]]
name = "bitflags"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
[[package]]
name = "bytes"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "errno"
version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "filetime"
version = "0.2.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db"
dependencies = [
"cfg-if",
"libc",
"libredox",
]
[[package]]
name = "foldhash"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
[[package]]
name = "futures-core"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d"
[[package]]
name = "getrandom"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "getrandom"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasip2",
"wasip3",
]
[[package]]
name = "hashbrown"
version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"foldhash",
]
[[package]]
name = "hashbrown"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "id-arena"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954"
[[package]]
name = "indexmap"
version = "2.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
"hashbrown 0.16.1",
"serde",
"serde_core",
]
[[package]]
name = "itoa"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "leb128fmt"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "libc"
version = "0.2.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
[[package]]
name = "libredox"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
dependencies = [
"bitflags",
"libc",
"plain",
"redox_syscall",
]
[[package]]
name = "linux-raw-sys"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
[[package]]
name = "log"
version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "memchr"
version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "once_cell"
version = "1.21.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50"
[[package]]
name = "pin-project-lite"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
[[package]]
name = "plain"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
[[package]]
name = "portable-atomic"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "prettyplease"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn",
]
[[package]]
name = "proc-macro2"
version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.17",
]
[[package]]
name = "redox_syscall"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16"
dependencies = [
"bitflags",
]
[[package]]
name = "rustc-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustix"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "semver"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
[[package]]
name = "serde"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
dependencies = [
"serde_core",
]
[[package]]
name = "serde_core"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.149"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86"
dependencies = [
"itoa",
"memchr",
"serde",
"serde_core",
"zmij",
]
[[package]]
name = "syn"
version = "2.0.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tar"
version = "0.4.45"
dependencies = [
"astral-tokio-tar",
"filetime",
"libc",
"rand",
"tempfile",
"tokio",
"tokio-stream",
"xattr",
]
[[package]]
name = "tempfile"
version = "3.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd"
dependencies = [
"fastrand",
"getrandom 0.4.2",
"once_cell",
"rustix",
"windows-sys",
]
[[package]]
name = "tokio"
version = "1.50.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d"
dependencies = [
"bytes",
"pin-project-lite",
"tokio-macros",
]
[[package]]
name = "tokio-macros"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tokio-stream"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
]
[[package]]
name = "unicode-ident"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
[[package]]
name = "unicode-xid"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasip2"
version = "1.0.2+wasi-0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5"
dependencies = [
"wit-bindgen",
]
[[package]]
name = "wasip3"
version = "0.4.0+wasi-0.3.0-rc-2026-01-06"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5"
dependencies = [
"wit-bindgen",
]
[[package]]
name = "wasm-encoder"
version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319"
dependencies = [
"leb128fmt",
"wasmparser",
]
[[package]]
name = "wasm-metadata"
version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909"
dependencies = [
"anyhow",
"indexmap",
"wasm-encoder",
"wasmparser",
]
[[package]]
name = "wasmparser"
version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
dependencies = [
"bitflags",
"hashbrown 0.15.5",
"indexmap",
"semver",
]
[[package]]
name = "windows-link"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-sys"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
dependencies = [
"windows-link",
]
[[package]]
name = "wit-bindgen"
version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
dependencies = [
"wit-bindgen-rust-macro",
]
[[package]]
name = "wit-bindgen-core"
version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc"
dependencies = [
"anyhow",
"heck",
"wit-parser",
]
[[package]]
name = "wit-bindgen-rust"
version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21"
dependencies = [
"anyhow",
"heck",
"indexmap",
"prettyplease",
"syn",
"wasm-metadata",
"wit-bindgen-core",
"wit-component",
]
[[package]]
name = "wit-bindgen-rust-macro"
version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a"
dependencies = [
"anyhow",
"prettyplease",
"proc-macro2",
"quote",
"syn",
"wit-bindgen-core",
"wit-bindgen-rust",
]
[[package]]
name = "wit-component"
version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
dependencies = [
"anyhow",
"bitflags",
"indexmap",
"log",
"serde",
"serde_derive",
"serde_json",
"wasm-encoder",
"wasm-metadata",
"wasmparser",
"wit-parser",
]
[[package]]
name = "wit-parser"
version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736"
dependencies = [
"anyhow",
"id-arena",
"indexmap",
"log",
"semver",
"serde",
"serde_derive",
"serde_json",
"unicode-xid",
"wasmparser",
]
[[package]]
name = "xattr"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156"
dependencies = [
"libc",
"rustix",
]
[[package]]
name = "zerocopy"
version = "0.8.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zmij"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa"

104
vendor/tar/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,104 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.63"
name = "tar"
version = "0.4.45"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
build = false
exclude = ["tests/archives/*"]
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = """
A Rust implementation of a TAR file reader and writer. This library does not
currently handle compression, but it is abstract over all I/O readers and
writers. Additionally, great lengths are taken to ensure that the entire
contents are never required to be entirely resident in memory all at once.
"""
homepage = "https://github.com/alexcrichton/tar-rs"
documentation = "https://docs.rs/tar"
readme = "README.md"
keywords = [
"tar",
"tarfile",
"encoding",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/alexcrichton/tar-rs"
[features]
default = ["xattr"]
[lib]
name = "tar"
path = "src/lib.rs"
[[example]]
name = "extract_file"
path = "examples/extract_file.rs"
[[example]]
name = "list"
path = "examples/list.rs"
[[example]]
name = "raw_list"
path = "examples/raw_list.rs"
[[example]]
name = "write"
path = "examples/write.rs"
[[test]]
name = "all"
path = "tests/all.rs"
[[test]]
name = "entry"
path = "tests/entry.rs"
[dependencies.filetime]
version = "0.2.8"
[dev-dependencies.astral-tokio-tar]
version = "0.5"
[dev-dependencies.rand]
version = "0.8"
features = ["small_rng"]
[dev-dependencies.tempfile]
version = "3"
[dev-dependencies.tokio]
version = "1"
features = [
"macros",
"rt",
]
[dev-dependencies.tokio-stream]
version = "0.1"
[target."cfg(unix)".dependencies.libc]
version = "0.2"
[target."cfg(unix)".dependencies.xattr]
version = "1.1.3"
optional = true
[lints.rust]
dead_code = "deny"

201
vendor/tar/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
vendor/tar/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,25 @@
Copyright (c) The tar-rs Project Contributors
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

76
vendor/tar/README.md vendored Normal file
View File

@@ -0,0 +1,76 @@
# tar-rs
[Documentation](https://docs.rs/tar)
A tar archive reading/writing library for Rust.
```toml
# Cargo.toml
[dependencies]
tar = "0.4"
```
## Reading an archive
```rust,no_run
extern crate tar;
use std::io::prelude::*;
use std::fs::File;
use tar::Archive;
fn main() {
let file = File::open("foo.tar").unwrap();
let mut a = Archive::new(file);
for file in a.entries().unwrap() {
// Make sure there wasn't an I/O error
let mut file = file.unwrap();
// Inspect metadata about the file
println!("{:?}", file.header().path().unwrap());
println!("{}", file.header().size().unwrap());
// files implement the Read trait
let mut s = String::new();
file.read_to_string(&mut s).unwrap();
println!("{}", s);
}
}
```
## Writing an archive
```rust,no_run
extern crate tar;
use std::io::prelude::*;
use std::fs::File;
use tar::Builder;
fn main() {
let file = File::create("foo.tar").unwrap();
let mut a = Builder::new(file);
a.append_path("file1.txt").unwrap();
a.append_file("file2.txt", &mut File::open("file3.txt").unwrap()).unwrap();
}
```
# License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.

25
vendor/tar/examples/extract_file.rs vendored Normal file
View File

@@ -0,0 +1,25 @@
//! An example of extracting a file in an archive.
//!
//! Takes a tarball on standard input, looks for an entry with a listed file
//! name as the first argument provided, and then prints the contents of that
//! file to stdout.
extern crate tar;
use std::env::args_os;
use std::io::{copy, stdin, stdout};
use std::path::Path;
use tar::Archive;
fn main() {
    // The path to extract is the first command-line argument.
    let wanted = args_os().nth(1).unwrap();
    let wanted = Path::new(&wanted);

    let mut archive = Archive::new(stdin());
    for entry in archive.entries().unwrap() {
        let mut entry = entry.unwrap();
        if entry.path().unwrap() == wanted {
            // Stream the matching entry's bytes straight to stdout.
            copy(&mut entry, &mut stdout()).unwrap();
        }
    }
}

17
vendor/tar/examples/list.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
//! An example of listing the file names of entries in an archive.
//!
//! Takes a tarball on stdin and prints out all of the entries inside.
extern crate tar;
use std::io::stdin;
use tar::Archive;
fn main() {
    let mut archive = Archive::new(stdin());
    // Print one line per archive member: its stored path.
    archive
        .entries()
        .unwrap()
        .map(|entry| entry.unwrap())
        .for_each(|entry| println!("{}", entry.path().unwrap().display()));
}

48
vendor/tar/examples/raw_list.rs vendored Normal file
View File

@@ -0,0 +1,48 @@
//! An example of listing raw entries in an archive.
//!
//! Takes a tarball on stdin and prints out all of the entries inside.
extern crate tar;
use std::io::stdin;
use tar::Archive;
fn main() {
    let mut ar = Archive::new(stdin());
    // Raw iteration: special members (GNU long names/links, pax extension
    // records) are yielded as-is instead of being folded into the entries
    // they describe.
    for (i, file) in ar.entries().unwrap().raw(true).enumerate() {
        println!("-------------------------- Entry {}", i);
        let mut f = file.unwrap();
        println!("path: {}", f.path().unwrap().display());
        println!("size: {}", f.header().size().unwrap());
        println!("entry size: {}", f.header().entry_size().unwrap());
        println!("link name: {:?}", f.link_name().unwrap());
        println!("file type: {:#x}", f.header().entry_type().as_byte());
        println!("mode: {:#o}", f.header().mode().unwrap());
        println!("uid: {}", f.header().uid().unwrap());
        println!("gid: {}", f.header().gid().unwrap());
        println!("mtime: {}", f.header().mtime().unwrap());
        println!("username: {:?}", f.header().username().unwrap());
        println!("groupname: {:?}", f.header().groupname().unwrap());
        // Report which on-disk header flavor this entry uses.
        if f.header().as_ustar().is_some() {
            println!("kind: UStar");
        } else if f.header().as_gnu().is_some() {
            println!("kind: GNU");
        } else {
            println!("kind: normal");
        }
        // If this member is itself a pax extension record, dump its
        // key/value pairs (lossily decoded for display).
        if let Ok(Some(extensions)) = f.pax_extensions() {
            println!("pax extensions:");
            for e in extensions {
                let e = e.unwrap();
                println!(
                    "\t{:?} = {:?}",
                    String::from_utf8_lossy(e.key_bytes()),
                    String::from_utf8_lossy(e.value_bytes())
                );
            }
        }
    }
}

13
vendor/tar/examples/write.rs vendored Normal file
View File

@@ -0,0 +1,13 @@
extern crate tar;
use std::fs::File;
use tar::Builder;
fn main() {
    // Build `foo.tar` in the current working directory.
    let output = File::create("foo.tar").unwrap();
    let mut builder = Builder::new(output);

    // Archive README.md under its own name.
    builder.append_path("README.md").unwrap();

    // Archive src/lib.rs, but store it as `lib.rs` inside the tarball.
    let mut source = File::open("src/lib.rs").unwrap();
    builder.append_file("lib.rs", &mut source).unwrap();
}

635
vendor/tar/src/archive.rs vendored Normal file
View File

@@ -0,0 +1,635 @@
use std::cell::{Cell, RefCell};
use std::cmp;
use std::convert::TryFrom;
use std::fs;
use std::io::prelude::*;
use std::io::{self, SeekFrom};
use std::marker;
use std::path::Path;
use crate::entry::{EntryFields, EntryIo};
use crate::error::TarError;
use crate::header::BLOCK_SIZE;
use crate::other;
use crate::pax::*;
use crate::{Entry, GnuExtSparseHeader, GnuSparseHeader, Header};
/// A top-level representation of an archive file.
///
/// This archive can have an entry added to it and it can be iterated over.
pub struct Archive<R: ?Sized + Read> {
    // All shared, interior-mutable state; also reachable from the entries
    // produced by iteration.
    inner: ArchiveInner<R>,
}
// Shared state behind `Archive`. Uses `Cell`/`RefCell` so entries holding a
// shared borrow of the archive can still read from it and track position.
pub struct ArchiveInner<R: ?Sized> {
    // Absolute byte position within the archive stream.
    pos: Cell<u64>,
    // umask-style permission mask applied when unpacking.
    mask: u32,
    unpack_xattrs: bool,
    preserve_permissions: bool,
    preserve_ownerships: bool,
    preserve_mtime: bool,
    overwrite: bool,
    // When true, all-zero header blocks are skipped rather than treated as
    // end-of-archive (for concatenated tarballs).
    ignore_zeros: bool,
    // The underlying reader itself.
    obj: RefCell<R>,
}
/// An iterator over the entries of an archive.
pub struct Entries<'a, R: 'a + Read> {
    // Type-erased implementation; `R` is kept only as a phantom so entries
    // can be handed back with the caller's reader type.
    fields: EntriesFields<'a>,
    _ignored: marker::PhantomData<&'a Archive<R>>,
}
// Object-safe combination of `Read + Seek`, blanket-implemented so any
// seekable reader can be erased to `dyn SeekRead`.
trait SeekRead: Read + Seek {}
impl<R: Read + Seek> SeekRead for R {}
// Concrete (type-erased) state backing `Entries`.
struct EntriesFields<'a> {
    archive: &'a Archive<dyn Read + 'a>,
    // Same archive, if the underlying reader can also seek (enables fast skip).
    seekable_archive: Option<&'a Archive<dyn SeekRead + 'a>>,
    // Absolute offset of the next header block to read.
    next: u64,
    done: bool,
    // When true, special members are yielded as-is (no long-name/pax folding).
    raw: bool,
}
impl<R: Read> Archive<R> {
    /// Create a new archive with the underlying object as the reader.
    pub fn new(obj: R) -> Archive<R> {
        Archive {
            inner: ArchiveInner {
                // `u32::MIN` is 0: no permission bits are masked by default.
                mask: u32::MIN,
                unpack_xattrs: false,
                preserve_permissions: false,
                preserve_ownerships: false,
                preserve_mtime: true,
                overwrite: true,
                ignore_zeros: false,
                obj: RefCell::new(obj),
                pos: Cell::new(0),
            },
        }
    }

    /// Unwrap this archive, returning the underlying object.
    pub fn into_inner(self) -> R {
        self.inner.obj.into_inner()
    }

    /// Construct an iterator over the entries in this archive.
    ///
    /// Note that care must be taken to consider each entry within an archive in
    /// sequence. If entries are processed out of sequence (from what the
    /// iterator returns), then the contents read for each entry may be
    /// corrupted.
    pub fn entries(&mut self) -> io::Result<Entries<'_, R>> {
        // Erase the concrete reader type; the real work happens on
        // `Archive<dyn Read>`.
        let me: &mut Archive<dyn Read> = self;
        me._entries(None).map(|fields| Entries {
            fields,
            _ignored: marker::PhantomData,
        })
    }

    /// Unpacks the contents tarball into the specified `dst`.
    ///
    /// This function will iterate over the entire contents of this tarball,
    /// extracting each file in turn to the location specified by the entry's
    /// path name.
    ///
    /// # Security
    ///
    /// A best-effort is made to prevent writing files outside `dst` (paths
    /// containing `..` are skipped, symlinks are validated). However, there
    /// have been historical bugs in this area, and more may exist. For this
    /// reason, when processing untrusted archives, stronger sandboxing is
    /// encouraged: e.g. the [`cap-std`] crate and/or OS-level
    /// containerization/virtualization.
    ///
    /// If `dst` does not exist, it is created. Unpacking into an existing
    /// directory merges content. This function assumes `dst` is not
    /// concurrently modified by untrusted processes. Protecting against
    /// TOCTOU races is out of scope for this crate.
    ///
    /// [`cap-std`]: https://docs.rs/cap-std/
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use tar::Archive;
    ///
    /// let mut ar = Archive::new(File::open("foo.tar").unwrap());
    /// ar.unpack("foo").unwrap();
    /// ```
    pub fn unpack<P: AsRef<Path>>(&mut self, dst: P) -> io::Result<()> {
        let me: &mut Archive<dyn Read> = self;
        me._unpack(dst.as_ref())
    }

    /// Set the mask of the permission bits when unpacking this entry.
    ///
    /// The mask will be inverted when applying against a mode, similar to how
    /// `umask` works on Unix. In logical notation it looks like:
    ///
    /// ```text
    /// new_mode = old_mode & (~mask)
    /// ```
    ///
    /// The mask is 0 by default and is currently only implemented on Unix.
    pub fn set_mask(&mut self, mask: u32) {
        self.inner.mask = mask;
    }

    /// Indicate whether extended file attributes (xattrs on Unix) are preserved
    /// when unpacking this archive.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix using xattr support. This may eventually be implemented for
    /// Windows, however, if other archive implementations are found which do
    /// this as well.
    pub fn set_unpack_xattrs(&mut self, unpack_xattrs: bool) {
        self.inner.unpack_xattrs = unpack_xattrs;
    }

    /// Indicate whether extended permissions (like suid on Unix) are preserved
    /// when unpacking this entry.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix.
    pub fn set_preserve_permissions(&mut self, preserve: bool) {
        self.inner.preserve_permissions = preserve;
    }

    /// Indicate whether numeric ownership ids (like uid and gid on Unix)
    /// are preserved when unpacking this entry.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix.
    pub fn set_preserve_ownerships(&mut self, preserve: bool) {
        self.inner.preserve_ownerships = preserve;
    }

    /// Indicate whether files and symlinks should be overwritten on extraction.
    ///
    /// This flag is enabled by default.
    pub fn set_overwrite(&mut self, overwrite: bool) {
        self.inner.overwrite = overwrite;
    }

    /// Indicate whether modification time information is preserved when
    /// unpacking this entry.
    ///
    /// This flag is enabled by default.
    // NOTE(review): the previous doc said "access time", but this toggles
    // `preserve_mtime`, i.e. the modification time.
    pub fn set_preserve_mtime(&mut self, preserve: bool) {
        self.inner.preserve_mtime = preserve;
    }

    /// Ignore zeroed headers, which would otherwise indicate to the archive that it has no more
    /// entries.
    ///
    /// This can be used in case multiple tar archives have been concatenated together.
    pub fn set_ignore_zeros(&mut self, ignore_zeros: bool) {
        self.inner.ignore_zeros = ignore_zeros;
    }
}
impl<R: Seek + Read> Archive<R> {
    /// Construct an iterator over the entries in this archive for a seekable
    /// reader. Seek will be used to efficiently skip over file contents.
    ///
    /// Note that care must be taken to consider each entry within an archive in
    /// sequence. If entries are processed out of sequence (from what the
    /// iterator returns), then the contents read for each entry may be
    /// corrupted.
    pub fn entries_with_seek(&mut self) -> io::Result<Entries<'_, R>> {
        // Two erased views of the same archive: one for reading, and one
        // proving the reader can also seek (used by `EntriesFields::skip`).
        let me: &Archive<dyn Read> = self;
        let me_seekable: &Archive<dyn SeekRead> = self;
        me._entries(Some(me_seekable)).map(|fields| Entries {
            fields,
            _ignored: marker::PhantomData,
        })
    }
}
impl Archive<dyn Read + '_> {
    // Type-erased core of `entries`/`entries_with_seek`. Requires the
    // archive to still be at position 0 so entry offsets line up.
    fn _entries<'a>(
        &'a self,
        seekable_archive: Option<&'a Archive<dyn SeekRead + 'a>>,
    ) -> io::Result<EntriesFields<'a>> {
        if self.inner.pos.get() != 0 {
            return Err(other(
                "cannot call entries unless archive is at \
                 position 0",
            ));
        }
        Ok(EntriesFields {
            archive: self,
            seekable_archive,
            done: false,
            next: 0,
            raw: false,
        })
    }

    // Type-erased core of `unpack`.
    fn _unpack(&mut self, dst: &Path) -> io::Result<()> {
        if dst.symlink_metadata().is_err() {
            fs::create_dir_all(dst)
                .map_err(|e| TarError::new(format!("failed to create `{}`", dst.display()), e))?;
        }

        // Canonicalizing the dst directory will prepend the path with '\\?\'
        // on windows which will allow windows APIs to treat the path as an
        // extended-length path with a 32,767 character limit. Otherwise all
        // unpacked paths over 260 characters will fail on creation with a
        // NotFound exception.
        let dst = &dst.canonicalize().unwrap_or(dst.to_path_buf());

        // Delay any directory entries until the end (they will be created if needed by
        // descendants), to ensure that directory permissions do not interfere with descendant
        // extraction.
        let mut directories = Vec::new();
        for entry in self._entries(None)? {
            let mut file = entry.map_err(|e| TarError::new("failed to iterate over archive", e))?;
            if file.header().entry_type() == crate::EntryType::Directory {
                directories.push(file);
            } else {
                file.unpack_in(dst)?;
            }
        }

        // Apply the directories.
        //
        // Note: the order of application is important to permissions. That is, we must traverse
        // the filesystem graph in topological ordering or else we risk not being able to create
        // child directories within those of more restrictive permissions. See [0] for details.
        //
        // [0]: <https://github.com/alexcrichton/tar-rs/issues/242>
        //
        // Sorting by path bytes in reverse yields children before parents,
        // so each directory's permissions are applied after its contents.
        directories.sort_by(|a, b| b.path_bytes().cmp(&a.path_bytes()));
        for mut dir in directories {
            dir.unpack_in(dst)?;
        }

        Ok(())
    }
}
impl<'a, R: Read> Entries<'a, R> {
/// Indicates whether this iterator will return raw entries or not.
///
/// If the raw list of entries is returned, then no preprocessing happens
/// on account of this library, for example taking into account GNU long name
/// or long link archive members. Raw iteration is disabled by default.
pub fn raw(self, raw: bool) -> Entries<'a, R> {
Entries {
fields: EntriesFields { raw, ..self.fields },
_ignored: marker::PhantomData,
}
}
}
impl<'a, R: Read> Iterator for Entries<'a, R> {
    type Item = io::Result<Entry<'a, R>>;

    fn next(&mut self) -> Option<io::Result<Entry<'a, R>>> {
        // Delegate to the type-erased iterator, then restore the caller's
        // reader type parameter on the produced entry.
        self.fields
            .next()
            .map(|result| result.map(|e| EntryFields::from(e).into_entry()))
    }
}
impl<'a> EntriesFields<'a> {
    /// Read the next header block and produce an entry for it, without
    /// interpreting special member types (long names, pax, etc.).
    ///
    /// `pax_extensions` holds the raw records of a preceding pax member, if
    /// any; they may override the size/uid/gid parsed from the header.
    fn next_entry_raw(
        &mut self,
        pax_extensions: Option<&[u8]>,
    ) -> io::Result<Option<Entry<'a, io::Empty>>> {
        let mut header = Header::new_old();
        let mut header_pos = self.next;
        loop {
            // Seek to the start of the next header in the archive
            let delta = self.next - self.archive.inner.pos.get();
            self.skip(delta)?;

            // EOF is an indicator that we are at the end of the archive.
            if !try_read_all(&mut &self.archive.inner, header.as_mut_bytes())? {
                return Ok(None);
            }

            // If a header is not all zeros, we have another valid header.
            // Otherwise, check if we are ignoring zeros and continue, or break as if this is the
            // end of the archive.
            if !header.as_bytes().iter().all(|i| *i == 0) {
                self.next += BLOCK_SIZE;
                break;
            }

            if !self.archive.inner.ignore_zeros {
                return Ok(None);
            }
            self.next += BLOCK_SIZE;
            header_pos = self.next;
        }

        // Make sure the checksum is ok. The sum is taken over the header
        // with the checksum field itself (bytes 148..156) counted as eight
        // ASCII spaces (8 * 32).
        let sum = header.as_bytes()[..148]
            .iter()
            .chain(&header.as_bytes()[156..])
            .fold(0, |a, b| a + (*b as u32))
            + 8 * 32;
        let cksum = header.cksum()?;
        if sum != cksum {
            return Err(other("archive header checksum mismatch"));
        }

        // Apply pax overrides for size/uid/gid if records are present.
        let mut pax_size: Option<u64> = None;
        if let Some(pax_extensions_ref) = &pax_extensions {
            pax_size = pax_extensions_value(pax_extensions_ref, PAX_SIZE);

            if let Some(pax_uid) = pax_extensions_value(pax_extensions_ref, PAX_UID) {
                header.set_uid(pax_uid);
            }

            if let Some(pax_gid) = pax_extensions_value(pax_extensions_ref, PAX_GID) {
                header.set_gid(pax_gid);
            }
        }

        let file_pos = self.next;
        let mut size = header.entry_size()?;

        // If this exists, it must override the header size. Disagreement among
        // parsers allows construction of malicious archives that appear different
        // when parsed.
        if let Some(pax_size) = pax_size {
            size = pax_size;
        }
        let ret = EntryFields {
            size,
            header_pos,
            file_pos,
            data: vec![EntryIo::Data((&self.archive.inner).take(size))],
            header,
            long_pathname: None,
            long_linkname: None,
            pax_extensions: None,
            mask: self.archive.inner.mask,
            unpack_xattrs: self.archive.inner.unpack_xattrs,
            preserve_permissions: self.archive.inner.preserve_permissions,
            preserve_mtime: self.archive.inner.preserve_mtime,
            overwrite: self.archive.inner.overwrite,
            preserve_ownerships: self.archive.inner.preserve_ownerships,
        };

        // Store where the next entry is, rounding up by 512 bytes (the size of
        // a header); both additions are checked to reject malicious sizes.
        let size = size
            .checked_add(BLOCK_SIZE - 1)
            .ok_or_else(|| other("size overflow"))?;
        self.next = self
            .next
            .checked_add(size & !(BLOCK_SIZE - 1))
            .ok_or_else(|| other("size overflow"))?;

        Ok(Some(ret.into_entry()))
    }

    /// Read the next logical entry, folding GNU long-name/long-link and pax
    /// extension members into the entry they describe (unless `raw`).
    fn next_entry(&mut self) -> io::Result<Option<Entry<'a, io::Empty>>> {
        if self.raw {
            return self.next_entry_raw(None);
        }

        let mut gnu_longname = None;
        let mut gnu_longlink = None;
        let mut pax_extensions = None;
        let mut processed = 0;
        loop {
            processed += 1;
            let entry = match self.next_entry_raw(pax_extensions.as_deref())? {
                Some(entry) => entry,
                // A special member announced a follow-up entry that never
                // materialized before EOF.
                None if processed > 1 => {
                    return Err(other(
                        "members found describing a future member \
                         but no future member found",
                    ));
                }
                None => return Ok(None),
            };

            let is_recognized_header =
                entry.header().as_gnu().is_some() || entry.header().as_ustar().is_some();
            if is_recognized_header && entry.header().entry_type().is_gnu_longname() {
                if gnu_longname.is_some() {
                    return Err(other(
                        "two long name entries describing \
                         the same member",
                    ));
                }
                gnu_longname = Some(EntryFields::from(entry).read_all()?);
                continue;
            }

            if is_recognized_header && entry.header().entry_type().is_gnu_longlink() {
                if gnu_longlink.is_some() {
                    return Err(other(
                        "two long name entries describing \
                         the same member",
                    ));
                }
                gnu_longlink = Some(EntryFields::from(entry).read_all()?);
                continue;
            }

            if is_recognized_header && entry.header().entry_type().is_pax_local_extensions() {
                if pax_extensions.is_some() {
                    return Err(other(
                        "two pax extensions entries describing \
                         the same member",
                    ));
                }
                pax_extensions = Some(EntryFields::from(entry).read_all()?);
                continue;
            }

            // A regular member: attach whatever special data preceded it.
            let mut fields = EntryFields::from(entry);
            fields.long_pathname = gnu_longname;
            fields.long_linkname = gnu_longlink;
            fields.pax_extensions = pax_extensions;
            self.parse_sparse_header(&mut fields)?;
            return Ok(Some(fields.into_entry()));
        }
    }

    // Rebuild `entry.data` as a sequence of pad/data segments for GNU
    // sparse entries; no-op for everything else.
    fn parse_sparse_header(&mut self, entry: &mut EntryFields<'a>) -> io::Result<()> {
        if !entry.header.entry_type().is_gnu_sparse() {
            return Ok(());
        }
        let gnu = match entry.header.as_gnu() {
            Some(gnu) => gnu,
            None => return Err(other("sparse entry type listed but not GNU header")),
        };

        // Sparse files are represented internally as a list of blocks that are
        // read. Blocks are either a bunch of 0's or they're data from the
        // underlying archive.
        //
        // Blocks of a sparse file are described by the `GnuSparseHeader`
        // structure, some of which are contained in `GnuHeader` but some of
        // which may also be contained after the first header in further
        // headers.
        //
        // We read off all the blocks here and use the `add_block` function to
        // incrementally add them to the list of I/O block (in `entry.data`).
        // The `add_block` function also validates that each chunk comes after
        // the previous, we don't overrun the end of the file, and each block is
        // aligned to a 512-byte boundary in the archive itself.
        //
        // At the end we verify that the sparse file size (`Header::size`) is
        // the same as the current offset (described by the list of blocks) as
        // well as the amount of data read equals the size of the entry
        // (`Header::entry_size`).
        entry.data.truncate(0);

        let mut cur = 0;
        let mut remaining = entry.size;
        {
            let data = &mut entry.data;
            let reader = &self.archive.inner;
            let size = entry.size;
            let mut add_block = |block: &GnuSparseHeader| -> io::Result<_> {
                if block.is_empty() {
                    return Ok(());
                }
                let off = block.offset()?;
                let len = block.length()?;
                if len != 0 && (size - remaining) % BLOCK_SIZE != 0 {
                    return Err(other(
                        "previous block in sparse file was not \
                         aligned to 512-byte boundary",
                    ));
                } else if off < cur {
                    return Err(other(
                        "out of order or overlapping sparse \
                         blocks",
                    ));
                } else if cur < off {
                    // Fill the hole before this block with synthesized zeros.
                    let block = io::repeat(0).take(off - cur);
                    data.push(EntryIo::Pad(block));
                }
                cur = off
                    .checked_add(len)
                    .ok_or_else(|| other("more bytes listed in sparse file than u64 can hold"))?;
                remaining = remaining.checked_sub(len).ok_or_else(|| {
                    other(
                        "sparse file consumed more data than the header \
                         listed",
                    )
                })?;
                data.push(EntryIo::Data(reader.take(len)));
                Ok(())
            };
            for block in gnu.sparse.iter() {
                add_block(block)?
            }
            if gnu.is_extended() {
                // Additional sparse maps follow in extension blocks; keep
                // reading while each block's "extended" flag stays set.
                let mut ext = GnuExtSparseHeader::new();
                ext.isextended[0] = 1;
                while ext.is_extended() {
                    if !try_read_all(&mut &self.archive.inner, ext.as_mut_bytes())? {
                        return Err(other("failed to read extension"));
                    }

                    self.next += BLOCK_SIZE;
                    for block in ext.sparse.iter() {
                        add_block(block)?;
                    }
                }
            }
        }
        if cur != gnu.real_size()? {
            return Err(other(
                "mismatch in sparse file chunks and \
                 size in header",
            ));
        }
        entry.size = cur;
        if remaining > 0 {
            return Err(other(
                "mismatch in sparse file chunks and \
                 entry size in header",
            ));
        }
        Ok(())
    }

    /// Advance the underlying reader by `amt` bytes: seek when the archive
    /// supports it, otherwise read into a scratch buffer and discard.
    fn skip(&mut self, mut amt: u64) -> io::Result<()> {
        if let Some(seekable_archive) = self.seekable_archive {
            let pos = io::SeekFrom::Current(
                i64::try_from(amt).map_err(|_| other("seek position out of bounds"))?,
            );
            (&seekable_archive.inner).seek(pos)?;
        } else {
            let mut buf = [0u8; 4096 * 8];
            while amt > 0 {
                let n = cmp::min(amt, buf.len() as u64);
                let n = (&self.archive.inner).read(&mut buf[..n as usize])?;
                if n == 0 {
                    return Err(other("unexpected EOF during skip"));
                }
                amt -= n as u64;
            }
        }
        Ok(())
    }
}
impl<'a> Iterator for EntriesFields<'a> {
    type Item = io::Result<Entry<'a, io::Empty>>;

    fn next(&mut self) -> Option<io::Result<Entry<'a, io::Empty>>> {
        // Once the archive reports EOF or an error, stay finished forever.
        if self.done {
            return None;
        }
        match self.next_entry() {
            Ok(Some(entry)) => Some(Ok(entry)),
            Ok(None) => {
                self.done = true;
                None
            }
            Err(err) => {
                self.done = true;
                Some(Err(err))
            }
        }
    }
}
impl<R: ?Sized + Read> Read for &ArchiveInner<R> {
    // Reads from the shared underlying reader while keeping the archive's
    // absolute position (`pos`) in sync with the bytes consumed.
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        let i = self.obj.borrow_mut().read(into)?;
        self.pos.set(self.pos.get() + i as u64);
        Ok(i)
    }
}
impl<R: ?Sized + Seek> Seek for &ArchiveInner<R> {
    // Seeks the shared underlying reader and records the resulting absolute
    // position in `pos`.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let pos = self.obj.borrow_mut().seek(pos)?;
        self.pos.set(pos);
        Ok(pos)
    }
}
/// Try to fill the buffer from the reader.
///
/// If the reader reaches its end before filling the buffer at all, returns `false`.
/// Otherwise returns `true`.
fn try_read_all<R: Read>(r: &mut R, buf: &mut [u8]) -> io::Result<bool> {
    let mut filled = 0;
    loop {
        // Buffer complete (also covers a zero-length buffer immediately).
        if filled == buf.len() {
            return Ok(true);
        }
        match r.read(&mut buf[filled..])? {
            // EOF: acceptable only before any bytes were read.
            0 if filled == 0 => return Ok(false),
            0 => return Err(other("failed to read entire block")),
            n => filled += n,
        }
    }
}

1323
vendor/tar/src/builder.rs vendored Normal file

File diff suppressed because it is too large Load Diff

974
vendor/tar/src/entry.rs vendored Normal file
View File

@@ -0,0 +1,974 @@
use std::borrow::Cow;
use std::cmp;
use std::fs;
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::io::{self, Error, ErrorKind, SeekFrom};
use std::marker;
use std::path::{Component, Path, PathBuf};
use filetime::{self, FileTime};
use crate::archive::ArchiveInner;
use crate::error::TarError;
use crate::header::bytes2path;
use crate::other;
use crate::{Archive, Header, PaxExtensions};
/// A read-only view into an entry of an archive.
///
/// This structure is a window into a portion of a borrowed archive which can
/// be inspected. It acts as a file handle by implementing the Reader trait. An
/// entry cannot be rewritten once inserted into an archive.
pub struct Entry<'a, R: 'a + Read> {
    // All real state lives in the type-erased `EntryFields`; `R` is phantom.
    fields: EntryFields<'a>,
    _ignored: marker::PhantomData<&'a Archive<R>>,
}
// private implementation detail of `Entry`, but concrete (no type parameters)
// and also all-public to be constructed from other modules.
pub struct EntryFields<'a> {
    // GNU long-name / long-link payloads read from preceding special members.
    pub long_pathname: Option<Vec<u8>>,
    pub long_linkname: Option<Vec<u8>>,
    // Raw pax extension records describing this entry, if any.
    pub pax_extensions: Option<Vec<u8>>,
    // umask-style permission mask applied when unpacking.
    pub mask: u32,
    pub header: Header,
    // Effective entry size (may come from a pax `size` record, not the header).
    pub size: u64,
    // Absolute byte offsets of the header block and file data in the archive.
    pub header_pos: u64,
    pub file_pos: u64,
    // Sequence of reads (archive data and zero padding) making up the file.
    pub data: Vec<EntryIo<'a>>,
    pub unpack_xattrs: bool,
    pub preserve_permissions: bool,
    pub preserve_ownerships: bool,
    pub preserve_mtime: bool,
    pub overwrite: bool,
}
// One segment of an entry's contents: either synthesized zero padding (for
// sparse-file holes) or data read from the underlying archive.
pub enum EntryIo<'a> {
    Pad(io::Take<io::Repeat>),
    Data(io::Take<&'a ArchiveInner<dyn Read + 'a>>),
}
/// When unpacking items the unpacked thing is returned to allow custom
/// additional handling by users. Today the File is returned, in future
/// the enum may be extended with kinds for links, directories etc.
#[derive(Debug)]
pub enum Unpacked {
    /// A file was unpacked.
    File(std::fs::File),
    /// A directory, hardlink, symlink, or other node was unpacked.
    // Hidden variant keeps the enum effectively non-exhaustive for
    // downstream `match`es.
    #[doc(hidden)]
    __Nonexhaustive,
}
impl<'a, R: Read> Entry<'a, R> {
/// Returns the path name for this entry.
///
/// This method may fail if the pathname is not valid Unicode and this is
/// called on a Windows platform.
///
/// Note that this function will convert any `\` characters to directory
/// separators, and it will not always return the same value as
/// `self.header().path()` as some archive formats have support for longer
/// path names described in separate entries.
///
/// It is recommended to use this method instead of inspecting the `header`
/// directly to ensure that various archive formats are handled correctly.
pub fn path(&self) -> io::Result<Cow<'_, Path>> {
self.fields.path()
}
/// Returns the raw bytes listed for this entry.
///
/// Note that this function will convert any `\` characters to directory
/// separators, and it will not always return the same value as
/// `self.header().path_bytes()` as some archive formats have support for
/// longer path names described in separate entries.
pub fn path_bytes(&self) -> Cow<'_, [u8]> {
self.fields.path_bytes()
}
/// Returns the link name for this entry, if any is found.
///
/// This method may fail if the pathname is not valid Unicode and this is
/// called on a Windows platform. `Ok(None)` being returned, however,
/// indicates that the link name was not present.
///
/// Note that this function will convert any `\` characters to directory
/// separators, and it will not always return the same value as
/// `self.header().link_name()` as some archive formats have support for
/// longer path names described in separate entries.
///
/// It is recommended to use this method instead of inspecting the `header`
/// directly to ensure that various archive formats are handled correctly.
pub fn link_name(&self) -> io::Result<Option<Cow<'_, Path>>> {
self.fields.link_name()
}
/// Returns the link name for this entry, in bytes, if listed.
///
/// Note that this will not always return the same value as
/// `self.header().link_name_bytes()` as some archive formats have support for
/// longer path names described in separate entries.
pub fn link_name_bytes(&self) -> Option<Cow<'_, [u8]>> {
self.fields.link_name_bytes()
}
/// Returns an iterator over the pax extensions contained in this entry.
///
/// Pax extensions are a form of archive where extra metadata is stored in
/// key/value pairs in entries before the entry they're intended to
/// describe. For example this can be used to describe long file name or
/// other metadata like atime/ctime/mtime in more precision.
///
/// The returned iterator will yield key/value pairs for each extension.
///
/// `None` will be returned if this entry does not indicate that it itself
/// contains extensions, or if there were no previous extensions describing
/// it.
///
/// Note that global pax extensions are intended to be applied to all
/// archive entries.
///
/// Also note that this function will read the entire entry if the entry
/// itself is a list of extensions.
pub fn pax_extensions(&mut self) -> io::Result<Option<PaxExtensions<'_>>> {
self.fields.pax_extensions()
}
/// Returns access to the header of this entry in the archive.
///
/// This provides access to the metadata for this entry in the archive.
pub fn header(&self) -> &Header {
&self.fields.header
}
/// Returns access to the size of this entry in the archive.
///
/// In the event the size is stored in a pax extension, that size value
/// will be referenced. Otherwise, the entry size will be stored in the header.
pub fn size(&self) -> u64 {
self.fields.size
}
/// Returns the starting position, in bytes, of the header of this entry in
/// the archive.
///
/// The header is always a contiguous section of 512 bytes, so if the
/// underlying reader implements `Seek`, then the slice from `header_pos` to
/// `header_pos + 512` contains the raw header bytes.
pub fn raw_header_position(&self) -> u64 {
self.fields.header_pos
}
/// Returns the starting position, in bytes, of the file of this entry in
/// the archive.
///
/// If the file of this entry is continuous (e.g. not a sparse file), and
/// if the underlying reader implements `Seek`, then the slice from
/// `file_pos` to `file_pos + entry_size` contains the raw file bytes.
pub fn raw_file_position(&self) -> u64 {
self.fields.file_pos
}
/// Writes this file to the specified location.
///
/// This function will write the entire contents of this file into the
/// location specified by `dst`. Metadata will also be propagated to the
/// path `dst`.
///
/// This function will create a file at the path `dst`, and it is required
/// that the intermediate directories are created. Any existing file at the
/// location `dst` will be overwritten.
///
/// > **Note**: This function does not have as many sanity checks as
/// > `Archive::unpack` or `Entry::unpack_in`. As a result if you're
/// > thinking of unpacking untrusted tarballs you may want to review the
/// > implementations of the previous two functions and perhaps implement
/// > similar logic yourself.
///
/// # Examples
///
/// ```no_run
/// use std::fs::File;
/// use tar::Archive;
///
/// let mut ar = Archive::new(File::open("foo.tar").unwrap());
///
/// for (i, file) in ar.entries().unwrap().enumerate() {
/// let mut file = file.unwrap();
/// file.unpack(format!("file-{}", i)).unwrap();
/// }
/// ```
pub fn unpack<P: AsRef<Path>>(&mut self, dst: P) -> io::Result<Unpacked> {
self.fields.unpack(None, dst.as_ref())
}
/// Extracts this file under the specified path, avoiding security issues.
///
/// This function will write the entire contents of this file into the
/// location obtained by appending the path of this file in the archive to
/// `dst`, creating any intermediate directories if needed. Metadata will
/// also be propagated to the path `dst`. Any existing file at the location
/// `dst` will be overwritten.
///
/// # Security
///
/// See [`Archive::unpack`].
///
/// # Examples
///
/// ```no_run
/// use std::fs::File;
/// use tar::Archive;
///
/// let mut ar = Archive::new(File::open("foo.tar").unwrap());
///
/// for (i, file) in ar.entries().unwrap().enumerate() {
/// let mut file = file.unwrap();
/// file.unpack_in("target").unwrap();
/// }
/// ```
pub fn unpack_in<P: AsRef<Path>>(&mut self, dst: P) -> io::Result<bool> {
self.fields.unpack_in(dst.as_ref())
}
/// Set the mask of the permission bits when unpacking this entry.
///
/// The mask will be inverted when applying against a mode, similar to how
/// `umask` works on Unix. In logical notation it looks like:
///
/// ```text
/// new_mode = old_mode & (~mask)
/// ```
///
/// The mask is 0 by default and is currently only implemented on Unix.
pub fn set_mask(&mut self, mask: u32) {
self.fields.mask = mask;
}
/// Indicate whether extended file attributes (xattrs on Unix) are preserved
/// when unpacking this entry.
///
/// This flag is disabled by default and is currently only implemented on
/// Unix using xattr support. This may eventually be implemented for
/// Windows, however, if other archive implementations are found which do
/// this as well.
    pub fn set_unpack_xattrs(&mut self, unpack_xattrs: bool) {
        // Consulted during `unpack` to decide whether PAX `SCHILY.xattr.*`
        // records are restored as filesystem extended attributes.
        self.fields.unpack_xattrs = unpack_xattrs;
    }
/// Indicate whether extended permissions (like suid on Unix) are preserved
/// when unpacking this entry.
///
/// This flag is disabled by default and is currently only implemented on
/// Unix.
    pub fn set_preserve_permissions(&mut self, preserve: bool) {
        // When false, only the lower 0o777 mode bits are applied on unpack;
        // extended bits (e.g. suid) are stripped.
        self.fields.preserve_permissions = preserve;
    }
/// Indicate whether access time information is preserved when unpacking
/// this entry.
///
/// This flag is enabled by default.
    pub fn set_preserve_mtime(&mut self, preserve: bool) {
        // When set, unpacking restores the header's mtime (an mtime of 0 is
        // nudged to 1 for tools that don't ingest 0-mtime files well).
        self.fields.preserve_mtime = preserve;
    }
}
impl<'a, R: Read> Read for Entry<'a, R> {
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        // Delegates to the inner fields' reader, which walks the entry's
        // queued data/padding segments in order.
        self.fields.read(into)
    }
}
impl<'a> EntryFields<'a> {
    /// Extracts the raw fields backing a typed `Entry`.
    pub fn from<R: Read>(entry: Entry<R>) -> EntryFields {
        entry.fields
    }
    /// Wraps these fields back up into a typed `Entry` for reader type `R`.
    pub fn into_entry<R: Read>(self) -> Entry<'a, R> {
        Entry {
            fields: self,
            _ignored: marker::PhantomData,
        }
    }
    /// Reads the remainder of this entry's data into a single owned buffer.
    pub fn read_all(&mut self) -> io::Result<Vec<u8>> {
        // Preallocate some data but don't let ourselves get too crazy now.
        let cap = cmp::min(self.size, 128 * 1024);
        let mut v = Vec::with_capacity(cap as usize);
        self.read_to_end(&mut v).map(|_| v)
    }
    /// Returns this entry's path, decoded from `path_bytes`.
    fn path(&self) -> io::Result<Cow<'_, Path>> {
        bytes2path(self.path_bytes())
    }
    /// Returns the raw path bytes, preferring (in order) a GNU long name
    /// record, a PAX `path` extension, and finally the header's own field.
    fn path_bytes(&self) -> Cow<'_, [u8]> {
        match self.long_pathname {
            Some(ref bytes) => {
                // The long name data may carry a trailing NUL; strip it.
                if let Some(&0) = bytes.last() {
                    Cow::Borrowed(&bytes[..bytes.len() - 1])
                } else {
                    Cow::Borrowed(bytes)
                }
            }
            None => {
                if let Some(ref pax) = self.pax_extensions {
                    let pax = PaxExtensions::new(pax)
                        .filter_map(|f| f.ok())
                        .find(|f| f.key_bytes() == b"path")
                        .map(|f| f.value_bytes());
                    if let Some(field) = pax {
                        return Cow::Borrowed(field);
                    }
                }
                self.header.path_bytes()
            }
        }
    }
    /// Gets the path in a "lossy" way, used for error reporting ONLY.
    fn path_lossy(&self) -> String {
        String::from_utf8_lossy(&self.path_bytes()).to_string()
    }
    /// Returns the link target path, if any, decoded from `link_name_bytes`.
    fn link_name(&self) -> io::Result<Option<Cow<'_, Path>>> {
        match self.link_name_bytes() {
            Some(bytes) => bytes2path(bytes).map(Some),
            None => Ok(None),
        }
    }
    /// Returns the raw link target bytes, preferring (in order) a GNU long
    /// link record, a PAX `linkpath` extension, and the header's own field.
    fn link_name_bytes(&self) -> Option<Cow<'_, [u8]>> {
        match self.long_linkname {
            Some(ref bytes) => {
                // The long link data may carry a trailing NUL; strip it.
                if let Some(&0) = bytes.last() {
                    Some(Cow::Borrowed(&bytes[..bytes.len() - 1]))
                } else {
                    Some(Cow::Borrowed(bytes))
                }
            }
            None => {
                if let Some(ref pax) = self.pax_extensions {
                    let pax = PaxExtensions::new(pax)
                        .filter_map(|f| f.ok())
                        .find(|f| f.key_bytes() == b"linkpath")
                        .map(|f| f.value_bytes());
                    if let Some(field) = pax {
                        return Some(Cow::Borrowed(field));
                    }
                }
                self.header.link_name_bytes()
            }
        }
    }
    /// Lazily reads and caches this entry's PAX extension data, returning an
    /// iterator over the records (or `None` if this is not a PAX entry).
    fn pax_extensions(&mut self) -> io::Result<Option<PaxExtensions<'_>>> {
        if self.pax_extensions.is_none() {
            if !self.header.entry_type().is_pax_global_extensions()
                && !self.header.entry_type().is_pax_local_extensions()
            {
                return Ok(None);
            }
            // Consumes the entry's remaining data stream to fill the cache.
            self.pax_extensions = Some(self.read_all()?);
        }
        Ok(Some(PaxExtensions::new(
            self.pax_extensions.as_ref().unwrap(),
        )))
    }
    /// Unpacks this entry beneath `dst`, rejecting path-traversal attempts.
    /// Returns `Ok(false)` when the entry was skipped for safety reasons.
    fn unpack_in(&mut self, dst: &Path) -> io::Result<bool> {
        // Notes regarding bsdtar 2.8.3 / libarchive 2.8.3:
        // * Leading '/'s are trimmed. For example, `///test` is treated as
        //   `test`.
        // * If the filename contains '..', then the file is skipped when
        //   extracting the tarball.
        // * '//' within a filename is effectively skipped. An error is
        //   logged, but otherwise the effect is as if any two or more
        //   adjacent '/'s within the filename were consolidated into one
        //   '/'.
        //
        // Most of this is handled by the `path` module of the standard
        // library, but we specially handle a few cases here as well.
        let mut file_dst = dst.to_path_buf();
        {
            let path = self.path().map_err(|e| {
                TarError::new(
                    format!("invalid path in entry header: {}", self.path_lossy()),
                    e,
                )
            })?;
            for part in path.components() {
                match part {
                    // Leading '/' characters, root paths, and '.'
                    // components are just ignored and treated as "empty
                    // components"
                    Component::Prefix(..) | Component::RootDir | Component::CurDir => continue,
                    // If any part of the filename is '..', then skip over
                    // unpacking the file to prevent directory traversal
                    // security issues. See, e.g.: CVE-2001-1267,
                    // CVE-2002-0399, CVE-2005-1918, CVE-2007-4131
                    Component::ParentDir => return Ok(false),
                    Component::Normal(part) => file_dst.push(part),
                }
            }
        }
        // Skip cases where only slashes or '.' parts were seen, because
        // this is effectively an empty filename.
        if *dst == *file_dst {
            return Ok(true);
        }
        // Skip entries without a parent (i.e. outside of FS root)
        let parent = match file_dst.parent() {
            Some(p) => p,
            None => return Ok(false),
        };
        // Create missing ancestor directories, validating each against `dst`.
        self.ensure_dir_created(dst, parent)
            .map_err(|e| TarError::new(format!("failed to create `{}`", parent.display()), e))?;
        // Canonicalize and confirm the parent still resolves inside `dst`.
        let canon_target = self.validate_inside_dst(dst, parent)?;
        self.unpack(Some(&canon_target), &file_dst)
            .map_err(|e| TarError::new(format!("failed to unpack `{}`", file_dst.display()), e))?;
        Ok(true)
    }
    /// Unpack as destination directory `dst`.
    fn unpack_dir(&mut self, dst: &Path) -> io::Result<()> {
        // If the directory already exists just let it slide
        fs::create_dir(dst).or_else(|err| {
            if err.kind() == ErrorKind::AlreadyExists {
                let prev = fs::symlink_metadata(dst);
                if prev.map(|m| m.is_dir()).unwrap_or(false) {
                    return Ok(());
                }
            }
            Err(Error::new(
                err.kind(),
                format!("{} when creating dir {}", err, dst.display()),
            ))
        })
    }
    /// Unpacks this entry at `dst`, dispatching on the entry type (directory,
    /// hard/symbolic link, metadata-only, or regular file) and applying
    /// ownership, permission, mtime, and xattr metadata as configured.
    /// `target_base`, when set, is the canonicalized extraction root used to
    /// validate hard-link targets.
    fn unpack(&mut self, target_base: Option<&Path>, dst: &Path) -> io::Result<Unpacked> {
        // Applies ownership, then permissions, from `header` to `dst`/`f`.
        fn set_perms_ownerships(
            dst: &Path,
            f: Option<&mut std::fs::File>,
            header: &Header,
            mask: u32,
            perms: bool,
            ownerships: bool,
        ) -> io::Result<()> {
            // ownerships need to be set first to avoid stripping SUID bits in the permissions ...
            if ownerships {
                set_ownerships(dst, &f, header.uid()?, header.gid()?)?;
            }
            // ... then set permissions, SUID bits set here is kept
            if let Ok(mode) = header.mode() {
                set_perms(dst, f, mode, mask, perms)?;
            }
            Ok(())
        }
        fn get_mtime(header: &Header) -> Option<FileTime> {
            header.mtime().ok().map(|mtime| {
                // For some more information on this see the comments in
                // `Header::fill_platform_from`, but the general idea is that
                // we're trying to avoid 0-mtime files coming out of archives
                // since some tools don't ingest them well. Perhaps one day
                // when Cargo stops working with 0-mtime archives we can remove
                // this.
                let mtime = if mtime == 0 { 1 } else { mtime };
                FileTime::from_unix_time(mtime as i64, 0)
            })
        }
        let kind = self.header.entry_type();
        if kind.is_dir() {
            self.unpack_dir(dst)?;
            set_perms_ownerships(
                dst,
                None,
                &self.header,
                self.mask,
                self.preserve_permissions,
                self.preserve_ownerships,
            )?;
            return Ok(Unpacked::__Nonexhaustive);
        } else if kind.is_hard_link() || kind.is_symlink() {
            let src = match self.link_name()? {
                Some(name) => name,
                None => {
                    return Err(other(&format!(
                        "hard link listed for {} but no link name found",
                        String::from_utf8_lossy(self.header.as_bytes())
                    )));
                }
            };
            if src.iter().count() == 0 {
                return Err(other(&format!(
                    "symlink destination for {} is empty",
                    String::from_utf8_lossy(self.header.as_bytes())
                )));
            }
            if kind.is_hard_link() {
                let link_src = match target_base {
                    // If we're unpacking within a directory then ensure that
                    // the destination of this hard link is both present and
                    // inside our own directory. This is needed because we want
                    // to make sure to not overwrite anything outside the root.
                    //
                    // Note that this logic is only needed for hard links
                    // currently. With symlinks the `validate_inside_dst` which
                    // happens before this method as part of `unpack_in` will
                    // use canonicalization to ensure this guarantee. For hard
                    // links though they're canonicalized to their existing path
                    // so we need to validate at this time.
                    Some(p) => {
                        let link_src = p.join(src);
                        self.validate_inside_dst(p, &link_src)?;
                        link_src
                    }
                    None => src.into_owned(),
                };
                fs::hard_link(&link_src, dst).map_err(|err| {
                    Error::new(
                        err.kind(),
                        format!(
                            "{} when hard linking {} to {}",
                            err,
                            link_src.display(),
                            dst.display()
                        ),
                    )
                })?;
            } else {
                symlink(&src, dst)
                    .or_else(|err_io| {
                        if err_io.kind() == io::ErrorKind::AlreadyExists && self.overwrite {
                            // remove dest and try once more
                            std::fs::remove_file(dst).and_then(|()| symlink(&src, dst))
                        } else {
                            Err(err_io)
                        }
                    })
                    .map_err(|err| {
                        Error::new(
                            err.kind(),
                            format!(
                                "{} when symlinking {} to {}",
                                err,
                                src.display(),
                                dst.display()
                            ),
                        )
                    })?;
                // While permissions on symlinks are meaningless on most systems, the ownership
                // of symlinks is important as it dictates the access control to the symlink
                // itself.
                if self.preserve_ownerships {
                    set_ownerships(dst, &None, self.header.uid()?, self.header.gid()?)?;
                }
                if self.preserve_mtime {
                    if let Some(mtime) = get_mtime(&self.header) {
                        filetime::set_symlink_file_times(dst, mtime, mtime).map_err(|e| {
                            TarError::new(format!("failed to set mtime for `{}`", dst.display()), e)
                        })?;
                    }
                }
            }
            return Ok(Unpacked::__Nonexhaustive);
            // Platform-specific `symlink` helpers referenced above; as items
            // they remain in scope even after the `return`.
            #[cfg(target_arch = "wasm32")]
            #[allow(unused_variables)]
            fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
                Err(io::Error::new(io::ErrorKind::Other, "Not implemented"))
            }
            #[cfg(windows)]
            fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
                ::std::os::windows::fs::symlink_file(src, dst)
            }
            #[cfg(all(unix, not(target_arch = "wasm32")))]
            fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
                ::std::os::unix::fs::symlink(src, dst)
            }
        } else if kind.is_pax_global_extensions()
            || kind.is_pax_local_extensions()
            || kind.is_gnu_longname()
            || kind.is_gnu_longlink()
        {
            // Metadata-only entries have no filesystem representation.
            return Ok(Unpacked::__Nonexhaustive);
        };
        // Old BSD-tar compatibility.
        // Names that have a trailing slash should be treated as a directory.
        // Only applies to old headers.
        if self.header.as_ustar().is_none() && self.path_bytes().ends_with(b"/") {
            self.unpack_dir(dst)?;
            set_perms_ownerships(
                dst,
                None,
                &self.header,
                self.mask,
                self.preserve_permissions,
                self.preserve_ownerships,
            )?;
            return Ok(Unpacked::__Nonexhaustive);
        }
        // Note the lack of `else` clause above. According to the FreeBSD
        // documentation:
        //
        // > A POSIX-compliant implementation must treat any unrecognized
        // > typeflag value as a regular file.
        //
        // As a result if we don't recognize the kind we just write out the file
        // as we would normally.
        // Ensure we write a new file rather than overwriting in-place which
        // is attackable; if an existing file is found unlink it.
        fn open(dst: &Path) -> io::Result<std::fs::File> {
            OpenOptions::new().write(true).create_new(true).open(dst)
        }
        let mut f = (|| -> io::Result<std::fs::File> {
            let mut f = open(dst).or_else(|err| {
                if err.kind() != ErrorKind::AlreadyExists {
                    Err(err)
                } else if self.overwrite {
                    // If the file vanished between the failed create and the
                    // remove, just retry the create.
                    match fs::remove_file(dst) {
                        Ok(()) => open(dst),
                        Err(ref e) if e.kind() == io::ErrorKind::NotFound => open(dst),
                        Err(e) => Err(e),
                    }
                } else {
                    Err(err)
                }
            })?;
            for io in self.data.drain(..) {
                match io {
                    EntryIo::Data(mut d) => {
                        let expected = d.limit();
                        if io::copy(&mut d, &mut f)? != expected {
                            return Err(other("failed to write entire file"));
                        }
                    }
                    EntryIo::Pad(d) => {
                        // TODO: checked cast to i64
                        // Padding is realized by seeking forward and extending
                        // the file rather than writing zero bytes.
                        let to = SeekFrom::Current(d.limit() as i64);
                        let size = f.seek(to)?;
                        f.set_len(size)?;
                    }
                }
            }
            Ok(f)
        })()
        .map_err(|e| {
            let header = self.header.path_bytes();
            TarError::new(
                format!(
                    "failed to unpack `{}` into `{}`",
                    String::from_utf8_lossy(&header),
                    dst.display()
                ),
                e,
            )
        })?;
        if self.preserve_mtime {
            if let Some(mtime) = get_mtime(&self.header) {
                filetime::set_file_handle_times(&f, Some(mtime), Some(mtime)).map_err(|e| {
                    TarError::new(format!("failed to set mtime for `{}`", dst.display()), e)
                })?;
            }
        }
        set_perms_ownerships(
            dst,
            Some(&mut f),
            &self.header,
            self.mask,
            self.preserve_permissions,
            self.preserve_ownerships,
        )?;
        if self.unpack_xattrs {
            set_xattrs(self, dst)?;
        }
        return Ok(Unpacked::File(f));
        // Helper items below are referenced above; being items, they remain
        // in scope for the whole function body.
        // Wraps `_set_ownerships` with a descriptive `TarError`.
        fn set_ownerships(
            dst: &Path,
            f: &Option<&mut std::fs::File>,
            uid: u64,
            gid: u64,
        ) -> Result<(), TarError> {
            _set_ownerships(dst, f, uid, gid).map_err(|e| {
                TarError::new(
                    format!(
                        "failed to set ownerships to uid={:?}, gid={:?} \
                         for `{}`",
                        uid,
                        gid,
                        dst.display()
                    ),
                    e,
                )
            })
        }
        // Unix: `fchown` when a handle is available, `lchown` otherwise (the
        // latter does not follow symlinks).
        #[cfg(all(unix, not(target_arch = "wasm32")))]
        fn _set_ownerships(
            dst: &Path,
            f: &Option<&mut std::fs::File>,
            uid: u64,
            gid: u64,
        ) -> io::Result<()> {
            use std::os::unix::prelude::*;
            let uid: libc::uid_t = uid.try_into().map_err(|_| {
                io::Error::new(io::ErrorKind::Other, format!("UID {} is too large!", uid))
            })?;
            let gid: libc::gid_t = gid.try_into().map_err(|_| {
                io::Error::new(io::ErrorKind::Other, format!("GID {} is too large!", gid))
            })?;
            match f {
                Some(f) => unsafe {
                    let fd = f.as_raw_fd();
                    if libc::fchown(fd, uid, gid) != 0 {
                        Err(io::Error::last_os_error())
                    } else {
                        Ok(())
                    }
                },
                None => unsafe {
                    let path = std::ffi::CString::new(dst.as_os_str().as_bytes()).map_err(|e| {
                        io::Error::new(
                            io::ErrorKind::Other,
                            format!("path contains null character: {:?}", e),
                        )
                    })?;
                    if libc::lchown(path.as_ptr(), uid, gid) != 0 {
                        Err(io::Error::last_os_error())
                    } else {
                        Ok(())
                    }
                },
            }
        }
        // Windows does not support posix numeric ownership IDs
        #[cfg(any(windows, target_arch = "wasm32"))]
        fn _set_ownerships(
            _: &Path,
            _: &Option<&mut std::fs::File>,
            _: u64,
            _: u64,
        ) -> io::Result<()> {
            Ok(())
        }
        // Wraps `_set_perms` with a descriptive `TarError`.
        fn set_perms(
            dst: &Path,
            f: Option<&mut std::fs::File>,
            mode: u32,
            mask: u32,
            preserve: bool,
        ) -> Result<(), TarError> {
            _set_perms(dst, f, mode, mask, preserve).map_err(|e| {
                TarError::new(
                    format!(
                        "failed to set permissions to {:o} \
                         for `{}`",
                        mode,
                        dst.display()
                    ),
                    e,
                )
            })
        }
        // Unix: apply the (possibly truncated) mode bits minus the mask.
        #[cfg(all(unix, not(target_arch = "wasm32")))]
        fn _set_perms(
            dst: &Path,
            f: Option<&mut std::fs::File>,
            mode: u32,
            mask: u32,
            preserve: bool,
        ) -> io::Result<()> {
            use std::os::unix::prelude::*;
            let mode = if preserve { mode } else { mode & 0o777 };
            let mode = mode & !mask;
            let perm = fs::Permissions::from_mode(mode as _);
            match f {
                Some(f) => f.set_permissions(perm),
                None => fs::set_permissions(dst, perm),
            }
        }
        // Windows: only the read-only bit can be modeled; set it when the
        // archive mode lacks owner-write permission.
        #[cfg(windows)]
        fn _set_perms(
            dst: &Path,
            f: Option<&mut std::fs::File>,
            mode: u32,
            _mask: u32,
            _preserve: bool,
        ) -> io::Result<()> {
            if mode & 0o200 == 0o200 {
                return Ok(());
            }
            match f {
                Some(f) => {
                    let mut perm = f.metadata()?.permissions();
                    perm.set_readonly(true);
                    f.set_permissions(perm)
                }
                None => {
                    let mut perm = fs::metadata(dst)?.permissions();
                    perm.set_readonly(true);
                    fs::set_permissions(dst, perm)
                }
            }
        }
        #[cfg(target_arch = "wasm32")]
        #[allow(unused_variables)]
        fn _set_perms(
            dst: &Path,
            f: Option<&mut std::fs::File>,
            mode: u32,
            mask: u32,
            _preserve: bool,
        ) -> io::Result<()> {
            Err(io::Error::new(io::ErrorKind::Other, "Not implemented"))
        }
        // Restores PAX `SCHILY.xattr.*` records as extended attributes.
        #[cfg(all(unix, not(target_arch = "wasm32"), feature = "xattr"))]
        fn set_xattrs(me: &mut EntryFields, dst: &Path) -> io::Result<()> {
            use std::ffi::OsStr;
            use std::os::unix::prelude::*;
            let exts = match me.pax_extensions() {
                Ok(Some(e)) => e,
                _ => return Ok(()),
            };
            let exts = exts
                .filter_map(|e| e.ok())
                .filter_map(|e| {
                    let key = e.key_bytes();
                    let prefix = crate::pax::PAX_SCHILYXATTR.as_bytes();
                    key.strip_prefix(prefix).map(|rest| (rest, e))
                })
                .map(|(key, e)| (OsStr::from_bytes(key), e.value_bytes()));
            for (key, value) in exts {
                xattr::set(dst, key, value).map_err(|e| {
                    TarError::new(
                        format!(
                            "failed to set extended \
                             attributes to {}. \
                             Xattrs: key={:?}, value={:?}.",
                            dst.display(),
                            key,
                            String::from_utf8_lossy(value)
                        ),
                        e,
                    )
                })?;
            }
            Ok(())
        }
        // Windows does not completely support posix xattrs
        // https://en.wikipedia.org/wiki/Extended_file_attributes#Windows_NT
        #[cfg(any(windows, not(feature = "xattr"), target_arch = "wasm32"))]
        fn set_xattrs(_: &mut EntryFields, _: &Path) -> io::Result<()> {
            Ok(())
        }
    }
    /// Creates `dir` and any missing ancestors beneath `dst`, validating each
    /// created ancestor's parent against `dst` before creating it.
    fn ensure_dir_created(&self, dst: &Path, dir: &Path) -> io::Result<()> {
        let mut ancestor = dir;
        let mut dirs_to_create = Vec::new();
        // Walk upwards collecting ancestors that do not exist yet.
        while ancestor.symlink_metadata().is_err() {
            dirs_to_create.push(ancestor);
            if let Some(parent) = ancestor.parent() {
                ancestor = parent;
            } else {
                break;
            }
        }
        // Create them top-down so each parent can be validated before use.
        for ancestor in dirs_to_create.into_iter().rev() {
            if let Some(parent) = ancestor.parent() {
                self.validate_inside_dst(dst, parent)?;
            }
            fs::create_dir_all(ancestor)?;
        }
        Ok(())
    }
    /// Canonicalizes both `file_dst` and `dst` and errors unless the former
    /// lies within the latter; returns the canonicalized `dst` on success.
    fn validate_inside_dst(&self, dst: &Path, file_dst: &Path) -> io::Result<PathBuf> {
        // Abort if target (canonical) parent is outside of `dst`
        let canon_parent = file_dst.canonicalize().map_err(|err| {
            Error::new(
                err.kind(),
                format!("{} while canonicalizing {}", err, file_dst.display()),
            )
        })?;
        let canon_target = dst.canonicalize().map_err(|err| {
            Error::new(
                err.kind(),
                format!("{} while canonicalizing {}", err, dst.display()),
            )
        })?;
        if !canon_parent.starts_with(&canon_target) {
            let err = TarError::new(
                format!(
                    "trying to unpack outside of destination path: {}",
                    canon_target.display()
                ),
                // TODO: use ErrorKind::InvalidInput here? (minor breaking change)
                Error::new(ErrorKind::Other, "Invalid argument"),
            );
            return Err(err.into());
        }
        Ok(canon_target)
    }
}
impl<'a> Read for EntryFields<'a> {
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        // Drain the queued segments front-to-back: an exhausted segment is
        // discarded and the next one tried; any error or non-empty read is
        // returned to the caller immediately.
        while !self.data.is_empty() {
            let n = self.data[0].read(into)?;
            if n != 0 {
                return Ok(n);
            }
            self.data.remove(0);
        }
        Ok(0)
    }
}
impl<'a> Read for EntryIo<'a> {
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        // Both variants wrap a reader; forward to whichever is active.
        let inner: &mut dyn Read = match self {
            EntryIo::Pad(ref mut reader) => reader,
            EntryIo::Data(ref mut reader) => reader,
        };
        inner.read(into)
    }
}

197
vendor/tar/src/entry_type.rs vendored Normal file
View File

@@ -0,0 +1,197 @@
// See https://en.wikipedia.org/wiki/Tar_%28computing%29#UStar_format
/// Indicate the type of content described by a header.
///
/// This is returned by [`crate::Header::entry_type()`] and should be used to
/// distinguish between types of content.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum EntryType {
    /// Regular file
    Regular,
    /// Hard link
    Link,
    /// Symbolic link
    Symlink,
    /// Character device
    Char,
    /// Block device
    Block,
    /// Directory
    Directory,
    /// Named pipe (fifo)
    Fifo,
    /// Implementation-defined 'high-performance' type, treated as regular file
    Continuous,
    /// GNU extension - long file name
    GNULongName,
    /// GNU extension - long link name (link target)
    GNULongLink,
    /// GNU extension - sparse file
    GNUSparse,
    /// Global extended header
    XGlobalHeader,
    /// Extended Header
    XHeader,
    /// Hints that destructuring should not be exhaustive.
    ///
    /// This enum may grow additional variants, so this makes sure clients
    /// don't count on exhaustive matching. (Otherwise, adding a new variant
    /// could break existing code.)
    #[doc(hidden)]
    __Nonexhaustive(u8),
}
impl EntryType {
    /// Creates a new entry type from a raw byte.
    ///
    /// Note that the other named constructors of entry type may be more
    /// appropriate to create a file type from.
    pub fn new(byte: u8) -> EntryType {
        match byte {
            // A NUL typeflag is the pre-POSIX spelling of "regular file".
            b'\x00' | b'0' => EntryType::Regular,
            b'1' => EntryType::Link,
            b'2' => EntryType::Symlink,
            b'3' => EntryType::Char,
            b'4' => EntryType::Block,
            b'5' => EntryType::Directory,
            b'6' => EntryType::Fifo,
            b'7' => EntryType::Continuous,
            b'x' => EntryType::XHeader,
            b'g' => EntryType::XGlobalHeader,
            b'L' => EntryType::GNULongName,
            b'K' => EntryType::GNULongLink,
            b'S' => EntryType::GNUSparse,
            // Anything else is preserved verbatim for round-tripping.
            unknown => EntryType::__Nonexhaustive(unknown),
        }
    }
    /// Returns the raw underlying byte that this entry type represents.
    pub fn as_byte(&self) -> u8 {
        match self {
            EntryType::Regular => b'0',
            EntryType::Link => b'1',
            EntryType::Symlink => b'2',
            EntryType::Char => b'3',
            EntryType::Block => b'4',
            EntryType::Directory => b'5',
            EntryType::Fifo => b'6',
            EntryType::Continuous => b'7',
            EntryType::XHeader => b'x',
            EntryType::XGlobalHeader => b'g',
            EntryType::GNULongName => b'L',
            EntryType::GNULongLink => b'K',
            EntryType::GNUSparse => b'S',
            EntryType::__Nonexhaustive(raw) => *raw,
        }
    }
    /// Creates a new entry type representing a regular file.
    pub fn file() -> EntryType {
        EntryType::Regular
    }
    /// Creates a new entry type representing a hard link.
    pub fn hard_link() -> EntryType {
        EntryType::Link
    }
    /// Creates a new entry type representing a symlink.
    pub fn symlink() -> EntryType {
        EntryType::Symlink
    }
    /// Creates a new entry type representing a character special device.
    pub fn character_special() -> EntryType {
        EntryType::Char
    }
    /// Creates a new entry type representing a block special device.
    pub fn block_special() -> EntryType {
        EntryType::Block
    }
    /// Creates a new entry type representing a directory.
    pub fn dir() -> EntryType {
        EntryType::Directory
    }
    /// Creates a new entry type representing a FIFO.
    pub fn fifo() -> EntryType {
        EntryType::Fifo
    }
    /// Creates a new entry type representing a contiguous file.
    pub fn contiguous() -> EntryType {
        EntryType::Continuous
    }
    /// Returns whether this type represents a regular file.
    pub fn is_file(&self) -> bool {
        matches!(self, EntryType::Regular)
    }
    /// Returns whether this type represents a hard link.
    pub fn is_hard_link(&self) -> bool {
        matches!(self, EntryType::Link)
    }
    /// Returns whether this type represents a symlink.
    pub fn is_symlink(&self) -> bool {
        matches!(self, EntryType::Symlink)
    }
    /// Returns whether this type represents a character special device.
    pub fn is_character_special(&self) -> bool {
        matches!(self, EntryType::Char)
    }
    /// Returns whether this type represents a block special device.
    pub fn is_block_special(&self) -> bool {
        matches!(self, EntryType::Block)
    }
    /// Returns whether this type represents a directory.
    pub fn is_dir(&self) -> bool {
        matches!(self, EntryType::Directory)
    }
    /// Returns whether this type represents a FIFO.
    pub fn is_fifo(&self) -> bool {
        matches!(self, EntryType::Fifo)
    }
    /// Returns whether this type represents a contiguous file.
    pub fn is_contiguous(&self) -> bool {
        matches!(self, EntryType::Continuous)
    }
    /// Returns whether this type represents a GNU long name header.
    pub fn is_gnu_longname(&self) -> bool {
        matches!(self, EntryType::GNULongName)
    }
    /// Returns whether this type represents a GNU sparse header.
    pub fn is_gnu_sparse(&self) -> bool {
        matches!(self, EntryType::GNUSparse)
    }
    /// Returns whether this type represents a GNU long link header.
    pub fn is_gnu_longlink(&self) -> bool {
        matches!(self, EntryType::GNULongLink)
    }
    /// Returns whether this type represents PAX global extensions, that
    /// should affect all following entries. For more, see [PAX].
    ///
    /// [PAX]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
    pub fn is_pax_global_extensions(&self) -> bool {
        matches!(self, EntryType::XGlobalHeader)
    }
    /// Returns whether this type represents PAX local extensions; these
    /// only affect the current entry. For more, see [PAX].
    ///
    /// [PAX]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
    pub fn is_pax_local_extensions(&self) -> bool {
        matches!(self, EntryType::XHeader)
    }
}

41
vendor/tar/src/error.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Error};
/// Internal error type pairing a human-readable description with the
/// underlying I/O error that caused it.
#[derive(Debug)]
pub struct TarError {
    desc: Cow<'static, str>,
    io: io::Error,
}
impl TarError {
    /// Wraps `err` with the contextual description `desc`.
    pub fn new(desc: impl Into<Cow<'static, str>>, err: Error) -> TarError {
        let desc = desc.into();
        TarError { desc, io: err }
    }
}
impl error::Error for TarError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        // Expose the underlying I/O error for `Error::source` chains.
        Some(&self.io)
    }
    fn description(&self) -> &str {
        self.desc.as_ref()
    }
}
impl fmt::Display for TarError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.desc)
    }
}
impl From<TarError> for Error {
    fn from(err: TarError) -> Error {
        // Preserve the original error kind while layering on the description.
        let kind = err.io.kind();
        Error::new(kind, err)
    }
}

1730
vendor/tar/src/header.rs vendored Normal file

File diff suppressed because it is too large Load Diff

46
vendor/tar/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,46 @@
//! A library for reading and writing TAR archives
//!
//! This library provides utilities necessary to manage [TAR archives][1]
//! abstracted over a reader or writer. Great strides are taken to ensure that
//! an archive is never required to be fully resident in memory, and all objects
//! provide largely a streaming interface to read bytes from.
//!
//! [1]: http://en.wikipedia.org/wiki/Tar_%28computing%29
// More docs about the detailed tar format can also be found here:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&manpath=FreeBSD+8-current
// NB: some of the coding patterns and idioms here may seem a little strange.
// This is currently attempting to expose a super generic interface while
// also not forcing clients to codegen the entire crate each time they use
// it. To that end lots of work is done to ensure that concrete
// implementations are all found in this crate and the generic functions are
// all just super thin wrappers (e.g. easy to codegen).
#![doc(html_root_url = "https://docs.rs/tar/0.4")]
#![deny(missing_docs)]
#![cfg_attr(test, deny(warnings))]
use std::io::{Error, ErrorKind};
pub use crate::archive::{Archive, Entries};
pub use crate::builder::{Builder, EntryWriter};
pub use crate::entry::{Entry, Unpacked};
pub use crate::entry_type::EntryType;
pub use crate::header::GnuExtSparseHeader;
#[cfg(all(any(unix, windows), not(target_arch = "wasm32")))]
pub use crate::header::DETERMINISTIC_TIMESTAMP;
pub use crate::header::{GnuHeader, GnuSparseHeader, Header, HeaderMode, OldHeader, UstarHeader};
pub use crate::pax::{PaxExtension, PaxExtensions};
mod archive;
mod builder;
mod entry;
mod entry_type;
mod error;
mod header;
mod pax;
/// Builds a generic I/O error (`ErrorKind::Other`) carrying `msg` as its
/// display message.
fn other(msg: &str) -> Error {
    let detail = msg.to_owned();
    Error::new(ErrorKind::Other, detail)
}

195
vendor/tar/src/pax.rs vendored Normal file
View File

@@ -0,0 +1,195 @@
#![allow(dead_code)]
use std::io;
use std::io::Write;
use std::slice;
use std::str;
use crate::other;
// Keywords for PAX extended header records.
// These cover the record keys this crate reads/writes, plus the SCHILY and
// GNU vendor prefixes used for xattrs and sparse files.
pub const PAX_NONE: &str = ""; // Indicates that no PAX key is suitable
pub const PAX_PATH: &str = "path";
pub const PAX_LINKPATH: &str = "linkpath";
pub const PAX_SIZE: &str = "size";
pub const PAX_UID: &str = "uid";
pub const PAX_GID: &str = "gid";
pub const PAX_UNAME: &str = "uname";
pub const PAX_GNAME: &str = "gname";
pub const PAX_MTIME: &str = "mtime";
pub const PAX_ATIME: &str = "atime";
pub const PAX_CTIME: &str = "ctime"; // Removed from later revision of PAX spec, but was valid
pub const PAX_CHARSET: &str = "charset"; // Currently unused
pub const PAX_COMMENT: &str = "comment"; // Currently unused
// Prefix for extended-attribute records (e.g. `SCHILY.xattr.user.foo`).
pub const PAX_SCHILYXATTR: &str = "SCHILY.xattr.";
// Keywords for GNU sparse files in a PAX extended header.
pub const PAX_GNUSPARSE: &str = "GNU.sparse.";
pub const PAX_GNUSPARSENUMBLOCKS: &str = "GNU.sparse.numblocks";
pub const PAX_GNUSPARSEOFFSET: &str = "GNU.sparse.offset";
pub const PAX_GNUSPARSENUMBYTES: &str = "GNU.sparse.numbytes";
pub const PAX_GNUSPARSEMAP: &str = "GNU.sparse.map";
pub const PAX_GNUSPARSENAME: &str = "GNU.sparse.name";
pub const PAX_GNUSPARSEMAJOR: &str = "GNU.sparse.major";
pub const PAX_GNUSPARSEMINOR: &str = "GNU.sparse.minor";
pub const PAX_GNUSPARSESIZE: &str = "GNU.sparse.size";
pub const PAX_GNUSPARSEREALSIZE: &str = "GNU.sparse.realsize";
/// An iterator over the pax extensions in an archive entry.
///
/// This iterator yields structures which can themselves be parsed into
/// key/value pairs.
pub struct PaxExtensions<'entry> {
    // Raw extension data, lazily split on b'\n' record boundaries.
    data: slice::Split<'entry, u8, fn(&u8) -> bool>,
}
impl<'entry> PaxExtensions<'entry> {
    /// Create new pax extensions iterator from the given entry data.
    pub fn new(a: &'entry [u8]) -> Self {
        // The field stores a plain fn pointer, so cast the item explicitly.
        fn split_newline(byte: &u8) -> bool {
            *byte == b'\n'
        }
        PaxExtensions {
            data: a.split(split_newline as fn(&u8) -> bool),
        }
    }
}
/// A key/value pair corresponding to a pax extension.
pub struct PaxExtension<'entry> {
    // Raw bytes of the record key (between the length prefix and `=`).
    key: &'entry [u8],
    // Raw bytes of the record value (after `=`, excluding the newline).
    value: &'entry [u8],
}
/// Scans the raw extension data `a` for `key` and parses its value as `u64`.
///
/// Returns `None` if any record fails to parse before the key is found, if
/// the key is absent, or if its value is not valid UTF-8 / not a number.
pub fn pax_extensions_value(a: &[u8], key: &str) -> Option<u64> {
    for extension in PaxExtensions::new(a) {
        // A malformed record aborts the whole scan, mirroring a parse error.
        let extension = extension.ok()?;
        if extension.key() == Ok(key) {
            return extension.value().ok().and_then(|v| v.parse::<u64>().ok());
        }
    }
    None
}
impl<'entry> Iterator for PaxExtensions<'entry> {
    type Item = io::Result<PaxExtension<'entry>>;
    fn next(&mut self) -> Option<io::Result<PaxExtension<'entry>>> {
        // An empty segment (trailing newline) or exhausted input ends iteration.
        let line = match self.data.next() {
            Some([]) | None => return None,
            Some(line) => line,
        };
        // Records look like "<len> <key>=<value>" once split on '\n'.
        let parsed = (|| {
            let space = line.iter().position(|b| *b == b' ')?;
            let reported_len: usize = str::from_utf8(&line[..space]).ok()?.parse().ok()?;
            // The declared length counts the trailing newline that `split`
            // already removed, hence the `+ 1`.
            if line.len() + 1 != reported_len {
                return None;
            }
            let rest = &line[space + 1..];
            let equals = rest.iter().position(|b| *b == b'=')?;
            Some(PaxExtension {
                key: &rest[..equals],
                value: &rest[equals + 1..],
            })
        })();
        Some(parsed.ok_or_else(|| other("malformed pax extension")))
    }
}
impl<'entry> PaxExtension<'entry> {
    /// Returns the key for this key/value pair parsed as a string.
    ///
    /// May fail if the key isn't actually utf-8.
    ///
    /// The returned borrow is tied to the underlying entry data (`'entry`),
    /// not to `self`.
    pub fn key(&self) -> Result<&'entry str, str::Utf8Error> {
        str::from_utf8(self.key)
    }
    /// Returns the underlying raw bytes for the key of this key/value pair.
    pub fn key_bytes(&self) -> &'entry [u8] {
        self.key
    }
    /// Returns the value for this key/value pair parsed as a string.
    ///
    /// May fail if the value isn't actually utf-8.
    ///
    /// The returned borrow is tied to the underlying entry data (`'entry`),
    /// not to `self`.
    pub fn value(&self) -> Result<&'entry str, str::Utf8Error> {
        str::from_utf8(self.value)
    }
    /// Returns the underlying raw bytes for this value of this key/value pair.
    pub fn value_bytes(&self) -> &'entry [u8] {
        self.value
    }
}
/// Extension trait for `Builder` to append PAX extended headers.
impl<T: Write> crate::Builder<T> {
    /// Append PAX extended headers to the archive.
    ///
    /// Takes in an iterator over the list of headers to add to convert it into a header set formatted.
    ///
    /// Returns io::Error if an error occurs, else it returns ()
    pub fn append_pax_extensions<'key, 'value>(
        &mut self,
        headers: impl IntoIterator<Item = (&'key str, &'value [u8])>,
    ) -> Result<(), io::Error> {
        // Store the headers formatted before write
        let mut data: Vec<u8> = Vec::new();
        // For each key in headers, convert into a sized space and add it to data.
        // This will then be written in the file
        for (key, value) in headers {
            // Each record is "<len> <key>=<value>\n" where <len> counts the
            // entire record including its own digits. Find the digit count
            // iteratively: `len_len` is the candidate digit count and
            // `max_len` is the smallest length needing more digits (10^len_len).
            let mut len_len = 1;
            let mut max_len = 10;
            // The 3 accounts for the ' ', '=', and trailing '\n' bytes.
            let rest_len = 3 + key.len() + value.len();
            while rest_len + len_len >= max_len {
                len_len += 1;
                max_len *= 10;
            }
            let len = rest_len + len_len;
            write!(&mut data, "{} {}=", len, key)?;
            data.extend_from_slice(value);
            data.push(b'\n');
        }
        // Ignore the header append if it's empty.
        if data.is_empty() {
            return Ok(());
        }
        // Create a header of type XHeader, set the size to the length of the
        // data, set the entry type to XHeader, and set the checksum
        // then append the header and the data to the archive.
        let mut header = crate::Header::new_ustar();
        let data_as_bytes: &[u8] = &data;
        header.set_size(data_as_bytes.len() as u64);
        header.set_entry_type(crate::EntryType::XHeader);
        header.set_cksum();
        self.append(&header, data_as_bytes)
    }
}

Binary file not shown.

View File

View File

@@ -0,0 +1 @@
{"name":"tar","vers":"0.4.45","deps":[{"name":"filetime","req":"^0.2.8","features":[],"optional":false,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"astral-tokio-tar","req":"^0.5","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"rand","req":"^0.8","features":["small_rng"],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"tempfile","req":"^3","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"tokio","req":"^1","features":["macros","rt"],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"tokio-stream","req":"^0.1","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"libc","req":"^0.2","features":[],"optional":false,"default_features":true,"target":"cfg(unix)","kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"xattr","req":"^1.1.3","features":[],"optional":true,"default_features":true,"target":"cfg(unix)","kind":"normal","registry":"https://github.com/rust-lang/crates
.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false}],"features":{"default":["xattr"]},"features2":null,"cksum":"4f9a58c1209cb4ebdf26b4d7671c3bfddb76b1537b401e87cf96618d525c2eeb","yanked":null,"links":null,"rust_version":null,"v":2}

Binary file not shown.

2024
vendor/tar/tests/all.rs vendored Normal file

File diff suppressed because it is too large Load Diff

466
vendor/tar/tests/entry.rs vendored Normal file
View File

@@ -0,0 +1,466 @@
extern crate tar;
extern crate tempfile;
use std::fs::create_dir;
use std::fs::File;
use std::io::Read;
use tempfile::Builder;
/// A symlink entry with an absolute target ("/bar") unpacks successfully
/// and the stored link target survives a round-trip unchanged.
#[test]
fn absolute_symlink() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name("/bar").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    ar.unpack(td.path()).unwrap();
    // The link itself must exist; it may dangle, hence symlink_metadata.
    td.path().join("foo").symlink_metadata().unwrap();
    // Re-read the archive and check the raw link target is untouched.
    let mut ar = tar::Archive::new(&bytes[..]);
    let mut entries = ar.entries().unwrap();
    let entry = entries.next().unwrap().unwrap();
    assert_eq!(&*entry.link_name_bytes().unwrap(), b"/bar");
}
/// A hard-link entry whose target is an absolute path inside the
/// extraction directory unpacks successfully and both paths exist.
#[test]
fn absolute_hardlink() {
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Link);
    header.set_path("bar").unwrap();
    // This absolute path under tempdir will be created at unpack time
    header.set_link_name(td.path().join("foo")).unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    ar.unpack(td.path()).unwrap();
    td.path().join("foo").metadata().unwrap();
    td.path().join("bar").metadata().unwrap();
}
/// A hard-link entry with a relative target ("foo") inside the archive
/// unpacks successfully and both the file and its link exist afterwards.
#[test]
fn relative_hardlink() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Link);
    header.set_path("bar").unwrap();
    header.set_link_name("foo").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    ar.unpack(td.path()).unwrap();
    td.path().join("foo").metadata().unwrap();
    td.path().join("bar").metadata().unwrap();
}
/// Writing a file through a symlink that points at an absolute path
/// outside the destination ("/") must fail, and the file must not be
/// created through the link.
#[test]
fn absolute_link_deref_error() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name("/").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    // Second entry tries to write "foo/bar", i.e. through the symlink.
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo/bar").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    assert!(ar.unpack(td.path()).is_err());
    // The symlink itself was created, but nothing was written through it.
    td.path().join("foo").symlink_metadata().unwrap();
    assert!(File::open(td.path().join("foo").join("bar")).is_err());
}
/// Same as `absolute_link_deref_error`, but the symlink escapes via a
/// relative "../../../../" target; unpacking must still fail.
#[test]
fn relative_link_deref_error() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name("../../../../").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo/bar").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    assert!(ar.unpack(td.path()).is_err());
    td.path().join("foo").symlink_metadata().unwrap();
    assert!(File::open(td.path().join("foo").join("bar")).is_err());
}
/// A directory entry's mode bits (0o777) are applied to the extracted
/// directory on unix.
#[test]
#[cfg(unix)]
fn directory_maintains_permissions() {
    use ::std::os::unix::fs::PermissionsExt;
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Directory);
    header.set_path("foo").unwrap();
    header.set_mode(0o777);
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    ar.unpack(td.path()).unwrap();
    let f = File::open(td.path().join("foo")).unwrap();
    let md = f.metadata().unwrap();
    assert!(md.is_dir());
    // 0o40000 is the S_IFDIR file-type bit reported by mode().
    assert_eq!(md.permissions().mode(), 0o40777);
}
/// `Entry::set_mask` clears the masked bits from the entry's mode when it
/// is unpacked: 0o777 masked with 0o027 yields 0o750 on disk.
#[test]
#[cfg(unix)]
fn set_entry_mask() {
    use ::std::os::unix::fs::PermissionsExt;
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo").unwrap();
    header.set_mode(0o777);
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    let foo_path = td.path().join("foo");
    let mut entries = ar.entries().unwrap();
    let mut foo = entries.next().unwrap().unwrap();
    foo.set_mask(0o027);
    foo.unpack(&foo_path).unwrap();
    let f = File::open(foo_path).unwrap();
    let md = f.metadata().unwrap();
    assert!(md.is_file());
    // 0o100000 is the S_IFREG file-type bit reported by mode().
    assert_eq!(md.permissions().mode(), 0o100750);
}
#[test]
#[cfg(not(windows))] // dangling symlinks have weird permissions
fn modify_link_just_created() {
let mut ar = tar::Builder::new(Vec::new());
let mut header = tar::Header::new_gnu();
header.set_size(0);
header.set_entry_type(tar::EntryType::Symlink);
header.set_path("foo").unwrap();
header.set_link_name("bar").unwrap();
header.set_cksum();
ar.append(&header, &[][..]).unwrap();
let mut header = tar::Header::new_gnu();
header.set_size(0);
header.set_entry_type(tar::EntryType::Regular);
header.set_path("bar/foo").unwrap();
header.set_cksum();
ar.append(&header, &[][..]).unwrap();
let mut header = tar::Header::new_gnu();
header.set_size(0);
header.set_entry_type(tar::EntryType::Regular);
header.set_path("foo/bar").unwrap();
header.set_cksum();
ar.append(&header, &[][..]).unwrap();
let bytes = ar.into_inner().unwrap();
let mut ar = tar::Archive::new(&bytes[..]);
let td = Builder::new().prefix("tar").tempdir().unwrap();
ar.unpack(td.path()).unwrap();
File::open(td.path().join("bar/foo")).unwrap();
File::open(td.path().join("bar/bar")).unwrap();
File::open(td.path().join("foo/foo")).unwrap();
File::open(td.path().join("foo/bar")).unwrap();
}
#[test]
#[cfg(not(windows))] // dangling symlinks have weird permissions
fn modify_outside_with_relative_symlink() {
let mut ar = tar::Builder::new(Vec::new());
let mut header = tar::Header::new_gnu();
header.set_size(0);
header.set_entry_type(tar::EntryType::Symlink);
header.set_path("symlink").unwrap();
header.set_link_name("..").unwrap();
header.set_cksum();
ar.append(&header, &[][..]).unwrap();
let mut header = tar::Header::new_gnu();
header.set_size(0);
header.set_entry_type(tar::EntryType::Regular);
header.set_path("symlink/foo/bar").unwrap();
header.set_cksum();
ar.append(&header, &[][..]).unwrap();
let bytes = ar.into_inner().unwrap();
let mut ar = tar::Archive::new(&bytes[..]);
let td = Builder::new().prefix("tar").tempdir().unwrap();
let tar_dir = td.path().join("tar");
create_dir(&tar_dir).unwrap();
assert!(ar.unpack(tar_dir).is_err());
assert!(!td.path().join("foo").exists());
}
/// A symlink to ".." followed by a write through it must make the unpack
/// fail; the symlink exists but no file escapes through it.
#[test]
fn parent_paths_error() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name("..").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo/bar").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    assert!(ar.unpack(td.path()).is_err());
    td.path().join("foo").symlink_metadata().unwrap();
    assert!(File::open(td.path().join("foo").join("bar")).is_err());
}
/// A ".." inside a link *target* is fine when it still resolves inside
/// the destination: "foo/bar" -> "../bar" points at the extracted "bar".
#[test]
#[cfg(unix)]
fn good_parent_paths_ok() {
    use std::path::PathBuf;
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path(PathBuf::from("foo").join("bar")).unwrap();
    header
        .set_link_name(PathBuf::from("..").join("bar"))
        .unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("bar").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    ar.unpack(td.path()).unwrap();
    // The symlink resolves within the destination to the real "bar" file.
    td.path().join("foo").join("bar").read_link().unwrap();
    let dst = td.path().join("foo").join("bar").canonicalize().unwrap();
    File::open(dst).unwrap();
}
/// A hard link to a path outside the destination ("../test") must not be
/// usable to overwrite that file via a later entry with the same name:
/// unpack fails and the outside file stays empty.
#[test]
fn modify_hard_link_just_created() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Link);
    header.set_path("foo").unwrap();
    header.set_link_name("../test").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    // Second entry re-writes "foo" with one byte of content.
    let mut header = tar::Header::new_gnu();
    header.set_size(1);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo").unwrap();
    header.set_cksum();
    ar.append(&header, &b"x"[..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    let test = td.path().join("test");
    File::create(&test).unwrap();
    let dir = td.path().join("dir");
    assert!(ar.unpack(&dir).is_err());
    // The pre-existing outside file must be untouched (still empty).
    let mut contents = Vec::new();
    File::open(&test)
        .unwrap()
        .read_to_end(&mut contents)
        .unwrap();
    assert_eq!(contents.len(), 0);
}
/// Same shape as `modify_hard_link_just_created` but with a symlink:
/// the unpack succeeds (the second "foo" replaces the link rather than
/// writing through it), and the outside file stays empty.
#[test]
fn modify_symlink_just_created() {
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name("../test").unwrap();
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let mut header = tar::Header::new_gnu();
    header.set_size(1);
    header.set_entry_type(tar::EntryType::Regular);
    header.set_path("foo").unwrap();
    header.set_cksum();
    ar.append(&header, &b"x"[..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    let test = td.path().join("test");
    File::create(&test).unwrap();
    let dir = td.path().join("dir");
    ar.unpack(&dir).unwrap();
    let mut contents = Vec::new();
    File::open(&test)
        .unwrap()
        .read_to_end(&mut contents)
        .unwrap();
    assert_eq!(contents.len(), 0);
}
/// Test that unpacking a tarball with a symlink followed by a directory entry
/// with the same name does not allow modifying permissions of arbitrary directories
/// outside the extraction path.
#[test]
#[cfg(unix)]
fn symlink_dir_collision_does_not_modify_external_dir_permissions() {
    use ::std::fs;
    use ::std::os::unix::fs::PermissionsExt;
    // Set up an external directory with known 0o700 permissions that the
    // malicious archive will try to chmod to 0o777 through the symlink.
    let td = Builder::new().prefix("tar").tempdir().unwrap();
    let target_dir = td.path().join("target-dir");
    fs::create_dir(&target_dir).unwrap();
    fs::set_permissions(&target_dir, fs::Permissions::from_mode(0o700)).unwrap();
    let before_mode = fs::metadata(&target_dir).unwrap().permissions().mode() & 0o7777;
    assert_eq!(before_mode, 0o700);
    let extract_dir = td.path().join("extract-dir");
    fs::create_dir(&extract_dir).unwrap();
    // Entry 1: symlink "foo" -> absolute path of the external directory.
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Symlink);
    header.set_path("foo").unwrap();
    header.set_link_name(&target_dir).unwrap();
    header.set_mode(0o777);
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    // Entry 2: directory "foo" with mode 0o777, colliding with the link.
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Directory);
    header.set_path("foo").unwrap();
    header.set_mode(0o777);
    header.set_cksum();
    ar.append(&header, &[][..]).unwrap();
    let bytes = ar.into_inner().unwrap();
    let mut ar = tar::Archive::new(&bytes[..]);
    let result = ar.unpack(&extract_dir);
    assert!(result.is_err());
    // The extracted "foo" is still a symlink, not a directory.
    let symlink_path = extract_dir.join("foo");
    assert!(symlink_path
        .symlink_metadata()
        .unwrap()
        .file_type()
        .is_symlink());
    // The external directory's permissions are unchanged.
    let after_mode = fs::metadata(&target_dir).unwrap().permissions().mode() & 0o7777;
    assert_eq!(after_mode, 0o700);
}

256
vendor/tar/tests/header/mod.rs vendored Normal file
View File

@@ -0,0 +1,256 @@
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::Path;
use std::{mem, thread, time};
use tempfile::Builder;
use tar::{GnuHeader, Header, HeaderMode};
/// A freshly created GNU header reports itself as GNU only: the ustar
/// accessors return `None`.
#[test]
fn default_gnu() {
    let mut h = Header::new_gnu();
    assert!(h.as_gnu().is_some());
    assert!(h.as_gnu_mut().is_some());
    assert!(h.as_ustar().is_none());
    assert!(h.as_ustar_mut().is_none());
}
/// An old-style header is neither GNU nor ustar: all typed accessors
/// return `None`.
#[test]
fn goto_old() {
    let mut h = Header::new_old();
    assert!(h.as_gnu().is_none());
    assert!(h.as_gnu_mut().is_none());
    assert!(h.as_ustar().is_none());
    assert!(h.as_ustar_mut().is_none());
}
/// A ustar header reports itself as ustar only: the GNU accessors
/// return `None`.
#[test]
fn goto_ustar() {
    let mut h = Header::new_ustar();
    assert!(h.as_gnu().is_none());
    assert!(h.as_gnu_mut().is_none());
    assert!(h.as_ustar().is_some());
    assert!(h.as_ustar_mut().is_some());
}
/// `set_link_name`/`link_name` round-trip plain, relative, and nested
/// targets; backslashes are translated to '/' only on Windows; a raw
/// NUL-terminated field is read back without the terminator; embedded
/// NULs are rejected by the setter.
#[test]
fn link_name() {
    let mut h = Header::new_gnu();
    h.set_link_name("foo").unwrap();
    assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("foo"));
    h.set_link_name("../foo").unwrap();
    assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("../foo"));
    h.set_link_name("foo/bar").unwrap();
    assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("foo/bar"));
    h.set_link_name("foo\\ba").unwrap();
    if cfg!(windows) {
        assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("foo/ba"));
    } else {
        assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("foo\\ba"));
    }
    // Poke bytes (with a trailing NUL) directly into the raw linkname
    // field and confirm the reader strips the terminator.
    let name = "foo\\bar\0";
    for (slot, val) in h.as_old_mut().linkname.iter_mut().zip(name.as_bytes()) {
        *slot = *val;
    }
    assert_eq!(h.link_name().unwrap().unwrap().to_str(), Some("foo\\bar"));
    assert!(h.set_link_name("\0").is_err());
}
/// All three header flavors start out with an mtime of 0.
#[test]
fn mtime() {
    let h = Header::new_gnu();
    assert_eq!(h.mtime().unwrap(), 0);
    let h = Header::new_ustar();
    assert_eq!(h.mtime().unwrap(), 0);
    let h = Header::new_old();
    assert_eq!(h.mtime().unwrap(), 0);
}
/// User/group names round-trip on GNU and ustar headers; old-style
/// headers have no such fields, so getters return `None` and setters err.
#[test]
fn user_and_group_name() {
    let mut h = Header::new_gnu();
    h.set_username("foo").unwrap();
    h.set_groupname("bar").unwrap();
    assert_eq!(h.username().unwrap(), Some("foo"));
    assert_eq!(h.groupname().unwrap(), Some("bar"));
    h = Header::new_ustar();
    h.set_username("foo").unwrap();
    h.set_groupname("bar").unwrap();
    assert_eq!(h.username().unwrap(), Some("foo"));
    assert_eq!(h.groupname().unwrap(), Some("bar"));
    h = Header::new_old();
    assert_eq!(h.username().unwrap(), None);
    assert_eq!(h.groupname().unwrap(), None);
    assert!(h.set_username("foo").is_err());
    assert!(h.set_groupname("foo").is_err());
}
/// Device major/minor numbers round-trip on GNU and ustar headers;
/// corrupted octal fields produce errors; old-style headers have no
/// device fields at all.
#[test]
fn dev_major_minor() {
    let mut h = Header::new_gnu();
    h.set_device_major(1).unwrap();
    h.set_device_minor(2).unwrap();
    assert_eq!(h.device_major().unwrap(), Some(1));
    assert_eq!(h.device_minor().unwrap(), Some(2));
    h = Header::new_ustar();
    h.set_device_major(1).unwrap();
    h.set_device_minor(2).unwrap();
    assert_eq!(h.device_major().unwrap(), Some(1));
    assert_eq!(h.device_minor().unwrap(), Some(2));
    // Non-octal garbage bytes in the raw fields must be rejected.
    h.as_ustar_mut().unwrap().dev_minor[0] = 0x7f;
    h.as_ustar_mut().unwrap().dev_major[0] = 0x7f;
    assert!(h.device_major().is_err());
    assert!(h.device_minor().is_err());
    h.as_ustar_mut().unwrap().dev_minor[0] = b'g';
    h.as_ustar_mut().unwrap().dev_major[0] = b'h';
    assert!(h.device_major().is_err());
    assert!(h.device_minor().is_err());
    h = Header::new_old();
    assert_eq!(h.device_major().unwrap(), None);
    assert_eq!(h.device_minor().unwrap(), None);
    assert!(h.set_device_major(1).is_err());
    assert!(h.set_device_minor(1).is_err());
}
/// `set_path`/`path` round-trip for plain, trailing-slash, nested, and
/// backslash paths; "./" prefixes are stripped; over-long names, NULs,
/// and ".." components are rejected. Ustar headers additionally accept
/// longer paths that can be split at a '/' into the prefix field.
#[test]
fn set_path() {
    let mut h = Header::new_gnu();
    h.set_path("foo").unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some("foo"));
    h.set_path("foo/").unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some("foo/"));
    h.set_path("foo/bar").unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some("foo/bar"));
    h.set_path("foo\\bar").unwrap();
    if cfg!(windows) {
        assert_eq!(h.path().unwrap().to_str(), Some("foo/bar"));
    } else {
        assert_eq!(h.path().unwrap().to_str(), Some("foo\\bar"));
    }
    // set_path documentation explicitly states it removes any ".", signifying the
    // current directory, from the path. This test ensures that documented
    // behavior occurs
    h.set_path("./control").unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some("control"));
    let long_name = "foo".repeat(100);
    let medium1 = "foo".repeat(52);
    let medium2 = "fo/".repeat(52);
    assert!(h.set_path(&long_name).is_err());
    assert!(h.set_path(&medium1).is_err());
    assert!(h.set_path(&medium2).is_err());
    assert!(h.set_path("\0").is_err());
    assert!(h.set_path("..").is_err());
    assert!(h.set_path("foo/..").is_err());
    assert!(h.set_path("foo/../bar").is_err());
    h = Header::new_ustar();
    h.set_path("foo").unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some("foo"));
    assert!(h.set_path(&long_name).is_err());
    assert!(h.set_path(&medium1).is_err());
    // medium2 has '/' separators, so ustar can split it into the
    // prefix + name fields where the GNU header could not store it.
    h.set_path(&medium2).unwrap();
    assert_eq!(h.path().unwrap().to_str(), Some(&medium2[..]));
}
/// A 101-character ustar path with a single leading component still
/// round-trips via the prefix field.
#[test]
fn set_ustar_path_hard() {
    let mut h = Header::new_ustar();
    let p = Path::new("a").join(vec!["a"; 100].join(""));
    h.set_path(&p).unwrap();
    assert_eq!(h.path().unwrap(), p);
}
/// `HeaderMode::Deterministic` produces identical headers for "the same"
/// file captured at different times with different readonly bits: size,
/// path, mode, mtime (pinned to a fixed constant), uid, and gid all match.
#[test]
fn set_metadata_deterministic() {
    let td = Builder::new().prefix("tar-rs").tempdir().unwrap();
    let tmppath = td.path().join("tmpfile");
    // Helper: (re)create `path` with one byte of content and the given
    // readonly flag, then capture its metadata deterministically.
    fn mk_header(path: &Path, readonly: bool) -> Result<Header, io::Error> {
        let mut file = File::create(path).unwrap();
        file.write_all(b"c").unwrap();
        let mut perms = file.metadata().unwrap().permissions();
        perms.set_readonly(readonly);
        fs::set_permissions(path, perms).unwrap();
        let mut h = Header::new_ustar();
        h.set_metadata_in_mode(&path.metadata().unwrap(), HeaderMode::Deterministic);
        Ok(h)
    }
    // Create "the same" File twice in a row, one second apart, with differing readonly values.
    let one = mk_header(tmppath.as_path(), false).unwrap();
    thread::sleep(time::Duration::from_millis(1050));
    let two = mk_header(tmppath.as_path(), true).unwrap();
    // Always expected to match.
    assert_eq!(one.size().unwrap(), two.size().unwrap());
    assert_eq!(one.path().unwrap(), two.path().unwrap());
    assert_eq!(one.mode().unwrap(), two.mode().unwrap());
    // Would not match without `Deterministic`.
    assert_eq!(one.mtime().unwrap(), two.mtime().unwrap());
    assert_eq!(one.mtime().unwrap(), 1153704088);
    // TODO: No great way to validate that these would not be filled, but
    // check them anyway.
    assert_eq!(one.uid().unwrap(), two.uid().unwrap());
    assert_eq!(one.gid().unwrap(), two.gid().unwrap());
}
/// Numeric fields switch between ASCII octal and the GNU base-256
/// (high-bit-set) binary encoding: small values are written as octal
/// digits, values too large for the field get the 0x80-prefixed binary
/// form, and both encodings are parsed back correctly for size, gid,
/// uid, mtime, realsize, and sparse offsets/lengths.
#[test]
fn extended_numeric_format() {
    let mut h: GnuHeader = unsafe { mem::zeroed() };
    // 42 fits in octal: "00000000052\0".
    h.as_header_mut().set_size(42);
    assert_eq!(h.size, [48, 48, 48, 48, 48, 48, 48, 48, 48, 53, 50, 0]);
    // 8589934593 overflows the 11-digit octal field -> base-256 binary.
    h.as_header_mut().set_size(8589934593);
    assert_eq!(h.size, [0x80, 0, 0, 0, 0, 0, 0, 0x02, 0, 0, 0, 1]);
    h.as_header_mut().set_size(44);
    assert_eq!(h.size, [48, 48, 48, 48, 48, 48, 48, 48, 48, 53, 52, 0]);
    // Reading both encodings back.
    h.size = [0x80, 0, 0, 0, 0, 0, 0, 0x02, 0, 0, 0, 0];
    assert_eq!(h.as_header().entry_size().unwrap(), 0x0200000000);
    h.size = [48, 48, 48, 48, 48, 48, 48, 48, 48, 53, 51, 0];
    assert_eq!(h.as_header().entry_size().unwrap(), 43);
    h.as_header_mut().set_gid(42);
    assert_eq!(h.gid, [48, 48, 48, 48, 48, 53, 50, 0]);
    assert_eq!(h.as_header().gid().unwrap(), 42);
    h.as_header_mut().set_gid(0x7fffffffffffffff);
    assert_eq!(h.gid, [0xff; 8]);
    assert_eq!(h.as_header().gid().unwrap(), 0x7fffffffffffffff);
    h.uid = [0x80, 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78];
    assert_eq!(h.as_header().uid().unwrap(), 0x12345678);
    h.mtime = [
        0x80, 0, 0, 0, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
    ];
    assert_eq!(h.as_header().mtime().unwrap(), 0x0123456789abcdef);
    h.realsize = [0x80, 0, 0, 0, 0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde];
    assert_eq!(h.real_size().unwrap(), 0x00123456789abcde);
    h.sparse[0].offset = [0x80, 0, 0, 0, 0, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd];
    assert_eq!(h.sparse[0].offset().unwrap(), 0x000123456789abcd);
    h.sparse[0].numbytes = [0x80, 0, 0, 0, 0, 0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc];
    assert_eq!(h.sparse[0].length().unwrap(), 0x0000123456789abc);
}
/// `Header::from_byte_slice` round-trips: converting a header to bytes,
/// reinterpreting those bytes as a header, and serializing again yields
/// the same bytes.
#[test]
fn byte_slice_conversion() {
    let h = Header::new_gnu();
    let b: &[u8] = h.as_bytes();
    let b_conv: &[u8] = Header::from_byte_slice(h.as_bytes()).as_bytes();
    assert_eq!(b, b_conv);
}