#!/bin/sh
#
# Copyright (C) 2005 Rene Scharfe
#
test_description='git archive and git get-tar-commit-id test

This test covers the topics of file contents, commit date handling and
commit id embedding:

The contents of the repository are compared to the extracted tar
archive. The repository contains simple text files, symlinks and a
binary file (/bin/sh). Only paths shorter than 99 characters are
used.

git archive applies the commit date to every file in the archive it
creates. The test sets the commit date to a specific value and checks
that the tar archive contains that value.

When giving git archive a commit id (in contrast to a tree id), it
embeds this commit id into the tar archive as a comment. The test
checks the ability of git get-tar-commit-id to extract it from the
tar file.
'
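# For instance, the embedded commit id can be recovered by hand like this
# (illustrative only; the tests below automate the comparison):
#
#	git archive HEAD >x.tar
#	git get-tar-commit-id <x.tar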
TEST_CREATE_REPO_NO_TEMPLATE=1
. ./test-lib.sh
SUBSTFORMAT=%H%n
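# Some system tars do not understand pax extended headers and extract
# them as regular files (PaxHeaders.*) instead. Detect that here; the
# extraction tests then have to apply the recorded path renames
# themselves.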
test_lazy_prereq TAR_NEEDS_PAX_FALLBACK '
	(
		mkdir pax &&
		cd pax &&
		"$TAR" xf "$TEST_DIRECTORY"/t5000/pax.tar &&
		test -f PaxHeaders.1791/file
	)
'
test_lazy_prereq GZIP 'gzip --version'
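# Read the value of key $2 from the pax header records in file $1. Each
# pax record has the form "<len> <key>=<value>\n", where <len> is the
# decimal byte length of the whole record, including the length field
# itself, the separating space and the trailing newline. The length check
# below reconstructs the record with echo (which re-adds the newline
# consumed by read) and compares its byte count against <len> to skip
# lines that are not well-formed records.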
get_pax_header() {
	file=$1
	header=$2=

	while read len rest
	do
		if test "$len" = $(echo "$len $rest" | wc -c)
		then
			case "$rest" in
			$header*)
				echo "${rest#$header}"
				;;
			esac
		fi
	done <"$file"
}
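# Usage example (hypothetical file name):
#
#	path=$(get_pax_header foo.paxheader path)

# check_tar <stem> [<prefix>] extracts <stem>.tar into directory <stem>
# and validates the resulting file list and contents against the
# reference tree "a" set up below.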
check_tar() {
	tarfile=$1.tar
	listfile=$1.lst
	dir=$1
	dir_with_prefix=$dir/$2

	test_expect_success ' extract tar archive' '
		(mkdir $dir && cd $dir && "$TAR" xf -) <$tarfile
	'

	test_expect_success TAR_NEEDS_PAX_FALLBACK ' interpret pax headers' '
		(
			cd $dir &&
			for header in *.paxheader
			do
				data=${header%.paxheader}.data &&
				if test -h $data || test -e $data
				then
					path=$(get_pax_header $header path) &&
					if test -n "$path"
					then
						mv "$data" "$path" || exit 1
					fi
				fi
			done
		)
	'

	test_expect_success ' validate filenames' '
		(cd ${dir_with_prefix}a && find .) | sort >$listfile &&
		test_cmp a.lst $listfile
	'

	test_expect_success ' validate file contents' '
		diff -r a ${dir_with_prefix}a
	'
}
check_added() {
	dir=$1
	path_in_fs=$2
	path_in_archive=$3

	test_expect_success " validate extra file $path_in_archive" '
		diff -r $path_in_fs $dir/$path_in_archive
	'
}
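# The "obj" ids below name the pre-generated loose object shipped as
# t5000/huge-object (a truncated ~64 GB blob), used by the
# LONG_IS_64BIT tests further down.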
test_expect_success 'setup' '
	test_oid_cache <<-EOF
	obj sha1:19f9c8273ec45a8938e6999cb59b3ff66739902a
	obj sha256:3c666f798798601571f5cec0adb57ce4aba8546875e7693177e0535f34d2c49b
	EOF
'
test_expect_success 'populate workdir' '
	mkdir a &&
	echo simple textfile >a/a &&
	ten=0123456789 &&
	hundred="$ten$ten$ten$ten$ten$ten$ten$ten$ten$ten" &&
	echo long filename >"a/four$hundred" &&
	mkdir a/bin &&
	test-tool genrandom "frotz" 500000 >a/bin/sh &&
	printf "A\$Format:%s\$O" "$SUBSTFORMAT" >a/substfile1 &&
	printf "A not substituted O" >a/substfile2 &&
	if test_have_prereq SYMLINKS
	then
		ln -s a a/l1
	else
		printf %s a >a/l1
	fi &&
	(
		p=long_path_to_a_file &&
		cd a &&
		for depth in 1 2 3 4 5
		do
			mkdir $p &&
			cd $p || exit 1
		done &&
		echo text >file_with_long_path
	) &&
	(cd a && find .) | sort >a.lst
'
test_expect_success \
    'add ignored file' \
    'echo ignore me >a/ignored &&
     mkdir .git/info &&
     echo ignored export-ignore >.git/info/attributes'
test_expect_success 'add files to repository' '
	git add a &&
	GIT_COMMITTER_DATE="2005-05-27 22:00" git commit -m initial
'
test_expect_success 'setup export-subst' '
	echo "substfile?" export-subst >>.git/info/attributes &&
	git log --max-count=1 "--pretty=format:A${SUBSTFORMAT}O" HEAD \
		>a/substfile1
'
test_expect_success 'create bare clone' '
	git clone --template= --bare . bare.git &&
	mkdir bare.git/info &&
	cp .git/info/attributes bare.git/info/attributes
'
test_expect_success 'remove ignored file' '
	rm a/ignored
'
test_expect_success 'git archive' '
	git archive HEAD >b.tar
'
check_tar b
test_expect_success 'git archive --prefix=prefix/' '
	git archive --prefix=prefix/ HEAD >with_prefix.tar
'
check_tar with_prefix prefix/
test_expect_success 'git archive --prefix=olde-' '
	git archive --prefix=olde- HEAD >with_olde-prefix.tar
'
check_tar with_olde-prefix olde-
test_expect_success 'git archive --add-file' '
	echo untracked >untracked &&
	git archive --add-file=untracked HEAD >with_untracked.tar
'
check_tar with_untracked
check_added with_untracked untracked untracked
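# --prefix affects the entries that follow it on the command line, so
# each --add-file below gets its own prefix and the final --prefix=
# resets it for the tracked files of HEAD.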
test_expect_success 'git archive --add-file twice' '
	echo untracked >untracked &&
	git archive --prefix=one/ --add-file=untracked \
		--prefix=two/ --add-file=untracked \
		--prefix= HEAD >with_untracked2.tar
'
check_tar with_untracked2
check_added with_untracked2 untracked one/untracked
check_added with_untracked2 untracked two/untracked
test_expect_success 'git archive on large files' '
	test_config core.bigfilethreshold 1 &&
	git archive HEAD >b3.tar &&
	test_cmp_bin b.tar b3.tar
'
test_expect_success 'git archive in a bare repo' '
	git --git-dir bare.git archive HEAD >b3.tar
'
test_expect_success 'git archive vs. the same in a bare repo' '
	test_cmp_bin b.tar b3.tar
'
test_expect_success 'git archive with --output' '
	git archive --output=b4.tar HEAD &&
	test_cmp_bin b.tar b4.tar
'
test_expect_success 'git archive --remote' '
	git archive --remote=. HEAD >b5.tar &&
	test_cmp_bin b.tar b5.tar
'
test_expect_success 'git archive --remote with configured remote' '
	git config remote.foo.url . &&
	(
		cd a &&
		git archive --remote=foo --output=../b5-nick.tar HEAD
	) &&
	test_cmp_bin b.tar b5-nick.tar
'
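# 1117231200 is 2005-05-27 22:00 UTC, the committer date used in the
# "add files to repository" step above (test-lib.sh exports TZ=UTC, so
# that date string is interpreted as UTC).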
test_expect_success 'validate file modification time' '
	mkdir extract &&
	"$TAR" xf b.tar -C extract a/a &&
	test-tool chmtime --get extract/a/a >b.mtime &&
	echo "1117231200" >expected.mtime &&
	test_cmp expected.mtime b.mtime
'
test_expect_success 'git get-tar-commit-id' '
	git get-tar-commit-id <b.tar >actual &&
	git rev-parse HEAD >expect &&
	test_cmp expect actual
'
test_expect_success 'git archive with --output, override inferred format' '
	git archive --format=tar --output=d4.zip HEAD &&
	test_cmp_bin b.tar d4.zip
'
test_expect_success GZIP 'git archive with --output and --remote creates .tgz' '
	git archive --output=d5.tgz --remote=. HEAD &&
	gzip -d -c <d5.tgz >d5.tar &&
	test_cmp_bin b.tar d5.tar
'
test_expect_success 'git archive --list outside of a git repo' '
	nongit git archive --list
'
test_expect_success 'git archive --remote outside of a git repo' '
	git archive HEAD >expect.tar &&
	nongit git archive --remote="$PWD" HEAD >actual.tar &&
	test_cmp_bin expect.tar actual.tar
'
test_expect_success 'clients cannot access unreachable commits' '
	test_commit unreachable &&
	sha1=$(git rev-parse HEAD) &&
	git reset --hard HEAD^ &&
	git archive $sha1 >remote.tar &&
	test_must_fail git archive --remote=. $sha1 >remote.tar
'
test_expect_success 'upload-archive can allow unreachable commits' '
	test_commit unreachable1 &&
	sha1=$(git rev-parse HEAD) &&
	git reset --hard HEAD^ &&
	git archive $sha1 >remote.tar &&
	test_config uploadarchive.allowUnreachable true &&
	git archive --remote=. $sha1 >remote.tar
'
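# "tar.<format>.command" configures a filter command that the generated
# tar stream is piped through, and "tar.<format>.remote" additionally
# lets remote clients request that format; the formats set up here are
# "tar.foo" and "bar".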
test_expect_success 'setup tar filters' '
	git config tar.tar.foo.command "tr ab ba" &&
	git config tar.bar.command "tr ab ba" &&
	git config tar.bar.remote true &&
	git config tar.invalid baz
'
test_expect_success 'archive --list mentions user filter' '
	git archive --list >output &&
	grep "^tar\.foo\$" output &&
	grep "^bar\$" output
'
test_expect_success 'archive --list shows only enabled remote filters' '
	git archive --list --remote=. >output &&
	! grep "^tar\.foo\$" output &&
	grep "^bar\$" output
'
test_expect_success 'invoke tar filter by format' '
	git archive --format=tar.foo HEAD >config.tar.foo &&
	tr ab ba <config.tar.foo >config.tar &&
	test_cmp_bin b.tar config.tar &&
	git archive --format=bar HEAD >config.bar &&
	tr ab ba <config.bar >config.tar &&
	test_cmp_bin b.tar config.tar
'
test_expect_success 'invoke tar filter by extension' '
	git archive -o config-implicit.tar.foo HEAD &&
	test_cmp_bin config.tar.foo config-implicit.tar.foo &&
	git archive -o config-implicit.bar HEAD &&
	test_cmp_bin config.tar.foo config-implicit.bar
'
test_expect_success 'default output format remains tar' '
	git archive -o config-implicit.baz HEAD &&
	test_cmp_bin b.tar config-implicit.baz
'
test_expect_success 'extension matching requires dot' '
	git archive -o config-implicittar.foo HEAD &&
	test_cmp_bin b.tar config-implicittar.foo
'
test_expect_success 'only enabled filters are available remotely' '
	test_must_fail git archive --remote=. --format=tar.foo HEAD \
		>remote.tar.foo &&
	git archive --remote=. --format=bar >remote.bar HEAD &&
	test_cmp_bin remote.bar config.bar
'
test_expect_success 'git archive --format=tgz' '
	git archive --format=tgz HEAD >j.tgz
'
test_expect_success 'git archive --format=tar.gz' '
	git archive --format=tar.gz HEAD >j1.tar.gz &&
	test_cmp_bin j.tgz j1.tar.gz
'
test_expect_success 'infer tgz from .tgz filename' '
	git archive --output=j2.tgz HEAD &&
	test_cmp_bin j.tgz j2.tgz
'
test_expect_success 'infer tgz from .tar.gz filename' '
	git archive --output=j3.tar.gz HEAD &&
	test_cmp_bin j.tgz j3.tar.gz
'
test_expect_success GZIP 'extract tgz file' '
	gzip -d -c <j.tgz >j.tar &&
	test_cmp_bin b.tar j.tar
'
test_expect_success 'remote tar.gz is allowed by default' '
	git archive --remote=. --format=tar.gz HEAD >remote.tar.gz &&
	test_cmp_bin j.tgz remote.tar.gz
'
test_expect_success 'remote tar.gz can be disabled' '
	git config tar.tar.gz.remote false &&
	test_must_fail git archive --remote=. --format=tar.gz HEAD \
		>remote.tar.gz
'
test_expect_success GZIP 'git archive --format=tgz (external gzip)' '
	test_config tar.tgz.command "gzip -cn" &&
	git archive --format=tgz HEAD >external_gzip.tgz
'
test_expect_success GZIP 'git archive --format=tar.gz (external gzip)' '
	test_config tar.tar.gz.command "gzip -cn" &&
	git archive --format=tar.gz HEAD >external_gzip.tar.gz &&
	test_cmp_bin external_gzip.tgz external_gzip.tar.gz
'
test_expect_success GZIP 'extract tgz file (external gzip)' '
	gzip -d -c <external_gzip.tgz >external_gzip.tar &&
	test_cmp_bin b.tar external_gzip.tar
'
test_expect_success 'archive and :(glob)' '
	git archive -v HEAD -- ":(glob)**/sh" >/dev/null 2>actual &&
	cat >expect <<EOF &&
a/
a/bin/
a/bin/sh
EOF
	test_cmp expect actual
'
test_expect_success 'catch non-matching pathspec' '
	test_must_fail git archive -v HEAD -- "*.abc" >/dev/null
'
# Pull the size and date of each entry in a tarfile using the system tar.
#
# We'll pull out only the year from the date; that avoids any question of
# timezones impacting the result (as long as we keep our test times away from a
# year boundary; our reference times are all in August).
#
# The output of tar_info is expected to be "<size> <year>", both in decimal. It
# ignores the return value of tar. We have to do this, because some of our test
# input is only partial (the real data is 64GB in some cases).
tar_info () {
	"$TAR" tvf "$1" |
	awk '{
		split($4, date, "-")
		print $3 " " date[1]
	}'
}
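# A typical "tar tvf" listing line looks like this (GNU tar; other
# implementations may vary in the leading columns):
#
#	-rw-rw-r-- root/root 68719476737 4147-08-17 07:31 huge
#
# so field $3 is the size and $4 the date, of which only the year is kept.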
# See if our system tar can handle a tar file with huge sizes and dates far in
# the future, and that we can actually parse its output.
#
# The reference file was generated by GNU tar, and the magic time and size are
# both octal 01000000000001, which overflows normal ustar fields.
test_lazy_prereq TAR_HUGE '
echo "68719476737 4147" >expect &&
tar_info "$TEST_DIRECTORY"/t5000/huge-and-future.tar >actual &&
test_cmp expect actual
'
test_expect_success LONG_IS_64BIT 'set up repository with huge blob' '
	obj=$(test_oid obj) &&
	path=$(test_oid_to_path $obj) &&
	mkdir -p .git/objects/$(dirname $path) &&
	cp "$TEST_DIRECTORY"/t5000/huge-object .git/objects/$path &&
	rm -f .git/index &&
	git update-index --add --cacheinfo 100644,$obj,huge &&
	git commit -m huge
'
# We expect git to die with SIGPIPE here (otherwise we
# would generate the whole 64GB).
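# A death by signal is reported by the shell as 128+signo, so the
# expected exit code below is 141 (128 + SIGPIPE, which is signal 13).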
test_expect_success LONG_IS_64BIT 'generate tar with huge size' '
	{
		git archive HEAD
		echo $? >exit-code
	} | test_copy_bytes 4096 >huge.tar &&
	echo 141 >expect &&
	test_cmp expect exit-code
'
test_expect_success TAR_HUGE,LONG_IS_64BIT 'system tar can read our huge size' '
	echo 68719476737 >expect &&
	tar_info huge.tar | cut -d" " -f1 >actual &&
	test_cmp expect actual
'
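# 2^34 - 1 = 17179869183 seconds overflows the 11 octal digits of a
# classic ustar mtime field (whose maximum is 2^33 - 1), forcing a pax
# header, while still fitting the commit-graph's 34-bit committer date.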
test_expect_success TIME_IS_64BIT 'set up repository with far-future (2^34 - 1) commit' '
	rm -f .git/index &&
	echo foo >file &&
	git add file &&
	GIT_COMMITTER_DATE="@17179869183 +0000" \
		git commit -m "tempori parendum"
'
test_expect_success TIME_IS_64BIT 'generate tar with far-future mtime' '
	git archive HEAD >future.tar
'
test_expect_success TAR_HUGE,TIME_IS_64BIT,TIME_T_IS_64BIT 'system tar can read our future mtime' '
	echo 2514 >expect &&
	tar_info future.tar | cut -d" " -f2 >actual &&
	test_cmp expect actual
'
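# 2^36 + 1 = 68719476737 seconds overflows even a 12-octal-digit mtime
# field as well as the commit-graph's 34-bit committer date, so the
# commit-graph is disabled for this commit.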
test_expect_success TIME_IS_64BIT 'set up repository with far-far-future (2^36 + 1) commit' '
	rm -f .git/index &&
	echo content >file &&
	git add file &&
	GIT_TEST_COMMIT_GRAPH=0 GIT_COMMITTER_DATE="@68719476737 +0000" \
		git commit -m "tempori parendum"
'
test_expect_success TIME_IS_64BIT 'generate tar with far-far-future mtime' '
	git archive HEAD >future.tar
'
test_expect_success TAR_HUGE,TIME_IS_64BIT,TIME_T_IS_64BIT 'system tar can read our far-far-future mtime' '
	echo 4147 >expect &&
	tar_info future.tar | cut -d" " -f2 >actual &&
	test_cmp expect actual
'
test_done