Mirror of https://github.com/microsoft/git.git
Merge branch 'ns/batch-fsync'
Introduce a filesystem-dependent mechanism to optimize the way the
bits for many loose object files are ensured to hit the disk platter.

* ns/batch-fsync:
  core.fsyncmethod: performance tests for batch mode
  t/perf: add iteration setup mechanism to perf-lib
  core.fsyncmethod: tests for batch mode
  test-lib-functions: add parsing helpers for ls-files and ls-tree
  core.fsync: use batch mode and sync loose objects by default on Windows
  unpack-objects: use the bulk-checkin infrastructure
  update-index: use the bulk-checkin infrastructure
  builtin/add: add ODB transaction around add_files_to_cache
  cache-tree: use ODB transaction around writing a tree
  core.fsyncmethod: batched disk flushes for loose-objects
  bulk-checkin: rebrand plug/unplug APIs as 'odb transactions'
  bulk-checkin: rename 'state' variable and separate 'plugged' boolean
This commit is contained in:
Commit 83937e9592
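The heart of the series is the object-database transaction API that replaces the old plug/unplug bulk-checkin calls. A minimal sketch of the calling pattern, assuming a hypothetical caller (only the begin/end_odb_transaction functions below come from this series; the helper is illustrative):

	#include "bulk-checkin.h"

	static void add_one_object(struct object_id *oid); /* hypothetical helper */

	/*
	 * Batch many object additions inside one ODB transaction so the
	 * object layer can defer fsyncs and packfile finalization until
	 * the end of the batch.
	 */
	static void add_many_objects(struct object_id *oids, size_t nr)
	{
		size_t i;

		begin_odb_transaction();
		for (i = 0; i < nr; i++)
			add_one_object(&oids[i]);
		end_odb_transaction(); /* objects become visible here */
	}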
@@ -628,6 +628,14 @@ core.fsyncMethod::
 * `writeout-only` issues pagecache writeback requests, but depending on the
   filesystem and storage hardware, data added to the repository may not be
   durable in the event of a system crash. This is the default mode on macOS.
+* `batch` enables a mode that uses writeout-only flushes to stage multiple
+  updates in the disk writeback cache and then does a single full fsync of
+  a dummy file to trigger the disk cache flush at the end of the operation.
++
+Currently `batch` mode only applies to loose-object files. Other repository
+data is made durable as if `fsync` was specified. This mode is expected to
+be as safe as `fsync` on macOS for repos stored on HFS+ or APFS filesystems
+and on Windows for repos stored on NTFS or ReFS filesystems.
 
 core.fsyncObjectFiles::
 	This boolean will enable 'fsync()' when writing object files.
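For readers unfamiliar with the batching technique, here is a standalone sketch of the idea outside of git (Linux-specific and illustrative, assuming sync_file_range(2) is available; this is not code from the series): request cheap pagecache writeback for each file, then pay for a single hardware cache flush with one fsync on a dummy file.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	/* Stage 1: start writeback without waiting for a disk cache flush. */
	static void writeout_only(int fd)
	{
		sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
	}

	/*
	 * Stage 2: one real fsync on a dummy file in the same filesystem
	 * acts as the durability barrier for the whole batch.
	 */
	static int flush_batch(const char *dummy_path)
	{
		int fd = open(dummy_path, O_RDWR | O_CREAT, 0600);

		if (fd < 0)
			return -1;
		fsync(fd);
		close(fd);
		return 0;
	}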
@@ -141,7 +141,16 @@ int add_files_to_cache(const char *prefix,
 	rev.diffopt.format_callback_data = &data;
 	rev.diffopt.flags.override_submodule_config = 1;
 	rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
+
+	/*
+	 * Use an ODB transaction to optimize adding multiple objects.
+	 * This function is invoked from commands other than 'add', which
+	 * may not have their own transaction active.
+	 */
+	begin_odb_transaction();
 	run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
+	end_odb_transaction();
+
 	clear_pathspec(&rev.prune_data);
 	return !!data.add_errors;
 }
@@ -665,7 +674,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 		string_list_clear(&only_match_skip_worktree, 0);
 	}
 
-	plug_bulk_checkin();
+	begin_odb_transaction();
 
 	if (add_renormalize)
 		exit_status |= renormalize_tracked_files(&pathspec, flags);
@@ -677,7 +686,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 
 	if (chmod_arg && pathspec.nr)
 		exit_status |= chmod_pathspec(&pathspec, chmod_arg[0], show_only);
-	unplug_bulk_checkin();
+	end_odb_transaction();
 
 finish:
 	if (write_locked_index(&the_index, &lock_file,
@@ -1,5 +1,6 @@
 #include "builtin.h"
 #include "cache.h"
+#include "bulk-checkin.h"
 #include "config.h"
 #include "object-store.h"
 #include "object.h"
@@ -503,10 +504,12 @@ static void unpack_all(void)
 	if (!quiet)
 		progress = start_progress(_("Unpacking objects"), nr_objects);
 	CALLOC_ARRAY(obj_list, nr_objects);
+	begin_odb_transaction();
 	for (i = 0; i < nr_objects; i++) {
 		unpack_one(i);
 		display_progress(progress, i + 1);
 	}
+	end_odb_transaction();
 	stop_progress(&progress);
 
 	if (delta_list)
@@ -5,6 +5,7 @@
  */
 #define USE_THE_INDEX_COMPATIBILITY_MACROS
 #include "cache.h"
+#include "bulk-checkin.h"
 #include "config.h"
 #include "lockfile.h"
 #include "quote.h"
@@ -57,6 +58,14 @@ static void report(const char *fmt, ...)
 	if (!verbose)
 		return;
 
+	/*
+	 * It is possible, though unlikely, that a caller could use the verbose
+	 * output to synchronize with addition of objects to the object
+	 * database. The current implementation of ODB transactions leaves
+	 * objects invisible while a transaction is active, so flush the
+	 * transaction here before reporting a change made by update-index.
+	 */
+	flush_odb_transaction();
 	va_start(vp, fmt);
 	vprintf(fmt, vp);
 	putchar('\n');
@@ -1116,6 +1125,12 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
 	 */
 	parse_options_start(&ctx, argc, argv, prefix,
 			    options, PARSE_OPT_STOP_AT_NON_OPTION);
+
+	/*
+	 * Allow the object layer to optimize adding multiple objects in
+	 * a batch.
+	 */
+	begin_odb_transaction();
 	while (ctx.argc) {
 		if (parseopt_state != PARSE_OPT_DONE)
 			parseopt_state = parse_options_step(&ctx, options,
@@ -1190,6 +1205,11 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
 		strbuf_release(&buf);
 	}
 
+	/*
+	 * By now we have added all of the new objects
+	 */
+	end_odb_transaction();
+
 	if (split_index > 0) {
 		if (git_config_get_split_index() == 0)
 			warning(_("core.splitIndex is set to false; "
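The report() hunk above exists because a consumer of the verbose output could otherwise look up a reported object before it is visible in the object database. A condensed, illustrative view of the ordering the command now guarantees (not literal update-index code):

	begin_odb_transaction();
	/* ... add objects; they stay invisible while the transaction is open ... */
	flush_odb_transaction();        /* publish pending objects first */
	printf("add 'file.txt'\n");     /* only then report the change */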
bulk-checkin.c (117 lines changed)
@@ -3,16 +3,21 @@
  */
 #include "cache.h"
 #include "bulk-checkin.h"
+#include "lockfile.h"
 #include "repository.h"
 #include "csum-file.h"
 #include "pack.h"
 #include "strbuf.h"
+#include "string-list.h"
+#include "tmp-objdir.h"
 #include "packfile.h"
 #include "object-store.h"
 
-static struct bulk_checkin_state {
-	unsigned plugged:1;
+static int odb_transaction_nesting;
 
+static struct tmp_objdir *bulk_fsync_objdir;
+
+static struct bulk_checkin_packfile {
 	char *pack_tmp_name;
 	struct hashfile *f;
 	off_t offset;
@@ -21,7 +26,7 @@ static struct bulk_checkin_state {
 	struct pack_idx_entry **written;
 	uint32_t alloc_written;
 	uint32_t nr_written;
-} state;
+} bulk_checkin_packfile;
 
 static void finish_tmp_packfile(struct strbuf *basename,
 				const char *pack_tmp_name,
@@ -39,7 +44,7 @@ static void finish_tmp_packfile(struct strbuf *basename,
 	free(idx_tmp_name);
 }
 
-static void finish_bulk_checkin(struct bulk_checkin_state *state)
+static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
 {
 	unsigned char hash[GIT_MAX_RAWSZ];
 	struct strbuf packname = STRBUF_INIT;
@@ -80,7 +85,41 @@ clear_exit:
 	reprepare_packed_git(the_repository);
 }
 
-static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
+/*
+ * Cleanup after batch-mode fsync_object_files.
+ */
+static void flush_batch_fsync(void)
+{
+	struct strbuf temp_path = STRBUF_INIT;
+	struct tempfile *temp;
+
+	if (!bulk_fsync_objdir)
+		return;
+
+	/*
+	 * Issue a full hardware flush against a temporary file to ensure
+	 * that all objects are durable before any renames occur. The code in
+	 * fsync_loose_object_bulk_checkin has already issued a writeout
+	 * request, but it has not flushed any writeback cache in the storage
+	 * hardware or any filesystem logs. This fsync call acts as a barrier
+	 * to ensure that the data in each new object file is durable before
+	 * the final name is visible.
+	 */
+	strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory());
+	temp = xmks_tempfile(temp_path.buf);
+	fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
+	delete_tempfile(&temp);
+	strbuf_release(&temp_path);
+
+	/*
+	 * Make the object files visible in the primary ODB after their data is
+	 * fully durable.
+	 */
+	tmp_objdir_migrate(bulk_fsync_objdir);
+	bulk_fsync_objdir = NULL;
+}
+
+static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid)
 {
 	int i;
 
@@ -112,7 +151,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o
 	 * status before calling us just in case we ask it to call us again
 	 * with a new pack.
 	 */
-static int stream_to_pack(struct bulk_checkin_state *state,
+static int stream_to_pack(struct bulk_checkin_packfile *state,
 			  git_hash_ctx *ctx, off_t *already_hashed_to,
 			  int fd, size_t size, enum object_type type,
 			  const char *path, unsigned flags)
@@ -189,7 +228,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
 }
 
 /* Lazily create backing packfile for the state */
-static void prepare_to_stream(struct bulk_checkin_state *state,
+static void prepare_to_stream(struct bulk_checkin_packfile *state,
 			      unsigned flags)
 {
 	if (!(flags & HASH_WRITE_OBJECT) || state->f)
@@ -204,7 +243,7 @@ static void prepare_to_stream(struct bulk_checkin_state *state,
 		die_errno("unable to write pack header");
 }
 
-static int deflate_to_pack(struct bulk_checkin_state *state,
+static int deflate_to_pack(struct bulk_checkin_packfile *state,
 			   struct object_id *result_oid,
 			   int fd, size_t size,
 			   enum object_type type, const char *path,
@@ -251,7 +290,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
 		BUG("should not happen");
 	hashfile_truncate(state->f, &checkpoint);
 	state->offset = checkpoint.offset;
-	finish_bulk_checkin(state);
+	flush_bulk_checkin_packfile(state);
 	if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
 		return error("cannot seek back");
 }
@@ -274,25 +313,67 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
 	return 0;
 }
 
+void prepare_loose_object_bulk_checkin(void)
+{
+	/*
+	 * We lazily create the temporary object directory
+	 * the first time an object might be added, since
+	 * callers may not know whether any objects will be
+	 * added at the time they call begin_odb_transaction.
+	 */
+	if (!odb_transaction_nesting || bulk_fsync_objdir)
+		return;
+
+	bulk_fsync_objdir = tmp_objdir_create("bulk-fsync");
+	if (bulk_fsync_objdir)
+		tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0);
+}
+
+void fsync_loose_object_bulk_checkin(int fd, const char *filename)
+{
+	/*
+	 * If we have an active ODB transaction, we issue a call that
+	 * cleans the filesystem page cache but avoids a hardware flush
+	 * command. Later on we will issue a single hardware flush
+	 * before renaming the objects to their final names as part of
+	 * flush_batch_fsync.
+	 */
+	if (!bulk_fsync_objdir ||
+	    git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
+		fsync_or_die(fd, filename);
+	}
+}
+
 int index_bulk_checkin(struct object_id *oid,
 		       int fd, size_t size, enum object_type type,
 		       const char *path, unsigned flags)
 {
-	int status = deflate_to_pack(&state, oid, fd, size, type,
+	int status = deflate_to_pack(&bulk_checkin_packfile, oid, fd, size, type,
 				     path, flags);
-	if (!state.plugged)
-		finish_bulk_checkin(&state);
+	if (!odb_transaction_nesting)
+		flush_bulk_checkin_packfile(&bulk_checkin_packfile);
 	return status;
 }
 
-void plug_bulk_checkin(void)
+void begin_odb_transaction(void)
 {
-	state.plugged = 1;
+	odb_transaction_nesting += 1;
}
 
-void unplug_bulk_checkin(void)
+void flush_odb_transaction(void)
 {
-	state.plugged = 0;
-	if (state.f)
-		finish_bulk_checkin(&state);
+	flush_batch_fsync();
+	flush_bulk_checkin_packfile(&bulk_checkin_packfile);
+}
+
+void end_odb_transaction(void)
+{
+	odb_transaction_nesting -= 1;
+	if (odb_transaction_nesting < 0)
+		BUG("Unbalanced ODB transaction nesting");
+
+	if (odb_transaction_nesting)
+		return;
+
+	flush_odb_transaction();
 }
@@ -6,11 +6,34 @@
 
 #include "cache.h"
 
+void prepare_loose_object_bulk_checkin(void);
+void fsync_loose_object_bulk_checkin(int fd, const char *filename);
+
 int index_bulk_checkin(struct object_id *oid,
 		       int fd, size_t size, enum object_type type,
 		       const char *path, unsigned flags);
 
-void plug_bulk_checkin(void);
-void unplug_bulk_checkin(void);
+/*
+ * Tell the object database to optimize for adding
+ * multiple objects. end_odb_transaction must be called
+ * to make new objects visible. Transactions can be nested,
+ * and objects are only visible after the outermost transaction
+ * is complete or the transaction is flushed.
+ */
+void begin_odb_transaction(void);
+
+/*
+ * Make any objects that are currently part of a pending object
+ * database transaction visible. It is valid to call this function
+ * even if no transaction is active.
+ */
+void flush_odb_transaction(void);
+
+/*
+ * Tell the object database to make any objects from the
+ * current transaction visible if this is the final nested
+ * transaction.
+ */
+void end_odb_transaction(void);
 
 #endif
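A short illustration of the nesting rules documented in these comments (illustrative caller code, not part of the patch):

	begin_odb_transaction();        /* nesting depth 1: new objects invisible */
	begin_odb_transaction();        /* nesting depth 2 */
	/* ... add objects ... */
	end_odb_transaction();          /* back to depth 1: still pending */
	flush_odb_transaction();        /* publish early; transaction stays open */
	end_odb_transaction();          /* depth 0: final flush (nothing left here) */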
@@ -3,6 +3,7 @@
 #include "tree.h"
 #include "tree-walk.h"
 #include "cache-tree.h"
+#include "bulk-checkin.h"
 #include "object-store.h"
 #include "replace-object.h"
 #include "promisor-remote.h"
@@ -474,8 +475,10 @@ int cache_tree_update(struct index_state *istate, int flags)
 
 	trace_performance_enter();
 	trace2_region_enter("cache_tree", "update", the_repository);
+	begin_odb_transaction();
 	i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
 		       "", 0, &skip, flags);
+	end_odb_transaction();
 	trace2_region_leave("cache_tree", "update", the_repository);
 	trace_performance_leave("cache_tree_update");
 	if (i < 0)
cache.h (12 lines changed)
@@ -1031,6 +1031,10 @@ enum fsync_component {
 			      FSYNC_COMPONENT_INDEX | \
 			      FSYNC_COMPONENT_REFERENCE)
 
+#ifndef FSYNC_COMPONENTS_PLATFORM_DEFAULT
+#define FSYNC_COMPONENTS_PLATFORM_DEFAULT FSYNC_COMPONENTS_DEFAULT
+#endif
+
 /*
  * A bitmask indicating which components of the repo should be fsynced.
  */
@@ -1040,7 +1044,8 @@ extern int use_fsync;
 
 enum fsync_method {
 	FSYNC_METHOD_FSYNC,
-	FSYNC_METHOD_WRITEOUT_ONLY
+	FSYNC_METHOD_WRITEOUT_ONLY,
+	FSYNC_METHOD_BATCH,
 };
 
 extern enum fsync_method fsync_method;
@@ -1766,6 +1771,11 @@ void fsync_or_die(int fd, const char *);
 int fsync_component(enum fsync_component component, int fd);
 void fsync_component_or_die(enum fsync_component component, int fd, const char *msg);
 
+static inline int batch_fsync_enabled(enum fsync_component component)
+{
+	return (fsync_components & component) && (fsync_method == FSYNC_METHOD_BATCH);
+}
+
 ssize_t read_in_full(int fd, void *buf, size_t count);
 ssize_t write_in_full(int fd, const void *buf, size_t count);
 ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
@@ -332,6 +332,9 @@ int mingw_getpagesize(void);
 int win32_fsync_no_flush(int fd);
 #define fsync_no_flush win32_fsync_no_flush
 
+#define FSYNC_COMPONENTS_PLATFORM_DEFAULT (FSYNC_COMPONENTS_DEFAULT | FSYNC_COMPONENT_LOOSE_OBJECT)
+#define FSYNC_METHOD_DEFAULT (FSYNC_METHOD_BATCH)
+
 struct rlimit {
 	unsigned int rlim_cur;
 };
config.c (4 lines changed)
@@ -1342,7 +1342,7 @@ static const struct fsync_component_name {
 
 static enum fsync_component parse_fsync_components(const char *var, const char *string)
 {
-	enum fsync_component current = FSYNC_COMPONENTS_DEFAULT;
+	enum fsync_component current = FSYNC_COMPONENTS_PLATFORM_DEFAULT;
 	enum fsync_component positive = 0, negative = 0;
 
 	while (string) {
@@ -1688,6 +1688,8 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
 		fsync_method = FSYNC_METHOD_FSYNC;
 	else if (!strcmp(value, "writeout-only"))
 		fsync_method = FSYNC_METHOD_WRITEOUT_ONLY;
+	else if (!strcmp(value, "batch"))
+		fsync_method = FSYNC_METHOD_BATCH;
 	else
 		warning(_("ignoring unknown core.fsyncMethod value '%s'"), value);
 
@@ -1324,11 +1324,13 @@ __attribute__((format (printf, 3, 4))) NORETURN
 void BUG_fl(const char *file, int line, const char *fmt, ...);
 #define BUG(...) BUG_fl(__FILE__, __LINE__, __VA_ARGS__)
 
+#ifndef FSYNC_METHOD_DEFAULT
 #ifdef __APPLE__
 #define FSYNC_METHOD_DEFAULT FSYNC_METHOD_WRITEOUT_ONLY
 #else
 #define FSYNC_METHOD_DEFAULT FSYNC_METHOD_FSYNC
 #endif
+#endif
 
 enum fsync_action {
 	FSYNC_WRITEOUT_ONLY,
@@ -1893,7 +1893,9 @@ static void close_loose_object(int fd, const char *filename)
 	if (the_repository->objects->odb->will_destroy)
 		goto out;
 
-	if (fsync_object_files > 0)
+	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+		fsync_loose_object_bulk_checkin(fd, filename);
+	else if (fsync_object_files > 0)
 		fsync_or_die(fd, filename);
 	else
 		fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
@@ -1961,6 +1963,9 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 	static struct strbuf tmp_file = STRBUF_INIT;
 	static struct strbuf filename = STRBUF_INIT;
 
+	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
+		prepare_loose_object_bulk_checkin();
+
 	loose_object_path(the_repository, &filename, oid);
 
 	fd = create_tmpfile(&tmp_file, filename.buf);
@@ -0,0 +1,34 @@
+# Helper to create files with unique contents
+
+# Create multiple files with unique contents within this test run. Takes the
+# number of directories, the number of files in each directory, and the base
+# directory.
+#
+# test_create_unique_files 2 3 my_dir -- Creates 2 directories with 3 files
+#					 each in my_dir, all with contents
+#					 different from previous invocations
+#					 of this command in this run.
+
+test_create_unique_files () {
+	test "$#" -ne 3 && BUG "3 param"
+
+	local dirs="$1" &&
+	local files="$2" &&
+	local basedir="$3" &&
+	local counter="0" &&
+	local i &&
+	local j &&
+	test_tick &&
+	local basedata="$basedir$test_tick" &&
+	rm -rf "$basedir" &&
+	for i in $(test_seq $dirs)
+	do
+		local dir="$basedir/dir$i" &&
+		mkdir -p "$dir" &&
+		for j in $(test_seq $files)
+		do
+			counter=$((counter + 1)) &&
+			echo "$basedata.$counter">"$dir/file$j.txt"
+		done
+	done
+}
@@ -0,0 +1,82 @@
+#!/bin/sh
+#
+# This test measures the performance of adding new files to the object
+# database. The test was originally added to measure the effect of the
+# core.fsyncMethod=batch mode, which is why we are testing different values of
+# that setting explicitly and creating a lot of unique objects.
+
+test_description="Tests performance of adding things to the object database"
+
+. ./perf-lib.sh
+
+. $TEST_DIRECTORY/lib-unique-files.sh
+
+test_perf_fresh_repo
+test_checkout_worktree
+
+dir_count=10
+files_per_dir=50
+total_files=$((dir_count * files_per_dir))
+
+populate_files () {
+	test_create_unique_files $dir_count $files_per_dir files
+}
+
+setup_repo () {
+	(rm -rf .git || 1) &&
+	git init &&
+	test_commit first &&
+	populate_files
+}
+
+test_perf_fsync_cfgs () {
+	local method &&
+	local cfg &&
+	for method in none fsync batch writeout-only
+	do
+		case $method in
+		none)
+			cfg="-c core.fsync=none"
+			;;
+		*)
+			cfg="-c core.fsync=loose-object -c core.fsyncMethod=$method"
+		esac &&
+
+		# Set GIT_TEST_FSYNC=1 explicitly since fsync is normally
+		# disabled by t/test-lib.sh.
+		if ! test_perf "$1 (fsyncMethod=$method)" \
+			--setup "$2" \
+			"GIT_TEST_FSYNC=1 git $cfg $3"
+		then
+			break
+		fi
+	done
+}
+
+test_perf_fsync_cfgs "add $total_files files" \
+	"setup_repo" \
+	"add -- files"
+
+test_perf_fsync_cfgs "stash $total_files files" \
+	"setup_repo" \
+	"stash push -u -- files"
+
+test_perf_fsync_cfgs "unpack $total_files files" \
+	"
+	setup_repo &&
+	git -c core.fsync=none add -- files &&
+	git -c core.fsync=none commit -q -m second &&
+	echo HEAD | git pack-objects -q --stdout --revs >test_pack.pack &&
+	setup_repo
+	" \
+	"unpack-objects -q <test_pack.pack"
+
+test_perf_fsync_cfgs "commit $total_files files" \
+	"
+	setup_repo &&
+	git -c core.fsync=none add -- files &&
+	populate_files
+	" \
+	"commit -q -a -m test"
+
+test_done
@@ -36,7 +36,8 @@ do
 	else
 		prereq=""
 	fi
-	test_perf $prereq "$engine log$GIT_PERF_4220_LOG_OPTS --grep='$pattern'" "
+	test_perf "$engine log$GIT_PERF_4220_LOG_OPTS --grep='$pattern'" \
+		--prereq "$prereq" "
 		git -c grep.patternType=$engine log --pretty=format:%h$GIT_PERF_4220_LOG_OPTS --grep='$pattern' >'out.$engine' || :
 	"
 done
@@ -26,7 +26,8 @@ do
 	else
 		prereq=""
 	fi
-	test_perf $prereq "$engine log$GIT_PERF_4221_LOG_OPTS --grep='$pattern'" "
+	test_perf "$engine log$GIT_PERF_4221_LOG_OPTS --grep='$pattern'" \
+		--prereq "$prereq" "
 		git -c grep.patternType=$engine log --pretty=format:%h$GIT_PERF_4221_LOG_OPTS --grep='$pattern' >'out.$engine' || :
 	"
 done
@@ -26,9 +26,8 @@ test_expect_success 'set up thread-counting tests' '
 	done
 '
 
-test_perf PERF_EXTRA 'index-pack 0 threads' '
-	rm -rf repo.git &&
-	git init --bare repo.git &&
+test_perf 'index-pack 0 threads' --prereq PERF_EXTRA \
+	--setup 'rm -rf repo.git && git init --bare repo.git' '
 	GIT_DIR=repo.git git index-pack --threads=1 --stdin < $PACK
 '
@@ -36,17 +35,15 @@ for t in $threads
 do
 	THREADS=$t
 	export THREADS
-	test_perf PERF_EXTRA "index-pack $t threads" '
-		rm -rf repo.git &&
-		git init --bare repo.git &&
+	test_perf "index-pack $t threads" --prereq PERF_EXTRA \
+		--setup 'rm -rf repo.git && git init --bare repo.git' '
 		GIT_DIR=repo.git GIT_FORCE_THREADS=1 \
 		git index-pack --threads=$THREADS --stdin <$PACK
 	'
 done
 
-test_perf 'index-pack default number of threads' '
-	rm -rf repo.git &&
-	git init --bare repo.git &&
+test_perf 'index-pack default number of threads' \
+	--setup 'rm -rf repo.git && git init --bare repo.git' '
 	GIT_DIR=repo.git git index-pack --stdin < $PACK
 '
@@ -60,18 +60,6 @@ then
 	esac
 fi
 
-if test -n "$GIT_PERF_7519_DROP_CACHE"
-then
-	# When using GIT_PERF_7519_DROP_CACHE, GIT_PERF_REPEAT_COUNT must be 1 to
-	# generate valid results. Otherwise the caching that happens for the nth
-	# run will negate the validity of the comparisons.
-	if test "$GIT_PERF_REPEAT_COUNT" -ne 1
-	then
-		echo "warning: Setting GIT_PERF_REPEAT_COUNT=1" >&2
-		GIT_PERF_REPEAT_COUNT=1
-	fi
-fi
-
 trace_start () {
 	if test -n "$GIT_PERF_7519_TRACE"
 	then
@@ -175,10 +163,10 @@ setup_for_fsmonitor_hook () {
 
 test_perf_w_drop_caches () {
 	if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-		test-tool drop-caches
+		test_perf "$1" --setup "test-tool drop-caches" "$2"
+	else
+		test_perf "$@"
 	fi
-
-	test_perf "$@"
 }
 
 test_fsmonitor_suite () {
@@ -49,13 +49,15 @@ do
 	fi
 	if ! test_have_prereq PERF_GREP_ENGINES_THREADS
 	then
-		test_perf $prereq "$engine grep$GIT_PERF_7820_GREP_OPTS '$pattern'" "
+		test_perf "$engine grep$GIT_PERF_7820_GREP_OPTS '$pattern'" \
+			--prereq "$prereq" "
			git -c grep.patternType=$engine grep$GIT_PERF_7820_GREP_OPTS -- '$pattern' >'out.$engine' || :
 		"
 	else
 		for threads in $GIT_PERF_GREP_THREADS
 		do
-			test_perf PTHREADS,$prereq "$engine grep$GIT_PERF_7820_GREP_OPTS '$pattern' with $threads threads" "
+			test_perf "$engine grep$GIT_PERF_7820_GREP_OPTS '$pattern' with $threads threads" \
+				--prereq PTHREADS,$prereq "
				git -c grep.patternType=$engine -c grep.threads=$threads grep$GIT_PERF_7820_GREP_OPTS -- '$pattern' >'out.$engine.$threads' || :
 			"
 		done
@@ -189,19 +189,39 @@ exit $ret' >&3 2>&4
 }
 
 test_wrapper_ () {
-	test_wrapper_func_=$1; shift
+	local test_wrapper_func_="$1"; shift
+	local test_title_="$1"; shift
 	test_start_
-	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
-	test "$#" = 2 ||
-	BUG "not 2 or 3 parameters to test-expect-success"
+	test_prereq=
+	test_perf_setup_=
+	while test $# != 0
+	do
+		case $1 in
+		--prereq)
+			test_prereq=$2
+			shift
+			;;
+		--setup)
+			test_perf_setup_=$2
+			shift
+			;;
+		*)
+			break
+			;;
+		esac
+		shift
+	done
+	test "$#" = 1 || BUG "test_wrapper_ needs 2 positional parameters"
 	export test_prereq
-	if ! test_skip "$@"
+	export test_perf_setup_
+
+	if ! test_skip "$test_title_" "$@"
 	then
 		base=$(basename "$0" .sh)
 		echo "$test_count" >>"$perf_results_dir"/$base.subtests
 		echo "$1" >"$perf_results_dir"/$base.$test_count.descr
 		base="$perf_results_dir"/"$PERF_RESULTS_PREFIX$(basename "$0" .sh)"."$test_count"
-		"$test_wrapper_func_" "$@"
+		"$test_wrapper_func_" "$test_title_" "$@"
 	fi
 
 	test_finish_
@@ -214,6 +234,16 @@ test_perf_ () {
 		echo "perf $test_count - $1:"
 	fi
 	for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
+		if test -n "$test_perf_setup_"
+		then
+			say >&3 "setup: $test_perf_setup_"
+			if ! test_eval_ $test_perf_setup_
+			then
+				test_failure_ "$test_perf_setup_"
+				break
+			fi
+
+		fi
 		say >&3 "running: $2"
 		if test_run_perf_ "$2"
 		then
@@ -237,11 +267,24 @@ test_perf_ () {
 	rm test_time.*
 }
 
+# Usage: test_perf 'title' [options] 'perf-test'
+#	Run the performance test script specified in perf-test with
+#	optional prerequisite and setup steps.
+# Options:
+#	--prereq prerequisites: Skip the test if prerequisites aren't met
+#	--setup "setup-steps": Run setup steps prior to each measured iteration
+#
 test_perf () {
 	test_wrapper_ test_perf_ "$@"
 }
 
 test_size_ () {
+	if test -n "$test_perf_setup_"
+	then
+		say >&3 "setup: $test_perf_setup_"
+		test_eval_ $test_perf_setup_
+	fi
+
 	say >&3 "running: $2"
 	if test_eval_ "$2" 3>"$base".result; then
 		test_ok_ "$1"
@@ -250,6 +293,14 @@ test_size_ () {
 	fi
 }
 
+# Usage: test_size 'title' [options] 'size-test'
+#	Run the size test script specified in size-test with optional
+#	prerequisites and setup steps. Returns the numeric value
+#	returned by size-test.
+# Options:
+#	--prereq prerequisites: Skip the test if prerequisites aren't met
+#	--setup "setup-steps": Run setup steps prior to the size measurement
+
 test_size () {
 	test_wrapper_ test_size_ "$@"
 }
@@ -8,6 +8,8 @@ test_description='Test of git add, including the -- option.'
 TEST_PASSES_SANITIZE_LEAK=true
 . ./test-lib.sh
 
+. $TEST_DIRECTORY/lib-unique-files.sh
+
 # Test the file mode "$1" of the file "$2" in the index.
 test_mode_in_index () {
 	case "$(git ls-files -s "$2")" in
@@ -34,6 +36,32 @@ test_expect_success \
 	'Test that "git add -- -q" works' \
 	'touch -- -q && git add -- -q'
 
+BATCH_CONFIGURATION='-c core.fsync=loose-object -c core.fsyncmethod=batch'
+
+test_expect_success 'git add: core.fsyncmethod=batch' "
+	test_create_unique_files 2 4 files_base_dir1 &&
+	GIT_TEST_FSYNC=1 git $BATCH_CONFIGURATION add -- ./files_base_dir1/ &&
+	git ls-files --stage files_base_dir1/ |
+	test_parse_ls_files_stage_oids >added_files_oids &&
+
+	# We created 2 subdirs with 4 files each (8 files total) above
+	test_line_count = 8 added_files_oids &&
+	git cat-file --batch-check='%(objectname)' <added_files_oids >added_files_actual &&
+	test_cmp added_files_oids added_files_actual
+"
+
+test_expect_success 'git update-index: core.fsyncmethod=batch' "
+	test_create_unique_files 2 4 files_base_dir2 &&
+	find files_base_dir2 ! -type d -print | xargs git $BATCH_CONFIGURATION update-index --add -- &&
+	git ls-files --stage files_base_dir2 |
+	test_parse_ls_files_stage_oids >added_files2_oids &&
+
+	# We created 2 subdirs with 4 files each (8 files total) above
+	test_line_count = 8 added_files2_oids &&
+	git cat-file --batch-check='%(objectname)' <added_files2_oids >added_files2_actual &&
+	test_cmp added_files2_oids added_files2_actual
+"
+
 test_expect_success \
 	'git add: Test that executable bit is not used if core.filemode=0' \
 	'git config core.filemode 0 &&
@@ -9,6 +9,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
 export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 
 . ./test-lib.sh
+. $TEST_DIRECTORY/lib-unique-files.sh
 
 test_expect_success 'usage on cmd and subcommand invalid option' '
 	test_expect_code 129 git stash --invalid-option 2>usage &&
@@ -1410,6 +1411,25 @@ test_expect_success 'stash handles skip-worktree entries nicely' '
 	git rev-parse --verify refs/stash:A.t
 '
 
+
+BATCH_CONFIGURATION='-c core.fsync=loose-object -c core.fsyncmethod=batch'
+
+test_expect_success 'stash with core.fsyncmethod=batch' "
+	test_create_unique_files 2 4 files_base_dir &&
+	GIT_TEST_FSYNC=1 git $BATCH_CONFIGURATION stash push -u -- ./files_base_dir/ &&
+
+	# The files were untracked, so use the third parent,
+	# which contains the untracked files
+	git ls-tree -r stash^3 -- ./files_base_dir/ |
+	test_parse_ls_tree_oids >stashed_files_oids &&
+
+	# We created 2 dirs with 4 files each (8 files total) above
+	test_line_count = 8 stashed_files_oids &&
+	git cat-file --batch-check='%(objectname)' <stashed_files_oids >stashed_files_actual &&
+	test_cmp stashed_files_oids stashed_files_actual
+"
+
+
 test_expect_success 'git stash succeeds despite directory/file change' '
 	test_create_repo directory_file_switch_v1 &&
 	(
@@ -161,22 +161,27 @@ test_expect_success 'pack-objects with bogus arguments' '
 '
 
 check_unpack () {
+	local packname="$1" &&
+	local object_list="$2" &&
+	local git_config="$3" &&
 	test_when_finished "rm -rf git2" &&
-	git init --bare git2 &&
-	git -C git2 unpack-objects -n <"$1".pack &&
-	git -C git2 unpack-objects <"$1".pack &&
-	(cd .git && find objects -type f -print) |
-	while read path
-	do
-		cmp git2/$path .git/$path || {
-			echo $path differs.
-			return 1
-		}
-	done
+	git $git_config init --bare git2 &&
+	(
+		git $git_config -C git2 unpack-objects -n <"$packname".pack &&
+		git $git_config -C git2 unpack-objects <"$packname".pack &&
+		git $git_config -C git2 cat-file --batch-check="%(objectname)"
+	) <"$object_list" >current &&
+	cmp "$object_list" current
 }
 
 test_expect_success 'unpack without delta' '
-	check_unpack test-1-${packname_1}
+	check_unpack test-1-${packname_1} obj-list
+'
+
+BATCH_CONFIGURATION='-c core.fsync=loose-object -c core.fsyncmethod=batch'
+
+test_expect_success 'unpack without delta (core.fsyncmethod=batch)' '
+	check_unpack test-1-${packname_1} obj-list "$BATCH_CONFIGURATION"
 '
 
 test_expect_success 'pack with REF_DELTA' '
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success 'unpack with REF_DELTA' '
|
test_expect_success 'unpack with REF_DELTA' '
|
||||||
check_unpack test-2-${packname_2}
|
check_unpack test-2-${packname_2} obj-list
|
||||||
|
'
|
||||||
|
|
||||||
|
test_expect_success 'unpack with REF_DELTA (core.fsyncmethod=batch)' '
|
||||||
|
check_unpack test-2-${packname_2} obj-list "$BATCH_CONFIGURATION"
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success 'pack with OFS_DELTA' '
|
test_expect_success 'pack with OFS_DELTA' '
|
||||||
|
@@ -195,7 +204,11 @@ test_expect_success 'pack with OFS_DELTA' '
 '
 
 test_expect_success 'unpack with OFS_DELTA' '
-	check_unpack test-3-${packname_3}
+	check_unpack test-3-${packname_3} obj-list
+'
+
+test_expect_success 'unpack with OFS_DELTA (core.fsyncmethod=batch)' '
+	check_unpack test-3-${packname_3} obj-list "$BATCH_CONFIGURATION"
 '
 
 test_expect_success 'compare delta flavors' '
@@ -10,9 +10,6 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
 # Test blob:none filter.
 
 test_expect_success 'setup r1' '
-	echo "{print \$1}" >print_1.awk &&
-	echo "{print \$2}" >print_2.awk &&
-
 	git init r1 &&
 	for n in 1 2 3 4 5
 	do
|
@ -22,10 +19,13 @@ test_expect_success 'setup r1' '
|
||||||
done
|
done
|
||||||
'
|
'
|
||||||
|
|
||||||
|
parse_verify_pack_blob_oid () {
|
||||||
|
awk '{print $1}' -
|
||||||
|
}
|
||||||
|
|
||||||
test_expect_success 'verify blob count in normal packfile' '
|
test_expect_success 'verify blob count in normal packfile' '
|
||||||
git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
|
git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 |
|
||||||
>ls_files_result &&
|
test_parse_ls_files_stage_oids |
|
||||||
awk -f print_2.awk ls_files_result |
|
|
||||||
sort >expected &&
|
sort >expected &&
|
||||||
|
|
||||||
git -C r1 pack-objects --revs --stdout >all.pack <<-EOF &&
|
git -C r1 pack-objects --revs --stdout >all.pack <<-EOF &&
|
||||||
|
@@ -35,7 +35,7 @@ test_expect_success 'verify blob count in normal packfile' '
 
 	git -C r1 verify-pack -v ../all.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
|
@ -54,12 +54,12 @@ test_expect_success 'verify blob:none packfile has no blobs' '
|
||||||
test_expect_success 'verify normal and blob:none packfiles have same commits/trees' '
|
test_expect_success 'verify normal and blob:none packfiles have same commits/trees' '
|
||||||
git -C r1 verify-pack -v ../all.pack >verify_result &&
|
git -C r1 verify-pack -v ../all.pack >verify_result &&
|
||||||
grep -E "commit|tree" verify_result |
|
grep -E "commit|tree" verify_result |
|
||||||
awk -f print_1.awk |
|
parse_verify_pack_blob_oid |
|
||||||
sort >expected &&
|
sort >expected &&
|
||||||
|
|
||||||
git -C r1 verify-pack -v ../filter.pack >verify_result &&
|
git -C r1 verify-pack -v ../filter.pack >verify_result &&
|
||||||
grep -E "commit|tree" verify_result |
|
grep -E "commit|tree" verify_result |
|
||||||
awk -f print_1.awk |
|
parse_verify_pack_blob_oid |
|
||||||
sort >observed &&
|
sort >observed &&
|
||||||
|
|
||||||
test_cmp expected observed
|
test_cmp expected observed
|
||||||
|
@@ -123,8 +123,8 @@ test_expect_success 'setup r2' '
 '
 
 test_expect_success 'verify blob count in normal packfile' '
-	git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 large.10000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r2 pack-objects --revs --stdout >all.pack <<-EOF &&
@@ -134,7 +134,7 @@ test_expect_success 'verify blob count in normal packfile' '
 
 	git -C r2 verify-pack -v ../all.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
@@ -161,8 +161,8 @@ test_expect_success 'verify blob:limit=1000' '
 '
 
 test_expect_success 'verify blob:limit=1001' '
-	git -C r2 ls-files -s large.1000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r2 pack-objects --revs --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
@@ -172,15 +172,15 @@ test_expect_success 'verify blob:limit=1001' '
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=10001' '
-	git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 large.10000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r2 pack-objects --revs --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
@@ -190,15 +190,15 @@ test_expect_success 'verify blob:limit=10001' '
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1k' '
-	git -C r2 ls-files -s large.1000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r2 pack-objects --revs --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
@@ -208,15 +208,15 @@ test_expect_success 'verify blob:limit=1k' '
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
 '
 
 test_expect_success 'verify explicitly specifying oversized blob in input' '
-	git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 large.10000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	echo HEAD >objects &&
@@ -226,15 +226,15 @@ test_expect_success 'verify explicitly specifying oversized blob in input' '
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1m' '
-	git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r2 ls-files -s large.1000 large.10000 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r2 pack-objects --revs --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
@@ -244,7 +244,7 @@ test_expect_success 'verify blob:limit=1m' '
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
@@ -253,12 +253,12 @@ test_expect_success 'verify blob:limit=1m' '
 test_expect_success 'verify normal and blob:limit packfiles have same commits/trees' '
 	git -C r2 verify-pack -v ../all.pack >verify_result &&
 	grep -E "commit|tree" verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >expected &&
 
 	git -C r2 verify-pack -v ../filter.pack >verify_result &&
 	grep -E "commit|tree" verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
@@ -289,9 +289,8 @@ test_expect_success 'setup r3' '
 '
 
 test_expect_success 'verify blob count in normal packfile' '
-	git -C r3 ls-files -s sparse1 sparse2 dir1/sparse1 dir1/sparse2 \
-		>ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r3 ls-files -s sparse1 sparse2 dir1/sparse1 dir1/sparse2 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r3 pack-objects --revs --stdout >all.pack <<-EOF &&
@@ -301,7 +300,7 @@ test_expect_success 'verify blob count in normal packfile' '
 
 	git -C r3 verify-pack -v ../all.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
@@ -342,9 +341,8 @@ test_expect_success 'setup r4' '
 '
 
 test_expect_success 'verify blob count in normal packfile' '
-	git -C r4 ls-files -s pattern sparse1 sparse2 dir1/sparse1 dir1/sparse2 \
-		>ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r4 ls-files -s pattern sparse1 sparse2 dir1/sparse1 dir1/sparse2 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r4 pack-objects --revs --stdout >all.pack <<-EOF &&
|
||||||
|
|
||||||
git -C r4 verify-pack -v ../all.pack >verify_result &&
|
git -C r4 verify-pack -v ../all.pack >verify_result &&
|
||||||
grep blob verify_result |
|
grep blob verify_result |
|
||||||
awk -f print_1.awk |
|
parse_verify_pack_blob_oid |
|
||||||
sort >observed &&
|
sort >observed &&
|
||||||
|
|
||||||
test_cmp expected observed
|
test_cmp expected observed
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success 'verify sparse:oid=OID' '
|
test_expect_success 'verify sparse:oid=OID' '
|
||||||
git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result &&
|
git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 |
|
||||||
awk -f print_2.awk ls_files_result |
|
test_parse_ls_files_stage_oids |
|
||||||
sort >expected &&
|
sort >expected &&
|
||||||
|
|
||||||
git -C r4 ls-files -s pattern >staged &&
|
git -C r4 ls-files -s pattern >staged &&
|
||||||
oid=$(awk -f print_2.awk staged) &&
|
oid=$(test_parse_ls_files_stage_oids <staged) &&
|
||||||
git -C r4 pack-objects --revs --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
|
git -C r4 pack-objects --revs --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
|
||||||
HEAD
|
HEAD
|
||||||
EOF
|
EOF
|
||||||
|
@@ -374,15 +372,15 @@ test_expect_success 'verify sparse:oid=OID' '
 
 	git -C r4 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:oid=oid-ish' '
-	git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	git -C r4 pack-objects --revs --stdout --filter=sparse:oid=main:pattern >filter.pack <<-EOF &&
@@ -392,7 +390,7 @@ test_expect_success 'verify sparse:oid=oid-ish' '
 
 	git -C r4 verify-pack -v ../filter.pack >verify_result &&
 	grep blob verify_result |
-	awk -f print_1.awk |
+	parse_verify_pack_blob_oid |
 	sort >observed &&
 
 	test_cmp expected observed
@@ -402,9 +400,8 @@ test_expect_success 'verify sparse:oid=oid-ish' '
 # This models previously omitted objects that we did not receive.
 
 test_expect_success 'setup r1 - delete loose blobs' '
-	git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
-		>ls_files_result &&
-	awk -f print_2.awk ls_files_result |
+	git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 |
+	test_parse_ls_files_stage_oids |
 	sort >expected &&
 
 	for id in `cat expected | sed "s|..|&/|"`
@@ -1782,6 +1782,16 @@ test_oid_to_path () {
 	echo "${1%$basename}/$basename"
 }
 
+# Parse oids from git ls-files --staged output
+test_parse_ls_files_stage_oids () {
+	awk '{print $2}' -
+}
+
+# Parse oids from git ls-tree output
+test_parse_ls_tree_oids () {
+	awk '{print $3}' -
+}
+
 # Choose a port number based on the test script's number and store it in
 # the given variable name, unless that variable already contains a number.
 test_set_port () {