Mirror of https://github.com/microsoft/git.git
checkout: fix two bugs on the final count of updated entries
At the end of `git checkout <pathspec>`, we get a message informing how many entries were updated in the working tree. However, this number can be inaccurate for two reasons:

1) Delayed entries currently get counted twice.
2) Failed entries are included in the count.

The first problem happens because the counter is first incremented before inserting the entry in the delayed checkout queue, and once again when finish_delayed_checkout() calls checkout_entry(). And the second happens because the counter is incremented too early in checkout_entry(), before the entry was in fact checked out.

Fix that by moving the count increment further down in the call stack and removing the duplicate increment on delayed entries. Note that we have to keep a per-entry reference for the counter (both on parallel checkout and delayed checkout) because not all entries are always accumulated at the same counter. See checkout_worktree(), at builtin/checkout.c, for an example.

Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Parent: 11d14dee43
Commit: 611c7785e8
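For illustration only (this sketch is not part of the commit, and the entry struct, write_out() and checkout_one() names are invented), the counting scheme described above boils down to: each queued entry carries a pointer to the counter it should bump, and that counter is incremented only after the entry has actually been written, so failed entries are never counted and no entry is counted twice.

/*
 * Minimal standalone sketch of the per-entry counter idea (not Git code).
 * Build with: cc -std=c99 -o count-sketch count-sketch.c
 */
#include <stdio.h>

struct entry {
	const char *path;
	int *counter;   /* per-entry counter reference; call sites may pass different counters */
};

/* Pretend that entries whose name starts with 'b' fail to check out. */
static int write_out(const struct entry *e)
{
	return e->path[0] == 'b' ? -1 : 0;
}

static void checkout_one(struct entry *e)
{
	if (write_out(e))
		return;              /* failed entry: not counted */
	if (e->counter)
		(*e->counter)++;     /* counted once, only after a successful write */
}

int main(void)
{
	int nr_checkouts = 0;
	struct entry entries[] = {
		{ "good-1", &nr_checkouts },
		{ "bad",    &nr_checkouts },
		{ "good-2", &nr_checkouts },
	};
	size_t i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		checkout_one(&entries[i]);
	printf("Updated %d paths\n", nr_checkouts);  /* prints 2, not 3 */
	return 0;
}

The actual commit threads this pointer through write_entry(), the delayed-checkout string_list's `util` member, and the parallel_checkout_item's checkout_counter field, as the hunks below show.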
builtin/checkout.c
@@ -417,7 +417,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
 	mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
 	remove_marked_cache_entries(&the_index, 1);
 	remove_scheduled_dirs();
-	errs |= finish_delayed_checkout(&state, &nr_checkouts, opts->show_progress);
+	errs |= finish_delayed_checkout(&state, opts->show_progress);
 
 	if (opts->count_checkout_paths) {
 		if (nr_unmerged)
convert.h
@@ -53,7 +53,11 @@ struct delayed_checkout {
 	enum ce_delay_state state;
 	/* List of filter drivers that signaled delayed blobs. */
 	struct string_list filters;
-	/* List of delayed blobs identified by their path. */
+	/*
+	 * List of delayed blobs identified by their path. The `util` member
+	 * holds a counter pointer which must be incremented when/if the
+	 * associated blob gets checked out.
+	 */
 	struct string_list paths;
 };
 
entry.c
@@ -157,12 +157,11 @@ static int remove_available_paths(struct string_list_item *item, void *cb_data)
 
 	available = string_list_lookup(available_paths, item->string);
 	if (available)
-		available->util = (void *)item->string;
+		available->util = item->util;
 	return !available;
 }
 
-int finish_delayed_checkout(struct checkout *state, int *nr_checkouts,
-			    int show_progress)
+int finish_delayed_checkout(struct checkout *state, int show_progress)
 {
 	int errs = 0;
 	unsigned processed_paths = 0;
@@ -227,7 +226,7 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts,
 						     strlen(path->string), 0);
 			if (ce) {
 				display_progress(progress, ++processed_paths);
-				errs |= checkout_entry(ce, state, NULL, nr_checkouts);
+				errs |= checkout_entry(ce, state, NULL, path->util);
 				filtered_bytes += ce->ce_stat_data.sd_size;
 				display_throughput(progress, filtered_bytes);
 			} else
@@ -266,7 +265,8 @@ void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
 
 /* Note: ca is used (and required) iff the entry refers to a regular file. */
 static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
-		       const struct checkout *state, int to_tempfile)
+		       const struct checkout *state, int to_tempfile,
+		       int *nr_checkouts)
 {
 	unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
 	struct delayed_checkout *dco = state->delayed_checkout;
@@ -279,6 +279,7 @@ static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
 	struct stat st;
 	const struct submodule *sub;
 	struct checkout_metadata meta;
+	static int scratch_nr_checkouts;
 
 	clone_checkout_metadata(&meta, &state->meta, &ce->oid);
 
@@ -333,9 +334,15 @@ static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
 		ret = async_convert_to_working_tree_ca(ca, ce->name,
 						       new_blob, size,
 						       &buf, &meta, dco);
-		if (ret && string_list_has_string(&dco->paths, ce->name)) {
-			free(new_blob);
-			goto delayed;
+		if (ret) {
+			struct string_list_item *item =
+				string_list_lookup(&dco->paths, ce->name);
+			if (item) {
+				item->util = nr_checkouts ? nr_checkouts
+							  : &scratch_nr_checkouts;
+				free(new_blob);
+				goto delayed;
+			}
 		}
 	} else {
 		ret = convert_to_working_tree_ca(ca, ce->name, new_blob,
@@ -392,6 +399,8 @@ finish:
 					     ce->name);
 		update_ce_after_write(state, ce , &st);
 	}
+	if (nr_checkouts)
+		(*nr_checkouts)++;
 delayed:
 	return 0;
 }
@@ -476,7 +485,7 @@ int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
 			convert_attrs(state->istate, &ca_buf, ce->name);
 			ca = &ca_buf;
 		}
-		return write_entry(ce, topath, ca, state, 1);
+		return write_entry(ce, topath, ca, state, 1, nr_checkouts);
 	}
 
 	strbuf_reset(&path);
@@ -540,18 +549,15 @@ int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
 
 	create_directories(path.buf, path.len, state);
 
-	if (nr_checkouts)
-		(*nr_checkouts)++;
-
 	if (S_ISREG(ce->ce_mode) && !ca) {
 		convert_attrs(state->istate, &ca_buf, ce->name);
 		ca = &ca_buf;
 	}
 
-	if (!enqueue_checkout(ce, ca))
+	if (!enqueue_checkout(ce, ca, nr_checkouts))
 		return 0;
 
-	return write_entry(ce, path.buf, ca, state, 0);
+	return write_entry(ce, path.buf, ca, state, 0, nr_checkouts);
 }
 
 void unlink_entry(const struct cache_entry *ce)
entry.h
@@ -43,8 +43,7 @@ static inline int checkout_entry(struct cache_entry *ce,
 }
 
 void enable_delayed_checkout(struct checkout *state);
-int finish_delayed_checkout(struct checkout *state, int *nr_checkouts,
-			    int show_progress);
+int finish_delayed_checkout(struct checkout *state, int show_progress);
 
 /*
  * Unlink the last component and schedule the leading directories for
parallel-checkout.c
@@ -143,7 +143,8 @@ static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
 	}
 }
 
-int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca)
+int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
+		     int *checkout_counter)
 {
 	struct parallel_checkout_item *pc_item;
 
@@ -159,6 +160,7 @@ int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca)
 	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
 	pc_item->status = PC_ITEM_PENDING;
 	pc_item->id = parallel_checkout.nr;
+	pc_item->checkout_counter = checkout_counter;
 	parallel_checkout.nr++;
 
 	return 0;
@@ -200,7 +202,8 @@ static int handle_results(struct checkout *state)
 
 		switch(pc_item->status) {
 		case PC_ITEM_WRITTEN:
-			/* Already handled */
+			if (pc_item->checkout_counter)
+				(*pc_item->checkout_counter)++;
 			break;
 		case PC_ITEM_COLLIDED:
 			/*
@@ -225,7 +228,8 @@ static int handle_results(struct checkout *state)
 			 * add any extra overhead.
 			 */
 			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
-						 state, NULL, NULL);
+						 state, NULL,
+						 pc_item->checkout_counter);
 			advance_progress_meter();
 			break;
 		case PC_ITEM_PENDING:
parallel-checkout.h
@@ -31,7 +31,8 @@ void init_parallel_checkout(void);
  * entry is not eligible for parallel checkout. Otherwise, enqueue the entry
  * for later write and return 0.
  */
-int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca);
+int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
+		     int *checkout_counter);
 size_t pc_queue_size(void);
 
 /*
@@ -68,6 +69,7 @@ struct parallel_checkout_item {
 	struct cache_entry *ce;
 	struct conv_attrs ca;
 	size_t id; /* position in parallel_checkout.items[] of main process */
+	int *checkout_counter;
 
 	/* Output fields, sent from workers. */
 	enum pc_item_status status;
t/t0021-conversion.sh
@@ -1132,7 +1132,7 @@ do
 '
 done
 
-test_expect_failure PERL 'delayed checkout correctly reports the number of updated entries' '
+test_expect_success PERL 'delayed checkout correctly reports the number of updated entries' '
 	rm -rf repo &&
 	git init repo &&
 	(
@@ -230,7 +230,7 @@ test_expect_success SYMLINKS 'parallel checkout checks for symlinks in leading d
 # check the final report including sequential, parallel, and delayed entries
 # all at the same time. So we must have finer control of the parallel checkout
 # variables.
-test_expect_failure PERL '"git checkout ." report should not include failed entries' '
+test_expect_success PERL '"git checkout ." report should not include failed entries' '
 	write_script rot13-filter.pl "$PERL_PATH" \
 		<"$TEST_DIRECTORY"/t0021/rot13-filter.pl &&
 
unpack-trees.c
@@ -487,7 +487,7 @@ static int check_updates(struct unpack_trees_options *o,
 		errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
 					      progress, &cnt);
 	stop_progress(&progress);
-	errs |= finish_delayed_checkout(&state, NULL, o->verbose_update);
+	errs |= finish_delayed_checkout(&state, o->verbose_update);
 	git_attr_set_direction(GIT_ATTR_CHECKIN);
 
 	if (o->clone)