fscache: fscache takes an initial size

Update enable_fscache() to take an optional initial size parameter, which is
used to initialize the hashmap so that it can avoid rehashing as additional
entries are added.

Add a separate disable_fscache() macro to make the code clearer and easier
to read.

Signed-off-by: Ben Peart <benpeart@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Ben Peart authored 2018-11-02 11:19:10 -04:00; committed by Matthew John Cheetham
Parent bd86d90b37
Commit 2b642bcf36
9 changed files with 24 additions and 15 deletions
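
For orientation before the diffs: a minimal sketch of how callers are meant to use the reworked interface. The walk_entries() function below is hypothetical and only illustrates the calling convention; enable_fscache(), disable_fscache(), and index->cache_nr are the actual names from this patch and Git's index code.

/*
 * Sketch only (assumes Git's internal headers, e.g. cache.h, are included).
 * On Windows the macros map to fscache_enable(); elsewhere they compile to
 * no-ops via the fallbacks added to git-compat-util.h below.
 */
static void walk_entries(struct index_state *index)
{
	/*
	 * Pass the expected entry count so the cache's hashmap can be sized
	 * up front (fscache_enable() multiplies it by 4 to leave room for
	 * parent directories); pass 0 when no estimate is available.
	 */
	enable_fscache(index->cache_nr);

	/* ... lstat()/opendir()-heavy work over the index entries ... */

	disable_fscache();	/* previously spelled enable_fscache(0) */
}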

builtin/add.c

@@ -553,7 +553,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 	die_in_unpopulated_submodule(&the_index, prefix);
 	die_path_inside_submodule(&the_index, &pathspec);
-	enable_fscache(1);
+	enable_fscache(0);
 	/* We do not really re-read the index but update the up-to-date flags */
 	preload_index(&the_index, &pathspec, 0);

builtin/checkout.c

@@ -401,7 +401,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
 	if (pc_workers > 1)
 		init_parallel_checkout();
-	enable_fscache(1);
+	enable_fscache(the_index.cache_nr);
 	for (pos = 0; pos < the_index.cache_nr; pos++) {
 		struct cache_entry *ce = the_index.cache[pos];
 		if (ce->ce_flags & CE_MATCHED) {
@@ -426,7 +426,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
 	errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
 				      NULL, NULL);
 	mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
-	enable_fscache(0);
+	disable_fscache();
 	remove_marked_cache_entries(&the_index, 1);
 	remove_scheduled_dirs();
 	errs |= finish_delayed_checkout(&state, opts->show_progress);

builtin/commit.c

@@ -1562,7 +1562,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 		       PATHSPEC_PREFER_FULL,
 		       prefix, argv);
-	enable_fscache(1);
+	enable_fscache(0);
 	if (status_format != STATUS_FORMAT_PORCELAIN &&
 	    status_format != STATUS_FORMAT_PORCELAIN_V2)
 		progress_flag = REFRESH_PROGRESS;
@@ -1603,7 +1603,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 	wt_status_print(&s);
 	wt_status_collect_free_buffers(&s);
-	enable_fscache(0);
+	disable_fscache();
 	return 0;
 }

compat/win32/fscache.c

@@ -405,7 +405,7 @@ static struct fsentry *fscache_get(struct fsentry *key)
  * Enables or disables the cache. Note that the cache is read-only, changes to
  * the working directory are NOT reflected in the cache while enabled.
  */
-int fscache_enable(int enable)
+int fscache_enable(int enable, size_t initial_size)
 {
 	int result;
@@ -421,7 +421,11 @@ int fscache_enable(int enable)
 		InitializeCriticalSection(&mutex);
 		lstat_requests = opendir_requests = 0;
 		fscache_misses = fscache_requests = 0;
-		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, 0);
+		/*
+		 * avoid having to rehash by leaving room for the parent dirs.
+		 * '4' was determined empirically by testing several repos
+		 */
+		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, initial_size * 4);
 		initialized = 1;
 	}

compat/win32/fscache.h

@@ -1,8 +1,9 @@
 #ifndef FSCACHE_H
 #define FSCACHE_H
-int fscache_enable(int enable);
-#define enable_fscache(x) fscache_enable(x)
+int fscache_enable(int enable, size_t initial_size);
+#define enable_fscache(initial_size) fscache_enable(1, initial_size)
+#define disable_fscache() fscache_enable(0, 0)
 int fscache_enabled(const char *path);
 #define is_fscache_enabled(path) fscache_enabled(path)

fetch-pack.c

@@ -762,7 +762,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 	save_commit_buffer = 0;
 	trace2_region_enter("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
-	enable_fscache(1);
+	enable_fscache(0);
 	for (ref = *refs; ref; ref = ref->next) {
 		struct commit *commit;
@@ -789,7 +789,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 		if (!cutoff || cutoff < commit->date)
 			cutoff = commit->date;
 	}
-	enable_fscache(0);
+	disable_fscache();
 	trace2_region_leave("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
 	/*

git-compat-util.h

@@ -1616,6 +1616,10 @@ static inline int is_missing_file_error(int errno_)
 #define enable_fscache(x) /* noop */
 #endif
+#ifndef disable_fscache
+#define disable_fscache() /* noop */
+#endif
 #ifndef is_fscache_enabled
 #define is_fscache_enabled(path) (0)
 #endif

preload-index.c

@@ -130,7 +130,7 @@ void preload_index(struct index_state *index,
 		pthread_mutex_init(&pd.mutex, NULL);
 	}
-	enable_fscache(1);
+	enable_fscache(index->cache_nr);
 	for (i = 0; i < threads; i++) {
 		struct thread_data *p = data+i;
 		int err;
@@ -167,7 +167,7 @@ void preload_index(struct index_state *index,
 	trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat);
 	trace2_region_leave("index", "preload", NULL);
-	enable_fscache(0);
+	disable_fscache();
 }
 int repo_read_index_preload(struct repository *repo,

read-cache.c

@@ -1586,7 +1586,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
 	added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
 	unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
-	enable_fscache(1);
+	enable_fscache(0);
 	/*
 	 * Use the multi-threaded preload_index() to refresh most of the
 	 * cache entries quickly then in the single threaded loop below,
@@ -1681,7 +1681,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	display_progress(progress, istate->cache_nr);
 	stop_progress(&progress);
 	trace_performance_leave("refresh index");
-	enable_fscache(0);
+	disable_fscache();
 	return has_errors;
 }