fscache: fscache takes an initial size

Update enable_fscache() to take an optional initial size parameter, which is
used to size the hashmap up front so that it can avoid rehashing as
additional entries are added.

Add a separate disable_fscache() macro to make the code clearer and easier
to read.

Signed-off-by: Ben Peart <benpeart@microsoft.com>
Authored by Ben Peart on 2018-11-02 11:19:10 -04:00, committed by Johannes Schindelin
Parent: b10e3e56ca
Commit: b8ff8d15e0
9 changed files with 24 additions and 15 deletions
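For orientation, a minimal sketch of the call-site pattern this change introduces. The helper name and the index variable below are illustrative assumptions, not part of the commit; the enable_fscache()/disable_fscache() macros and the size hints (active_nr, index->cache_nr) come from the diffs that follow.

	/*
	 * Illustrative only: enable the cache with a size hint, do the
	 * lstat()-heavy work, then tear the cache down.  On non-Windows
	 * builds both macros compile to noops (see git-compat-util.h below).
	 */
	enable_fscache(istate->cache_nr);	/* presize: roughly one entry per index entry */
	do_lstat_heavy_work(istate);		/* hypothetical placeholder for the real work */
	disable_fscache();			/* previously spelled enable_fscache(0) */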


@@ -500,7 +500,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 	die_in_unpopulated_submodule(&the_index, prefix);
 	die_path_inside_submodule(&the_index, &pathspec);
-	enable_fscache(1);
+	enable_fscache(0);
 	/* We do not really re-read the index but update the up-to-date flags */
 	preload_index(&the_index, &pathspec, 0);


@@ -414,7 +414,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 	state.istate = &the_index;
 	enable_delayed_checkout(&state);
-	enable_fscache(1);
+	enable_fscache(active_nr);
 	for (pos = 0; pos < active_nr; pos++) {
 		struct cache_entry *ce = active_cache[pos];
 		if (ce->ce_flags & CE_MATCHED) {
@@ -434,7 +434,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 			pos = skip_same_name(ce, pos) - 1;
 		}
 	}
-	enable_fscache(0);
+	disable_fscache();
 	remove_marked_cache_entries(&the_index, 1);
 	remove_scheduled_dirs();
 	errs |= finish_delayed_checkout(&state, &nr_checkouts);


@@ -1376,7 +1376,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 		       PATHSPEC_PREFER_FULL,
 		       prefix, argv);
-	enable_fscache(1);
+	enable_fscache(0);
 	if (status_format != STATUS_FORMAT_PORCELAIN &&
 	    status_format != STATUS_FORMAT_PORCELAIN_V2)
 		progress_flag = REFRESH_PROGRESS;
@@ -1417,7 +1417,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 	wt_status_print(&s);
 	wt_status_collect_free_buffers(&s);
-	enable_fscache(0);
+	disable_fscache();
 	return 0;
 }


@@ -386,7 +386,7 @@ static struct fsentry *fscache_get(struct fsentry *key)
  * Enables or disables the cache. Note that the cache is read-only, changes to
  * the working directory are NOT reflected in the cache while enabled.
  */
-int fscache_enable(int enable)
+int fscache_enable(int enable, size_t initial_size)
 {
 	int result;
@@ -402,7 +402,11 @@ int fscache_enable(int enable)
 		InitializeCriticalSection(&mutex);
 		lstat_requests = opendir_requests = 0;
 		fscache_misses = fscache_requests = 0;
-		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, 0);
+		/*
+		 * avoid having to rehash by leaving room for the parent dirs.
+		 * '4' was determined empirically by testing several repos
+		 */
+		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, initial_size * 4);
 		initialized = 1;
 	}
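A rough sketch of the sizing rationale, using figures that are illustrative assumptions rather than measurements from the commit: the final hashmap_init() argument is only an initial-size hint, so overestimating costs some unused buckets, while the old value of 0 made the map start at its default size and rehash repeatedly as the working tree was enumerated.

	/*
	 * Illustrative only: presizing for a checkout that will stat roughly
	 * 50,000 index entries.  The factor of 4 leaves headroom for the
	 * parent-directory entries the cache stores alongside the files.
	 */
	size_t initial_size = 50000;	/* e.g. active_nr at the call site */
	hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, initial_size * 4);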


@@ -1,8 +1,9 @@
 #ifndef FSCACHE_H
 #define FSCACHE_H
-int fscache_enable(int enable);
-#define enable_fscache(x) fscache_enable(x)
+int fscache_enable(int enable, size_t initial_size);
+#define enable_fscache(initial_size) fscache_enable(1, initial_size)
+#define disable_fscache() fscache_enable(0, 0)
 int fscache_enabled(const char *path);
 #define is_fscache_enabled(path) fscache_enabled(path)


@@ -671,7 +671,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 	save_commit_buffer = 0;
-	enable_fscache(1);
+	enable_fscache(0);
 	for (ref = *refs; ref; ref = ref->next) {
 		struct object *o;
@@ -692,7 +692,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 				cutoff = commit->date;
 		}
 	}
-	enable_fscache(0);
+	disable_fscache();
 	if (!args->deepen) {
 		for_each_ref(mark_complete_oid, NULL);


@@ -1298,6 +1298,10 @@ static inline int is_missing_file_error(int errno_)
 #define enable_fscache(x) /* noop */
 #endif
+#ifndef disable_fscache
+#define disable_fscache() /* noop */
+#endif
 #ifndef is_fscache_enabled
 #define is_fscache_enabled(path) (0)
 #endif


@@ -120,7 +120,7 @@ void preload_index(struct index_state *index,
 		pthread_mutex_init(&pd.mutex, NULL);
 	}
-	enable_fscache(1);
+	enable_fscache(index->cache_nr);
 	for (i = 0; i < threads; i++) {
 		struct thread_data *p = data+i;
 		int err;
@@ -146,7 +146,7 @@ void preload_index(struct index_state *index,
 	stop_progress(&pd.progress);
 	trace_performance_leave("preload index");
-	enable_fscache(0);
+	disable_fscache();
 }
 int repo_read_index_preload(struct repository *repo,


@@ -1505,7 +1505,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
 	added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
 	unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
-	enable_fscache(1);
+	enable_fscache(0);
 	/*
 	 * Use the multi-threaded preload_index() to refresh most of the
 	 * cache entries quickly then in the single threaded loop below,
@@ -1583,7 +1583,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 		stop_progress(&progress);
 	}
 	trace_performance_leave("refresh index");
-	enable_fscache(0);
+	disable_fscache();
 	return has_errors;
 }