builtin/submodule--helper: factor out submodule updating

Separate the command line parsing from the actual execution of the command
within the repository. For now there is not a lot of execution as
most of it is still in git-submodule.sh.

Signed-off-by: Stefan Beller <sbeller@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Stefan Beller authored on 2018-08-03 15:23:18 -07:00; committed by Junio C Hamano
Parent: 9eca701f69
Commit: 90efe595c5
1 changed file with 33 additions and 26 deletions

builtin/submodule--helper.c

@@ -1474,6 +1474,8 @@ struct submodule_update_clone {
 	/* failed clones to be retried again */
 	const struct cache_entry **failed_clones;
 	int failed_clones_nr, failed_clones_alloc;
+
+	int max_jobs;
 };
 #define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \
 	SUBMODULE_UPDATE_STRATEGY_INIT, 0, 0, -1, STRING_LIST_INIT_DUP, 0, \
@@ -1716,11 +1718,36 @@ static int git_update_clone_config(const char *var, const char *value,
 	return 0;
 }
 
+static int update_submodules(struct submodule_update_clone *suc)
+{
+	struct string_list_item *item;
+
+	run_processes_parallel(suc->max_jobs,
+			       update_clone_get_next_task,
+			       update_clone_start_failure,
+			       update_clone_task_finished,
+			       suc);
+
+	/*
+	 * We saved the output and put it out all at once now.
+	 * That means:
+	 *  - the listener does not have to interleave their (checkout)
+	 *    work with our fetching. The writes involved in a
+	 *    checkout involve more straightforward sequential I/O.
+	 *  - the listener can avoid doing any work if fetching failed.
+	 */
+	if (suc->quickstop)
+		return 1;
+
+	for_each_string_list_item(item, &suc->projectlines)
+		fprintf(stdout, "%s", item->string);
+
+	return 0;
+}
+
 static int update_clone(int argc, const char **argv, const char *prefix)
 {
 	const char *update = NULL;
-	int max_jobs = 1;
-	struct string_list_item *item;
 	struct pathspec pathspec;
 	struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
 
@@ -1742,7 +1769,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
 		OPT_STRING(0, "depth", &suc.depth, "<depth>",
 			   N_("Create a shallow clone truncated to the "
			      "specified number of revisions")),
-		OPT_INTEGER('j', "jobs", &max_jobs,
+		OPT_INTEGER('j', "jobs", &suc.max_jobs,
 			    N_("parallel jobs")),
 		OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
 			 N_("whether the initial clone should follow the shallow recommendation")),
@@ -1758,8 +1785,8 @@ static int update_clone(int argc, const char **argv, const char *prefix)
 	};
 	suc.prefix = prefix;
 
-	update_clone_config_from_gitmodules(&max_jobs);
-	git_config(git_update_clone_config, &max_jobs);
+	update_clone_config_from_gitmodules(&suc.max_jobs);
+	git_config(git_update_clone_config, &suc.max_jobs);
 
 	argc = parse_options(argc, argv, prefix, module_update_clone_options,
 			     git_submodule_helper_usage, 0);
@@ -1774,27 +1801,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
 	if (pathspec.nr)
 		suc.warn_if_uninitialized = 1;
 
-	run_processes_parallel(max_jobs,
-			       update_clone_get_next_task,
-			       update_clone_start_failure,
-			       update_clone_task_finished,
-			       &suc);
-
-	/*
-	 * We saved the output and put it out all at once now.
-	 * That means:
-	 *  - the listener does not have to interleave their (checkout)
-	 *    work with our fetching. The writes involved in a
-	 *    checkout involve more straightforward sequential I/O.
-	 *  - the listener can avoid doing any work if fetching failed.
-	 */
-	if (suc.quickstop)
-		return 1;
-
-	for_each_string_list_item(item, &suc.projectlines)
-		fprintf(stdout, "%s", item->string);
-
-	return 0;
+	return update_submodules(&suc);
 }
 
 static int resolve_relative_path(int argc, const char **argv, const char *prefix)
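
The split made above can be illustrated with a minimal standalone sketch in plain C; the names update_state, cmd_update and do_update are made-up placeholders, not the submodule--helper API. The entry point only parses options into a state struct, and a separate function executes from that struct, buffering its per-item output and flushing it in one go unless the run was aborted, roughly as update_submodules() does.

/*
 * Minimal standalone sketch (not Git code) of the pattern: parsing is
 * separated from execution, and execution works only from the prepared
 * state.  All identifiers here are illustrative placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct update_state {
	int max_jobs;            /* --jobs; 0 picks a default */
	int quickstop;           /* abort flag set by a failed job */
	int nr_lines;
	const char *lines[64];   /* per-submodule output, buffered */
};

/* execution half: consumes the prepared state only */
static int do_update(struct update_state *st)
{
	int i;

	if (st->max_jobs <= 0)
		st->max_jobs = 1;
	fprintf(stderr, "running with %d parallel job(s)\n", st->max_jobs);

	/* a real implementation would run the jobs here and fill st->lines */

	if (st->quickstop)
		return 1;	/* buffered output is dropped on abort */

	for (i = 0; i < st->nr_lines; i++)
		fputs(st->lines[i], stdout);
	return 0;
}

/* parsing half: turns argv into state, then delegates */
static int cmd_update(int argc, const char **argv)
{
	struct update_state st = { 0 };
	int i;

	for (i = 1; i < argc; i++)
		if (!strcmp(argv[i], "--jobs") && i + 1 < argc)
			st.max_jobs = atoi(argv[++i]);

	/* stand-in for output produced by the parallel phase */
	st.lines[st.nr_lines++] = "submodule 'lib' is up to date\n";

	return do_update(&st);
}

int main(int argc, char **argv)
{
	return cmd_update(argc, (const char **)argv);
}

Keeping the execution half free of option parsing leaves a single place where the remaining update logic, still in git-submodule.sh at this point, can later land.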