scalar clone: support GVFS-enabled remote repositories

With this change, we come a big step closer to feature parity with
Scalar: this allows cloning from Azure Repos (which do not support
partial clones at the time of writing).

We use the just-implemented JSON parser to parse the response we get
from the `gvfs/config` endpoint. Note that this response might or
might not contain information about a cache server. The presence or
absence of a cache server, however, says nothing about the remote's
ability to speak the GVFS protocol; the presence of the `gvfs/config`
endpoint itself does.
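
For illustration, the relevant part of a `gvfs/config` response could
look like this (the URL below is made up; only the shape of the keys
matters):

    {
        "CacheServers": [
            {
                "Url": "https://cache.example.com/vsts/repo",
                "GlobalDefault": true
            }
        ]
    }

The parser flattens this into keys such as
`.CacheServers[0].GlobalDefault` and `.CacheServers[0].Url`, which is
exactly what `get_cache_server_index()` and `get_cache_server_url()`
in this patch match against.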

An alternative considered during the development of this patch was to
perform simple string matching instead of parsing the JSON-formatted
data. However, this would have been fragile: the response contains
free-form text (e.g. the repository's description) which might contain
parts that would confuse a simple string matcher (but not a proper
JSON parser).

Note: we need to limit the retry logic in `scalar clone` to the
non-GVFS case: in the GVFS protocol case, the call to `set_config()`
that unsets the partial clone settings would otherwise fail because
those settings were never set. Failing immediately also gives us a
clearer reason why such a fetch fails.
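
Concretely, the fetch fallback in `cmd_clone()` now takes roughly the
following shape (a condensed sketch of the diff below; arguments and
error paths abbreviated):

    if ((res = run_git("fetch", "--quiet", "origin", NULL))) {
        if (gvfs_protocol) {
            /* GVFS case: no partial-clone settings to unset;
             * fail with a clear message instead of retrying */
            res = error(_("failed to prefetch commits and trees"));
            goto cleanup;
        }
        /* non-GVFS case: unset the partial-clone settings
         * (set_config() without '=' unsets) and retry as a
         * full clone */
        warning(_("partial clone failed; attempting full clone"));
        if (set_config("remote.origin.promisor") || ...)
            ...
    }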

Co-authored-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Johannes Schindelin authored on 2021-08-24 21:05:42 +02:00;
committed by Victoria Dye
Parent: d293094601
Commit: a64450c051
2 changed files: 131 additions and 3 deletions

diagnose.c

@@ -11,6 +11,7 @@
#include "packfile.h"
#include "parse-options.h"
#include "write-or-die.h"
#include "config.h"
struct archive_dir {
const char *path;
@@ -219,6 +220,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
struct strvec archiver_args = STRVEC_INIT;
char **argv_copy = NULL;
int stdout_fd = -1, archiver_fd = -1;
char *cache_server_url = NULL;
struct strbuf buf = STRBUF_INIT;
int res, i;
struct archive_dir archive_dirs[] = {
@@ -254,6 +256,11 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
get_version_info(&buf, 1);
strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
git_config_get_string("gvfs.cache-server", &cache_server_url);
strbuf_addf(&buf, "Cache Server: %s\n\n",
cache_server_url ? cache_server_url : "None");
get_disk_info(&buf);
write_or_die(stdout_fd, buf.buf, buf.len);
strvec_pushf(&archiver_args,
@@ -311,6 +318,7 @@ diagnose_cleanup:
free(argv_copy);
strvec_clear(&archiver_args);
strbuf_release(&buf);
free(cache_server_url);
return res;
}

scalar.c

@@ -17,6 +17,7 @@
#include "help.h"
#include "setup.h"
#include "trace2.h"
#include "json-parser.h"
static void setup_enlistment_directory(int argc, const char **argv,
const char * const *usagestr,
@@ -334,6 +335,80 @@ static int set_config(const char *fmt, ...)
return res;
}
/* Find N for which .CacheServers[N].GlobalDefault == true */
static int get_cache_server_index(struct json_iterator *it)
{
const char *p;
char *q;
long l;
if (it->type == JSON_TRUE &&
skip_iprefix(it->key.buf, ".CacheServers[", &p) &&
(l = strtol(p, &q, 10)) >= 0 && p != q &&
!strcasecmp(q, "].GlobalDefault")) {
*(long *)it->fn_data = l;
return 1;
}
return 0;
}
struct cache_server_url_data {
char *key, *url;
};
/* Get .CacheServers[N].Url */
static int get_cache_server_url(struct json_iterator *it)
{
struct cache_server_url_data *data = it->fn_data;
if (it->type == JSON_STRING &&
!strcasecmp(data->key, it->key.buf)) {
data->url = strbuf_detach(&it->string_value, NULL);
return 1;
}
return 0;
}
/*
* If `cache_server_url` is `NULL`, print the list to `stdout`.
*
* Since `gvfs-helper` requires a Git directory, this _must_ be run in
* a worktree.
*/
static int supports_gvfs_protocol(const char *url, char **cache_server_url)
{
struct child_process cp = CHILD_PROCESS_INIT;
struct strbuf out = STRBUF_INIT;
cp.git_cmd = 1;
strvec_pushl(&cp.args, "gvfs-helper", "--remote", url, "config", NULL);
if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
long l = 0;
struct json_iterator it =
JSON_ITERATOR_INIT(out.buf, get_cache_server_index, &l);
struct cache_server_url_data data = { .url = NULL };
if (iterate_json(&it) < 0) {
strbuf_release(&out);
return error("JSON parse error");
}
data.key = xstrfmt(".CacheServers[%ld].Url", l);
it.fn = get_cache_server_url;
it.fn_data = &data;
if (iterate_json(&it) < 0) {
strbuf_release(&out);
return error("JSON parse error");
}
*cache_server_url = data.url;
free(data.key);
return 1;
}
strbuf_release(&out);
return 0; /* error out quietly */
}
static char *remote_default_branch(const char *url)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -435,6 +510,8 @@ static int cmd_clone(int argc, const char **argv)
{
const char *branch = NULL;
int full_clone = 0, single_branch = 0, show_progress = isatty(2);
const char *cache_server_url = NULL;
char *default_cache_server_url = NULL;
struct option clone_options[] = {
OPT_STRING('b', "branch", &branch, N_("<branch>"),
N_("branch to checkout after clone")),
@@ -443,6 +520,9 @@ static int cmd_clone(int argc, const char **argv)
OPT_BOOL(0, "single-branch", &single_branch,
N_("only download metadata for the branch that will "
"be checked out")),
OPT_STRING(0, "cache-server-url", &cache_server_url,
N_("<url>"),
N_("the url or friendly name of the cache server")),
OPT_END(),
};
const char * const clone_usage[] = {
@@ -453,6 +533,7 @@ static int cmd_clone(int argc, const char **argv)
char *enlistment = NULL, *dir = NULL;
struct strbuf buf = STRBUF_INIT;
int res;
int gvfs_protocol;
argc = parse_options(argc, argv, NULL, clone_options, clone_usage, 0);
@@ -515,13 +596,46 @@
set_config("remote.origin.fetch="
"+refs/heads/%s:refs/remotes/origin/%s",
single_branch ? branch : "*",
single_branch ? branch : "*") ||
set_config("remote.origin.promisor=true") ||
set_config("remote.origin.partialCloneFilter=blob:none")) {
single_branch ? branch : "*")) {
res = error(_("could not configure remote in '%s'"), dir);
goto cleanup;
}
if (set_config("credential.https://dev.azure.com.useHttpPath=true")) {
res = error(_("could not configure credential.useHttpPath"));
goto cleanup;
}
gvfs_protocol = cache_server_url ||
supports_gvfs_protocol(url, &default_cache_server_url);
if (gvfs_protocol) {
if (!cache_server_url)
cache_server_url = default_cache_server_url;
if (set_config("core.useGVFSHelper=true") ||
set_config("core.gvfs=150") ||
set_config("http.version=HTTP/1.1")) {
res = error(_("could not turn on GVFS helper"));
goto cleanup;
}
if (cache_server_url &&
set_config("gvfs.cache-server=%s", cache_server_url)) {
res = error(_("could not configure cache server"));
goto cleanup;
}
if (cache_server_url)
fprintf(stderr, "Cache server URL: %s\n",
cache_server_url);
} else {
if (set_config("core.useGVFSHelper=false") ||
set_config("remote.origin.promisor=true") ||
set_config("remote.origin.partialCloneFilter=blob:none")) {
res = error(_("could not configure partial clone in "
"'%s'"), dir);
goto cleanup;
}
}
if (!full_clone &&
(res = run_git("sparse-checkout", "init", "--cone", NULL)))
goto cleanup;
@@ -532,6 +646,11 @@
if ((res = run_git("fetch", "--quiet",
show_progress ? "--progress" : "--no-progress",
"origin", NULL))) {
if (gvfs_protocol) {
res = error(_("failed to prefetch commits and trees"));
goto cleanup;
}
warning(_("partial clone failed; attempting full clone"));
if (set_config("remote.origin.promisor") ||
@@ -564,6 +683,7 @@ cleanup:
free(enlistment);
free(dir);
strbuf_release(&buf);
free(default_cache_server_url);
return res;
}