Mirror of https://github.com/microsoft/git.git
Merge first wave of gvfs-helper feature
Includes commits from these pull requests: #191 #205 #206 #207 #208 #215 #220 #221

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Commit 5fd1ddf282
@@ -73,6 +73,7 @@
 /git-gc
 /git-get-tar-commit-id
 /git-grep
+/git-gvfs-helper
 /git-hash-object
 /git-help
 /git-hook
@@ -441,6 +441,8 @@ include::config/gui.txt[]
 
 include::config/guitool.txt[]
 
+include::config/gvfs.txt[]
+
 include::config/help.txt[]
 
 include::config/http.txt[]
@@ -778,6 +778,9 @@ core.gvfs::
 	flag just blocks them from occurring at all.
 --
 
+core.useGvfsHelper::
+	TODO
+
 core.sparseCheckout::
 	Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
 	for more information.
@@ -0,0 +1,5 @@
+gvfs.cache-server::
+	TODO
+
+gvfs.sharedcache::
+	TODO
Makefile (8 changed lines)
@@ -1043,6 +1043,7 @@ LIB_OBJS += gpg-interface.o
 LIB_OBJS += graph.o
 LIB_OBJS += grep.o
 LIB_OBJS += gvfs.o
+LIB_OBJS += gvfs-helper-client.o
 LIB_OBJS += hash-lookup.o
 LIB_OBJS += hashmap.o
 LIB_OBJS += help.o
@@ -1631,6 +1632,9 @@ endif
 endif
 BASIC_CFLAGS += $(CURL_CFLAGS)
 
+PROGRAM_OBJS += gvfs-helper.o
+TEST_PROGRAMS_NEED_X += test-gvfs-protocol
+
 REMOTE_CURL_PRIMARY = git-remote-http$X
 REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
 REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
@@ -2872,6 +2876,10 @@ scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
 		$(filter %.o,$^) $(LIBS)
 
+git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS) $(LAZYLOAD_LIBCURL_OBJ)
+	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
+		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
+
 $(LIB_FILE): $(LIB_OBJS)
 	$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
 
@@ -812,7 +812,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
 		read_lock();
 		collision_test_needed =
 			repo_has_object_file_with_flags(the_repository, oid,
-							OBJECT_INFO_QUICK);
+							OBJECT_INFO_FOR_PREFETCH);
 		read_unlock();
 	}
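
For context on the one-line change above: OBJECT_INFO_FOR_PREFETCH is understood to be the QUICK lookup combined with a "do not trigger a dynamic fetch" bit, so the collision test in index-pack cannot recursively invoke gvfs-helper while it is indexing a freshly downloaded pack. A minimal sketch of that composition; the bit values below are illustrative assumptions, not taken from this commit:

	/* Sketch only: illustrative flag composition, not the in-tree definitions. */
	#define OBJECT_INFO_QUICK             (1 << 0)
	#define OBJECT_INFO_SKIP_FETCH_OBJECT (1 << 1)
	#define OBJECT_INFO_FOR_PREFETCH \
		(OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT)
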
config.c (38 changed lines)
@@ -39,6 +39,7 @@
 #include "ws.h"
 #include "wrapper.h"
 #include "write-or-die.h"
+#include "transport.h"
 
 struct config_source {
 	struct config_source *prev;
@@ -1820,6 +1821,11 @@ int git_default_core_config(const char *var, const char *value, void *cb)
 		return 0;
 	}
 
+	if (!strcmp(var, "core.usegvfshelper")) {
+		core_use_gvfs_helper = git_config_bool(var, value);
+		return 0;
+	}
+
 	if (!strcmp(var, "core.sparsecheckout")) {
 		/* virtual file system relies on the sparse checkout logic so force it on */
 		if (core_virtualfilesystem)
@@ -1962,6 +1968,35 @@ static int git_default_mailmap_config(const char *var, const char *value)
 	return 0;
 }
 
+static int git_default_gvfs_config(const char *var, const char *value)
+{
+	if (!strcmp(var, "gvfs.cache-server")) {
+		const char *v2 = NULL;
+
+		if (!git_config_string(&v2, var, value) && v2 && *v2)
+			gvfs_cache_server_url = transport_anonymize_url(v2);
+		free((char*)v2);
+		return 0;
+	}
+
+	if (!strcmp(var, "gvfs.sharedcache") && value && *value) {
+		strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+		strbuf_addstr(&gvfs_shared_cache_pathname, value);
+		if (strbuf_normalize_path(&gvfs_shared_cache_pathname) < 0) {
+			/*
+			 * Pretend it wasn't set.  This will cause us to
+			 * fallback to ".git/objects" effectively.
+			 */
+			strbuf_release(&gvfs_shared_cache_pathname);
+			return 0;
+		}
+		strbuf_trim_trailing_dir_sep(&gvfs_shared_cache_pathname);
+		return 0;
+	}
+
+	return 0;
+}
+
 int git_default_config(const char *var, const char *value, void *cb)
 {
 	if (starts_with(var, "core."))
@@ -2011,6 +2046,9 @@ int git_default_config(const char *var, const char *value, void *cb)
 	if (starts_with(var, "sparse."))
 		return git_default_sparse_config(var, value);
 
+	if (starts_with(var, "gvfs."))
+		return git_default_gvfs_config(var, value);
+
 	/* Add other config variables here and to Documentation/config.txt. */
 	return 0;
 }
@@ -647,7 +647,7 @@ if(NOT CURL_FOUND)
 	add_compile_definitions(NO_CURL)
 	message(WARNING "git-http-push and git-http-fetch will not be built")
 else()
-	list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http)
+	list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http git-gvfs-helper)
 	if(CURL_VERSION_STRING VERSION_GREATER_EQUAL 7.34.0)
 		add_compile_definitions(USE_CURL_FOR_IMAP_SEND)
 	endif()
|
@ -816,6 +816,9 @@ if(CURL_FOUND)
|
|||
add_executable(git-http-push ${CMAKE_SOURCE_DIR}/http-push.c)
|
||||
target_link_libraries(git-http-push http_obj common-main ${CURL_LIBRARIES} ${EXPAT_LIBRARIES})
|
||||
endif()
|
||||
|
||||
add_executable(git-gvfs-helper ${CMAKE_SOURCE_DIR}/gvfs-helper.c)
|
||||
target_link_libraries(git-gvfs-helper http_obj common-main ${CURL_LIBRARIES} )
|
||||
endif()
|
||||
|
||||
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
|
||||
|
@@ -1025,6 +1028,20 @@ set(wrapper_scripts
 set(wrapper_test_scripts
 	test-fake-ssh test-tool)
 
+if(CURL_FOUND)
+	list(APPEND wrapper_test_scripts test-gvfs-protocol)
+
+	add_executable(test-gvfs-protocol ${CMAKE_SOURCE_DIR}/t/helper/test-gvfs-protocol.c)
+	target_link_libraries(test-gvfs-protocol common-main)
+
+	if(MSVC)
+		set_target_properties(test-gvfs-protocol
+			PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/t/helper)
+		set_target_properties(test-gvfs-protocol
+			PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/t/helper)
+	endif()
+endif()
+
 
 foreach(script ${wrapper_scripts})
 	file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
@@ -310,6 +310,8 @@ static int run_credential_helper(struct credential *c,
 	else
 		helper.no_stdout = 1;
 
+	helper.trace2_child_class = "cred";
+
 	if (start_command(&helper) < 0)
 		return -1;
 
@@ -96,6 +96,9 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
 #define PROTECT_NTFS_DEFAULT 1
 #endif
 int protect_ntfs = PROTECT_NTFS_DEFAULT;
+int core_use_gvfs_helper;
+const char *gvfs_cache_server_url;
+struct strbuf gvfs_shared_cache_pathname = STRBUF_INIT;
 
 /*
  * The character that begins a commented line in user-editable file
@@ -152,6 +152,9 @@ extern int core_gvfs;
 extern int precomposed_unicode;
 extern int protect_hfs;
 extern int protect_ntfs;
+extern int core_use_gvfs_helper;
+extern const char *gvfs_cache_server_url;
+extern struct strbuf gvfs_shared_cache_pathname;
 
 extern int core_apply_sparse_checkout;
 extern int core_sparse_checkout_cone;
@@ -0,0 +1,467 @@
+#include "git-compat-util.h"
+#include "environment.h"
+#include "hex.h"
+#include "strvec.h"
+#include "trace2.h"
+#include "oidset.h"
+#include "object.h"
+#include "object-store.h"
+#include "gvfs-helper-client.h"
+#include "sub-process.h"
+#include "sigchain.h"
+#include "pkt-line.h"
+#include "quote.h"
+#include "packfile.h"
+
+static struct oidset gh_client__oidset_queued = OIDSET_INIT;
+static unsigned long gh_client__oidset_count;
+
+struct gh_server__process {
+	struct subprocess_entry subprocess; /* must be first */
+	unsigned int supported_capabilities;
+};
+
+static int gh_server__subprocess_map_initialized;
+static struct hashmap gh_server__subprocess_map;
+static struct object_directory *gh_client__chosen_odb;
+
+/*
+ * The "objects" capability has 2 verbs: "get" and "post".
+ */
+#define CAP_OBJECTS      (1u<<1)
+#define CAP_OBJECTS_NAME "objects"
+
+#define CAP_OBJECTS__VERB_GET1_NAME "get"
+#define CAP_OBJECTS__VERB_POST_NAME "post"
+
+static int gh_client__start_fn(struct subprocess_entry *subprocess)
+{
+	static int versions[] = {1, 0};
+	static struct subprocess_capability capabilities[] = {
+		{ CAP_OBJECTS_NAME, CAP_OBJECTS },
+		{ NULL, 0 }
+	};
+
+	struct gh_server__process *entry = (struct gh_server__process *)subprocess;
+
+	return subprocess_handshake(subprocess, "gvfs-helper", versions,
+				    NULL, capabilities,
+				    &entry->supported_capabilities);
+}
+
+/*
+ * Send the queued OIDs in the OIDSET to gvfs-helper for it to
+ * fetch from the cache-server or main Git server using "/gvfs/objects"
+ * POST semantics.
+ *
+ *     objects.post LF
+ *     (<hex-oid> LF)*
+ *     <flush>
+ *
+ */
+static int gh_client__send__objects_post(struct child_process *process)
+{
+	struct oidset_iter iter;
+	struct object_id *oid;
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n"));
+	if (err)
+		return err;
+
+	oidset_iter_init(&gh_client__oidset_queued, &iter);
+	while ((oid = oidset_iter_next(&iter))) {
+		err = packet_write_fmt_gently(process->in, "%s\n",
+					      oid_to_hex(oid));
+		if (err)
+			return err;
+	}
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Send the given OID to gvfs-helper for it to fetch from the
+ * cache-server or main Git server using "/gvfs/objects" GET
+ * semantics.
+ *
+ * This ignores any queued OIDs.
+ *
+ *     objects.get LF
+ *     <hex-oid> LF
+ *     <flush>
+ *
+ */
+static int gh_client__send__objects_get(struct child_process *process,
+					const struct object_id *oid)
+{
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n"));
+	if (err)
+		return err;
+
+	err = packet_write_fmt_gently(process->in, "%s\n",
+				      oid_to_hex(oid));
+	if (err)
+		return err;
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Update the loose object cache to include the newly created
+ * object.
+ */
+static void gh_client__update_loose_cache(const char *line)
+{
+	const char *v1_oid;
+	struct object_id oid;
+
+	if (!skip_prefix(line, "loose ", &v1_oid))
+		BUG("update_loose_cache: invalid line '%s'", line);
+
+	if (get_oid_hex(v1_oid, &oid))
+		BUG("update_loose_cache: invalid line '%s'", line);
+
+	odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
+}
+
+/*
+ * Update the packed-git list to include the newly created packfile.
+ */
+static void gh_client__update_packed_git(const char *line)
+{
+	struct strbuf path = STRBUF_INIT;
+	const char *v1_filename;
+	struct packed_git *p;
+	int is_local;
+
+	if (!skip_prefix(line, "packfile ", &v1_filename))
+		BUG("update_packed_git: invalid line '%s'", line);
+
+	/*
+	 * ODB[0] is the local .git/objects.  All others are alternates.
+	 */
+	is_local = (gh_client__chosen_odb == the_repository->objects->odb);
+
+	strbuf_addf(&path, "%s/pack/%s",
+		    gh_client__chosen_odb->path, v1_filename);
+	strbuf_strip_suffix(&path, ".pack");
+	strbuf_addstr(&path, ".idx");
+
+	p = add_packed_git(path.buf, path.len, is_local);
+	if (p)
+		install_packed_git_and_mru(the_repository, p);
+}
+
+/*
+ * Both CAP_OBJECTS verbs return the same format response:
+ *
+ *     <odb>
+ *     <data>*
+ *     <status>
+ *     <flush>
+ *
+ * Where:
+ *
+ * <odb>      ::= odb SP <directory> LF
+ *
+ * <data>     ::= <packfile> / <loose>
+ *
+ * <packfile> ::= packfile SP <filename> LF
+ *
+ * <loose>    ::= loose SP <hex-oid> LF
+ *
+ * <status>   ::=   ok LF
+ *                / partial LF
+ *                / error SP <message> LF
+ *
+ * Note that `gvfs-helper` controls how/if it chunks the request when
+ * it talks to the cache-server and/or main Git server.  So it is
+ * possible for us to receive many packfiles and/or loose objects *AND
+ * THEN* get a hard network error or a 404 on an individual object.
+ *
+ * If we get a partial result, we can let the caller try to continue
+ * -- for example, maybe an immediate request for a tree object was
+ * grouped with a queued request for a blob.  The tree-walk *might* be
+ * able to continue and let the 404 blob be handled later.
+ */
+static int gh_client__objects__receive_response(
+	struct child_process *process,
+	enum gh_client__created *p_ghc,
+	int *p_nr_loose, int *p_nr_packfile)
+{
+	enum gh_client__created ghc = GHC__CREATED__NOTHING;
+	const char *v1;
+	char *line;
+	int len;
+	int err = 0;
+
+	while (1) {
+		/*
+		 * Warning: packet_read_line_gently() calls die()
+		 * despite the _gently moniker.
+		 */
+		len = packet_read_line_gently(process->out, NULL, &line);
+		if ((len < 0) || !line)
+			break;
+
+		if (starts_with(line, "odb")) {
+			/* trust that this matches what we expect */
+		}
+
+		else if (starts_with(line, "packfile")) {
+			gh_client__update_packed_git(line);
+			ghc |= GHC__CREATED__PACKFILE;
+			*p_nr_packfile += 1;
+		}
+
+		else if (starts_with(line, "loose")) {
+			gh_client__update_loose_cache(line);
+			ghc |= GHC__CREATED__LOOSE;
+			*p_nr_loose += 1;
+		}
+
+		else if (starts_with(line, "ok"))
+			;
+		else if (starts_with(line, "partial"))
+			;
+		else if (skip_prefix(line, "error ", &v1)) {
+			error("gvfs-helper error: '%s'", v1);
+			err = -1;
+		}
+	}
+
+	*p_ghc = ghc;
+
+	return err;
+}
+
+/*
+ * Select the preferred ODB for fetching missing objects.
+ * This should be the alternate with the same directory
+ * name as set in `gvfs.sharedCache`.
+ *
+ * Fallback to .git/objects if necessary.
+ */
+static void gh_client__choose_odb(void)
+{
+	struct object_directory *odb;
+
+	if (gh_client__chosen_odb)
+		return;
+
+	prepare_alt_odb(the_repository);
+	gh_client__chosen_odb = the_repository->objects->odb;
+
+	if (!gvfs_shared_cache_pathname.len)
+		return;
+
+	for (odb = the_repository->objects->odb->next; odb; odb = odb->next) {
+		if (!strcmp(odb->path, gvfs_shared_cache_pathname.buf)) {
+			gh_client__chosen_odb = odb;
+			return;
+		}
+	}
+}
+
+static struct gh_server__process *gh_client__find_long_running_process(
+	unsigned int cap_needed)
+{
+	struct gh_server__process *entry;
+	struct strvec argv = STRVEC_INIT;
+	struct strbuf quoted = STRBUF_INIT;
+
+	gh_client__choose_odb();
+
+	/*
+	 * TODO decide what defaults we want.
+	 */
+	strvec_push(&argv, "gvfs-helper");
+	strvec_push(&argv, "--fallback");
+	strvec_push(&argv, "--cache-server=trust");
+	strvec_pushf(&argv, "--shared-cache=%s",
+		     gh_client__chosen_odb->path);
+	strvec_push(&argv, "server");
+
+	sq_quote_argv_pretty(&quoted, argv.v);
+
+	/*
+	 * Find an existing long-running process with the above command
+	 * line -or- create a new long-running process for this and
+	 * subsequent 'get' requests.
+	 */
+	if (!gh_server__subprocess_map_initialized) {
+		gh_server__subprocess_map_initialized = 1;
+		hashmap_init(&gh_server__subprocess_map,
+			     (hashmap_cmp_fn)cmd2process_cmp, NULL, 0);
+		entry = NULL;
+	} else
+		entry = (struct gh_server__process *)subprocess_find_entry(
+			&gh_server__subprocess_map, quoted.buf);
+
+	if (!entry) {
+		entry = xmalloc(sizeof(*entry));
+		entry->supported_capabilities = 0;
+
+		if (subprocess_start_strvec(&gh_server__subprocess_map,
+					    &entry->subprocess, 1,
+					    &argv, gh_client__start_fn))
+			FREE_AND_NULL(entry);
+	}
+
+	if (entry &&
+	    (entry->supported_capabilities & cap_needed) != cap_needed) {
+		error("gvfs-helper: does not support needed capabilities");
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	strvec_clear(&argv);
+	strbuf_release(&quoted);
+
+	return entry;
+}
+
+void gh_client__queue_oid(const struct object_id *oid)
+{
+	// TODO consider removing this trace2.  it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+
+	if (!oidset_insert(&gh_client__oidset_queued, oid))
+		gh_client__oidset_count++;
+}
+
+/*
+ * This routine should actually take a "const struct oid_array *"
+ * rather than the component parts, but fetch_objects() uses
+ * this model (because of the call in sha1-file.c).
+ */
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
+{
+	int k;
+
+	for (k = 0; k < oid_nr; k++)
+		gh_client__queue_oid(&oids[k]);
+}
+
+/*
+ * Bulk fetch all of the queued OIDs in the OIDSET.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc)
+{
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
+	*p_ghc = GHC__CREATED__NOTHING;
+
+	if (!gh_client__oidset_count)
+		return 0;
+
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/post", the_repository);
+
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_post(process);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "objects/post/nr_objects", gh_client__oidset_count);
+	trace2_region_leave("gh-client", "objects/post", the_repository);
+
+	oidset_clear(&gh_client__oidset_queued);
+	gh_client__oidset_count = 0;
+
+	return err;
+}
+
+/*
+ * Get exactly 1 object immediately.
+ * Ignore any queued objects.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc)
+{
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
+	// TODO consider removing this trace2.  it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/get", the_repository);
+
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_get(process, oid);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_region_leave("gh-client", "objects/get", the_repository);
+
+	return err;
+}
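
The comment block above documents the response grammar that `gvfs-helper server` sends back over pkt-lines (odb / packfile / loose / ok / partial / error). As a minimal, self-contained illustration of that grammar -- not code from this commit -- here is a tiny standalone C program that classifies response payload lines the same way gh_client__objects__receive_response() dispatches on their prefixes:

	#include <stdio.h>
	#include <string.h>

	/* Illustrative result codes; the real client folds these into enum gh_client__created. */
	enum sample_line_kind { K_ODB, K_PACKFILE, K_LOOSE, K_OK, K_PARTIAL, K_ERROR, K_UNKNOWN };

	/* Classify one pkt-line payload according to the documented grammar. */
	static enum sample_line_kind classify_line(const char *line)
	{
		if (!strncmp(line, "odb ", 4))
			return K_ODB;
		if (!strncmp(line, "packfile ", 9))
			return K_PACKFILE;
		if (!strncmp(line, "loose ", 6))
			return K_LOOSE;
		if (!strcmp(line, "ok"))
			return K_OK;
		if (!strcmp(line, "partial"))
			return K_PARTIAL;
		if (!strncmp(line, "error ", 6))
			return K_ERROR;
		return K_UNKNOWN;
	}

	int main(void)
	{
		/* Hypothetical response, shaped like the grammar comment describes. */
		const char *response[] = {
			"odb /path/to/shared-cache",
			"packfile pack-1234.pack",
			"loose 0123456789abcdef0123456789abcdef01234567",
			"ok",
		};
		for (size_t i = 0; i < sizeof(response) / sizeof(response[0]); i++)
			printf("%d: %s\n", (int)classify_line(response[i]), response[i]);
		return 0;
	}

The real client additionally registers each reported packfile or loose object with the chosen ODB before it looks at the final status line.
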
@@ -0,0 +1,69 @@
+#ifndef GVFS_HELPER_CLIENT_H
+#define GVFS_HELPER_CLIENT_H
+
+struct repository;
+struct commit;
+struct object_id;
+
+enum gh_client__created {
+	/*
+	 * The _get_ operation did not create anything.  It doesn't
+	 * matter if `gvfs-helper` had errors or not -- just that
+	 * nothing was created.
+	 */
+	GHC__CREATED__NOTHING = 0,
+
+	/*
+	 * The _get_ operation created one or more packfiles.
+	 */
+	GHC__CREATED__PACKFILE = 1<<1,
+
+	/*
+	 * The _get_ operation created one or more loose objects.
+	 * (Not necessarily for the individual OID you requested.)
+	 */
+	GHC__CREATED__LOOSE = 1<<2,
+
+	/*
+	 * The _get_ operation created one or more packfiles *and*
+	 * one or more loose objects.
+	 */
+	GHC__CREATED__PACKFILE_AND_LOOSE = (GHC__CREATED__PACKFILE |
+					    GHC__CREATED__LOOSE),
+};
+
+/*
+ * Ask `gvfs-helper server` to immediately fetch a single object
+ * using "/gvfs/objects" GET semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * A loose object will be created in the shared-cache ODB and
+ * the in-memory cache updated.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc);
+
+/*
+ * Queue this OID for a future fetch using `gvfs-helper service`.
+ * It does not wait.
+ *
+ * Callers should not rely on the queued object being on disk until
+ * the queue has been drained.
+ */
+void gh_client__queue_oid(const struct object_id *oid);
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
+
+/*
+ * Ask `gvfs-helper server` to fetch the set of queued OIDs using
+ * "/gvfs/objects" POST semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc);
+
+#endif /* GVFS_HELPER_CLIENT_H */
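
A hedged usage sketch of the API declared in this header, written in the style of an in-tree caller: queue a batch of OIDs, drain the queue with one POST, then fetch one more object with an immediate GET. Only the gh_client__* functions come from this commit; the wrapper function, its name, and the include list are illustrative assumptions:

	#include "git-compat-util.h"
	#include "gvfs-helper-client.h"

	/* Sketch only: batch-fetch a set of missing objects, then one more object. */
	static int fetch_missing_objects(const struct object_id *oids, int oid_nr,
					 const struct object_id *one_more)
	{
		enum gh_client__created ghc = GHC__CREATED__NOTHING;

		gh_client__queue_oid_array(oids, oid_nr);	/* queue only; does not block */
		if (gh_client__drain_queue(&ghc))		/* one "/gvfs/objects" POST */
			return -1;

		if (gh_client__get_immediate(one_more, &ghc))	/* one "/gvfs/objects" GET */
			return -1;

		/* ghc reports whether packfiles and/or loose objects were created. */
		return 0;
	}

Batching through the queue lets gvfs-helper issue a single "/gvfs/objects" POST for many objects instead of one GET per object.
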
(The diff for this file is not shown because of its large size.)
object-file.c (113 changed lines)
@@ -50,6 +50,7 @@
 #include "sigchain.h"
 #include "sub-process.h"
 #include "pkt-line.h"
+#include "gvfs-helper-client.h"
 
 /* The maximum size for an object header. */
 #define MAX_HEADER_LEN 32

@@ -465,6 +466,8 @@ const char *loose_object_path(struct repository *r, struct strbuf *buf,
 	return odb_loose_path(r->objects->odb, buf, oid);
 }
 
+static int gvfs_matched_shared_cache_to_alternate;
+
 /*
  * Return non-zero iff the path is usable as an alternate object database.
  */

@@ -474,6 +477,52 @@ static int alt_odb_usable(struct raw_object_store *o,
 {
 	int r;
 
+	if (!strbuf_cmp(path, &gvfs_shared_cache_pathname)) {
+		/*
+		 * `gvfs.sharedCache` is the preferred alternate that we
+		 * will use with `gvfs-helper.exe` to dynamically fetch
+		 * missing objects.  It is set during git_default_config().
+		 *
+		 * Make sure the directory exists on disk before we let the
+		 * stock code discredit it.
+		 */
+		struct strbuf buf_pack_foo = STRBUF_INIT;
+		enum scld_error scld;
+
+		/*
+		 * Force create the "<odb>" and "<odb>/pack" directories, if
+		 * not present on disk.  Append an extra bogus directory to
+		 * get safe_create_leading_directories() to see "<odb>/pack"
+		 * as a leading directory of something deeper (which it
+		 * won't create).
+		 */
+		strbuf_addf(&buf_pack_foo, "%s/pack/foo", path->buf);
+
+		scld = safe_create_leading_directories(buf_pack_foo.buf);
+		if (scld != SCLD_OK && scld != SCLD_EXISTS) {
+			error_errno(_("could not create shared-cache ODB '%s'"),
+				    gvfs_shared_cache_pathname.buf);
+
+			strbuf_release(&buf_pack_foo);
+
+			/*
+			 * Pretend no shared-cache was requested and
+			 * effectively fallback to ".git/objects" for
+			 * fetching missing objects.
+			 */
+			strbuf_release(&gvfs_shared_cache_pathname);
+			return 0;
+		}
+
+		/*
+		 * We know that there is an alternate (either from
+		 * .git/objects/info/alternates or from a memory-only
+		 * entry) associated with the shared-cache directory.
+		 */
+		gvfs_matched_shared_cache_to_alternate++;
+		strbuf_release(&buf_pack_foo);
+	}
+
 	/* Detect cases where alternate disappeared */
 	if (!is_directory(path->buf)) {
 		error(_("object directory %s does not exist; "

@@ -957,6 +1006,33 @@ void prepare_alt_odb(struct repository *r)
 	link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
 
 	read_info_alternates(r, r->objects->odb->path, 0);
+
+	if (gvfs_shared_cache_pathname.len &&
+	    !gvfs_matched_shared_cache_to_alternate) {
+		/*
+		 * There is no entry in .git/objects/info/alternates for
+		 * the requested shared-cache directory.  Therefore, the
+		 * odb-list does not contain this directory.
+		 *
+		 * Force this directory into the odb-list as an in-memory
+		 * alternate.  Implicitly create the directory on disk, if
+		 * necessary.
+		 *
+		 * See GIT_ALTERNATE_OBJECT_DIRECTORIES for another example
+		 * of this kind of usage.
+		 *
+		 * Note: This has the net-effect of allowing Git to treat
+		 * `gvfs.sharedCache` as an unofficial alternate.  This
+		 * usage should be discouraged for compatibility reasons
+		 * with other tools in the overall Git ecosystem (that
+		 * won't know about this trick).  It would be much better
+		 * for us to update .git/objects/info/alternates instead.
+		 * The code here is considered a backstop.
+		 */
+		link_alt_odb_entries(r, gvfs_shared_cache_pathname.buf,
+				     '\n', NULL, 0);
+	}
+
 	r->objects->loaded_alternates = 1;
 }
 

@@ -1703,7 +1779,7 @@ static int do_oid_object_info_extended(struct repository *r,
 	const struct object_id *real = oid;
 	int already_retried = 0;
 	int tried_hook = 0;
-
+	int tried_gvfs_helper = 0;
 
 	if (flags & OBJECT_INFO_LOOKUP_REPLACE)
 		real = lookup_replace_object(r, oid);

@@ -1741,13 +1817,41 @@ retry:
 	if (!loose_object_info(r, real, oi, flags))
 		return 0;
 
+	if (core_use_gvfs_helper && !tried_gvfs_helper) {
+		enum gh_client__created ghc;
+
+		if (flags & OBJECT_INFO_SKIP_FETCH_OBJECT)
+			return -1;
+
+		gh_client__get_immediate(real, &ghc);
+		tried_gvfs_helper = 1;
+
+		/*
+		 * Retry the lookup IFF `gvfs-helper` created one
+		 * or more new packfiles or loose objects.
+		 */
+		if (ghc != GHC__CREATED__NOTHING)
+			continue;
+
+		/*
+		 * If `gvfs-helper` fails, we just want to return -1.
+		 * But allow the other providers to have a shot at it.
+		 * (At least until we have a chance to consolidate
+		 * them.)
+		 */
+	}
+
 	/* Not a loose object; someone else may have just packed it. */
 	if (!(flags & OBJECT_INFO_QUICK)) {
 		reprepare_packed_git(r);
 		if (find_pack_entry(r, real, &e))
 			break;
 		if (core_virtualize_objects && !tried_hook) {
+			// TODO Assert or at least trace2 if gvfs-helper
+			// TODO was tried and failed and then read-object-hook
+			// TODO is successful at getting this object.
 			tried_hook = 1;
 			// TODO BUG? Should 'oid' be 'real' ?
 			if (!read_object_process(oid))
 				goto retry;
 		}

@@ -2845,6 +2949,13 @@ struct oidtree *odb_loose_cache(struct object_directory *odb,
 	return odb->loose_objects_cache;
 }
 
+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid)
+{
+	struct oidtree *cache = odb_loose_cache(odb, oid);
+	append_loose_object(oid, NULL, cache);
+}
+
 void odb_clear_loose_cache(struct object_directory *odb)
 {
 	oidtree_clear(odb->loose_objects_cache);
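
The hunks above splice gvfs-helper into the object-lookup retry loop: consult the local stores, ask the helper at most once, and retry the lookup only if the helper reports that it created a packfile or loose object. A self-contained sketch of that retry shape, with placeholder callbacks standing in for the real lookup and for gh_client__get_immediate(); all names here are illustrative, not from this commit:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the real lookup and fetch steps. */
	typedef bool (*lookup_fn)(int id);
	typedef bool (*fetch_fn)(int id);	/* returns true if it created something */

	/* Try locally; on a miss, fetch at most once and retry the lookup. */
	static bool lookup_with_one_fetch(int id, lookup_fn lookup, fetch_fn fetch)
	{
		bool tried_fetch = false;

		while (1) {
			if (lookup(id))
				return true;
			if (tried_fetch)
				return false;	/* already fetched once; give up */
			tried_fetch = true;
			if (!fetch(id))
				return false;	/* nothing new was created */
			/* something was created; loop and retry the lookup */
		}
	}

	static bool fake_lookup(int id) { return id % 2 == 0; }
	static bool fake_fetch(int id)  { (void)id; return false; }

	int main(void)
	{
		printf("%d\n", lookup_with_one_fetch(3, fake_lookup, fake_fetch));
		return 0;
	}
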
@@ -95,6 +95,14 @@ void restore_primary_odb(struct object_directory *restore_odb, const char *old_p
 struct oidtree *odb_loose_cache(struct object_directory *odb,
 				const struct object_id *oid);
 
+/*
+ * Add a new object to the loose object cache (possibly after the
+ * cache was populated).  This might be used after dynamically
+ * fetching a missing object.
+ */
+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid);
+
 /* Empty the loose object cache for the specified object directory. */
 void odb_clear_loose_cache(struct object_directory *odb);
 
@@ -770,6 +770,12 @@ void install_packed_git(struct repository *r, struct packed_git *pack)
 	hashmap_add(&r->objects->pack_map, &pack->packmap_ent);
 }
 
+void install_packed_git_and_mru(struct repository *r, struct packed_git *pack)
+{
+	install_packed_git(r, pack);
+	list_add(&pack->mru, &r->objects->packed_git_mru);
+}
+
 void (*report_garbage)(unsigned seen_bits, const char *path);
 
 static void report_helper(const struct string_list *list,
@@ -67,6 +67,7 @@ extern void (*report_garbage)(unsigned seen_bits, const char *path);
 
 void reprepare_packed_git(struct repository *r);
 void install_packed_git(struct repository *r, struct packed_git *pack);
+void install_packed_git_and_mru(struct repository *r, struct packed_git *pack);
 
 struct packed_git *get_packed_git(struct repository *r);
 struct list_head *get_packed_git_mru(struct repository *r);
@@ -1,7 +1,9 @@
 #include "git-compat-util.h"
+#include "environment.h"
 #include "gettext.h"
 #include "hex.h"
 #include "object-store.h"
+#include "gvfs-helper-client.h"
 #include "promisor-remote.h"
 #include "config.h"
 #include "trace2.h"

@@ -199,7 +201,7 @@ struct promisor_remote *repo_promisor_remote_find(struct repository *r,
 
 int repo_has_promisor_remote(struct repository *r)
 {
-	return !!repo_promisor_remote_find(r, NULL);
+	return core_use_gvfs_helper || !!repo_promisor_remote_find(r, NULL);
 }
 
 static int remove_fetched_oids(struct repository *repo,

@@ -246,6 +248,15 @@ void promisor_remote_get_direct(struct repository *repo,
 
 	if (oid_nr == 0)
 		return;
+	if (core_use_gvfs_helper) {
+		enum gh_client__created ghc = GHC__CREATED__NOTHING;
+
+		trace2_data_intmax("bug", the_repository, "fetch_objects/gvfs-helper", oid_nr);
+		gh_client__queue_oid_array(oids, oid_nr);
+		if (!gh_client__drain_queue(&ghc))
+			return;
+		die(_("failed to fetch missing objects from the remote"));
+	}
 
 	promisor_remote_init(repo);
 
@@ -5,6 +5,7 @@
 #include "sub-process.h"
 #include "sigchain.h"
 #include "pkt-line.h"
+#include "quote.h"
 
 int cmd2process_cmp(const void *cmp_data UNUSED,
 		    const struct hashmap_entry *eptr,

@@ -81,7 +82,12 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
 	int err;
 	struct child_process *process;
 
-	entry->cmd = cmd;
+	// BUGBUG most callers to subprocess_start() pass in "cmd" the value
+	// BUGBUG of find_hook() which returns a static buffer (that's only
+	// BUGBUG good until the next call to find_hook()).
+	// BUGFIX Defer assignment until we copy the string in our argv.
+	// entry->cmd = cmd;
+
 	process = &entry->process;
 
 	child_process_init(process);

@@ -93,6 +99,8 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
 	process->clean_on_exit_handler = subprocess_exit_handler;
 	process->trace2_child_class = "subprocess";
 
+	entry->cmd = process->args.v[0];
+
 	err = start_command(process);
 	if (err) {
 		error("cannot fork to run subprocess '%s'", cmd);

@@ -112,6 +120,52 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
 	return 0;
 }
 
+int subprocess_start_strvec(struct hashmap *hashmap,
+			    struct subprocess_entry *entry,
+			    int is_git_cmd,
+			    const struct strvec *argv,
+			    subprocess_start_fn startfn)
+{
+	int err;
+	int k;
+	struct child_process *process;
+	struct strbuf quoted = STRBUF_INIT;
+
+	process = &entry->process;
+
+	child_process_init(process);
+	for (k = 0; k < argv->nr; k++)
+		strvec_push(&process->args, argv->v[k]);
+	process->use_shell = 1;
+	process->in = -1;
+	process->out = -1;
+	process->git_cmd = is_git_cmd;
+	process->clean_on_exit = 1;
+	process->clean_on_exit_handler = subprocess_exit_handler;
+	process->trace2_child_class = "subprocess";
+
+	sq_quote_argv_pretty(&quoted, argv->v);
+	entry->cmd = strbuf_detach(&quoted, NULL);
+
+	err = start_command(process);
+	if (err) {
+		error("cannot fork to run subprocess '%s'", entry->cmd);
+		return err;
+	}
+
+	hashmap_entry_init(&entry->ent, strhash(entry->cmd));
+
+	err = startfn(entry);
+	if (err) {
+		error("initialization for subprocess '%s' failed", entry->cmd);
+		subprocess_stop(hashmap, entry);
+		return err;
+	}
+
+	hashmap_add(hashmap, &entry->ent);
+	return 0;
+}
+
 static int handshake_version(struct child_process *process,
 			     const char *welcome_prefix, int *versions,
 			     int *chosen_version)
|
|||
int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
|
||||
subprocess_start_fn startfn);
|
||||
|
||||
int subprocess_start_strvec(struct hashmap *hashmap,
|
||||
struct subprocess_entry *entry,
|
||||
int is_git_cmd,
|
||||
const struct strvec *argv,
|
||||
subprocess_start_fn startfn);
|
||||
|
||||
/* Kill a subprocess and remove it from the subprocess hashmap. */
|
||||
void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry);
|
||||
|
||||
|
|
|
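
The new subprocess_start_strvec() entry point takes an argument vector instead of a single command string, which is how gvfs-helper-client.c launches its long-running `gvfs-helper server` process above. A hedged sketch of a caller, mirroring that pattern; the command, map, and start function here are placeholders, not part of this commit:

	#include "git-compat-util.h"
	#include "strvec.h"
	#include "sub-process.h"

	/* Assumed to be initialized elsewhere with hashmap_init() and cmd2process_cmp. */
	static struct hashmap my_subprocess_map;

	static int my_start_fn(struct subprocess_entry *entry)
	{
		/* handshake with the child here, e.g. via subprocess_handshake() */
		return 0;
	}

	static int start_helper(struct subprocess_entry *entry)
	{
		struct strvec argv = STRVEC_INIT;
		int err;

		strvec_push(&argv, "some-helper");	/* hypothetical command */
		strvec_push(&argv, "--serve");

		/* is_git_cmd=0: run the argv as-is rather than as a "git" subcommand */
		err = subprocess_start_strvec(&my_subprocess_map, entry, 0,
					      &argv, my_start_fn);
		strvec_clear(&argv);
		return err;
	}
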
@@ -1,2 +1,3 @@
+/test-gvfs-protocol
 /test-tool
 /test-fake-ssh
(The diffs for two more files are not shown because of their large size.)