gvfs-helper: add prefetch support

Teach gvfs-helper to support the "/gvfs/prefetch" REST API.
This includes a new `gvfs-helper prefetch --since=<t>` command-line
option and a new `objects.prefetch` verb in `gvfs-helper server` mode.

If the `since` argument is omitted, `gvfs-helper` will search the local
shared-cache for the most recent prefetch packfile and start from
there.

The <t> is usually a seconds-since-epoch value, but may also be a
"friendly" date, such as "midnight" or "yesterday", using the existing
date-selection mechanism.
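
For example, this mirrors the invocations used by the t5799 tests
below (the exact option spellings here are illustrative):

    git gvfs-helper --remote=origin --no-progress prefetch --since=midnight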

Add a `gh_client__prefetch()` API to allow `git.exe` to easily call
prefetch (using the same long-running process as immediate and
queued object fetches).
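
A minimal calling sketch, assuming only the `gh_client__prefetch()`
declaration shown in the header below (the wrapper function itself is
hypothetical):

    #include "git-compat-util.h"
    #include "gvfs-helper-client.h"

    static int example_prefetch(timestamp_t since)
    {
    	int nr_packfiles = 0;

    	/* Passing 0 would mean "start from the newest local prefetch pack". */
    	if (gh_client__prefetch(since, &nr_packfiles))
    		return error("prefetch failed");

    	trace2_printf("prefetch received %d packfile(s)", nr_packfiles);
    	return 0;
    }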

Expand the t5799 unit tests to include prefetch tests.  The test setup
now also builds some commits-and-trees packfiles with well-known
timestamps for testing purposes.

Expand t/helper/test-gvfs-protocol.exe to support the "/gvfs/prefetch"
REST API.

Massively refactor the existing packfile handling in gvfs-helper.c to
reuse more code between "/gvfs/objects POST" and "/gvfs/prefetch".
With this, we now properly name packfiles with the checksum SHA1
rather than a date string.

The refactor also addresses some of the confusing tempfile setup and
install_<result> code (introduced to handle the ambiguity of how POST
works with commit objects).

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Jeff Hostetler, 2019-11-11 15:09:31 -05:00; committed by Johannes Schindelin
Parent: a264938f4d
Commit: 84504b0256
5 changed files: 1522 additions and 359 deletions

gvfs-helper-client.c

@@ -24,13 +24,14 @@ static struct hashmap gh_server__subprocess_map;
static struct object_directory *gh_client__chosen_odb;
/*
* The "objects" capability has 2 verbs: "get" and "post".
* The "objects" capability has verbs: "get" and "post" and "prefetch".
*/
#define CAP_OBJECTS (1u<<1)
#define CAP_OBJECTS_NAME "objects"
#define CAP_OBJECTS__VERB_GET1_NAME "get"
#define CAP_OBJECTS__VERB_POST_NAME "post"
#define CAP_OBJECTS__VERB_PREFETCH_NAME "prefetch"
static int gh_client__start_fn(struct subprocess_entry *subprocess)
{
@@ -129,6 +130,44 @@ static int gh_client__send__objects_get(struct child_process *process,
return 0;
}
/*
* Send a request to gvfs-helper to prefetch packfiles from either the
* cache-server or the main Git server using "/gvfs/prefetch".
*
* objects.prefetch LF
* [<seconds-since-epoch> LF]
* <flush>
*/
static int gh_client__send__objects_prefetch(struct child_process *process,
timestamp_t seconds_since_epoch)
{
int err;
/*
* We assume that all of the packet_ routines call error()
* so that we don't have to.
*/
err = packet_write_fmt_gently(
process->in,
(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_PREFETCH_NAME "\n"));
if (err)
return err;
if (seconds_since_epoch) {
err = packet_write_fmt_gently(process->in, "%" PRItime "\n",
seconds_since_epoch);
if (err)
return err;
}
err = packet_flush_gently(process->in);
if (err)
return err;
return 0;
}
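
To make the request framing concrete: assuming standard pkt-line
encoding (each packet prefixed by a 4-hex-digit length that counts the
prefix itself), a prefetch request for everything newer than
1592668800 would look like this on the wire:

    0015objects.prefetch\n
    000f1592668800\n
    0000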
/*
* Update the loose object cache to include the newly created
* object.
@@ -176,7 +215,7 @@ static void gh_client__update_packed_git(const char *line)
}
/*
* Both CAP_OBJECTS verbs return the same format response:
* CAP_OBJECTS verbs return the same format response:
*
* <odb>
* <data>*
@@ -216,6 +255,8 @@ static int gh_client__objects__receive_response(
const char *v1;
char *line;
int len;
int nr_loose = 0;
int nr_packfile = 0;
int err = 0;
while (1) {
@@ -234,13 +275,13 @@ static int gh_client__objects__receive_response(
else if (starts_with(line, "packfile")) {
gh_client__update_packed_git(line);
ghc |= GHC__CREATED__PACKFILE;
*p_nr_packfile += 1;
nr_packfile++;
}
else if (starts_with(line, "loose")) {
gh_client__update_loose_cache(line);
ghc |= GHC__CREATED__LOOSE;
*p_nr_loose += 1;
nr_loose++;
}
else if (starts_with(line, "ok"))
@@ -254,6 +295,8 @@ static int gh_client__objects__receive_response(
}
*p_ghc = ghc;
*p_nr_loose = nr_loose;
*p_nr_packfile = nr_packfile;
return err;
}
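
For reference, a hypothetical transcript of the response lines this
loop classifies (the payload after each keyword is consumed by the
update helpers and its exact format is not shown in this hunk):

    odb <object-directory-path>
    packfile <packfile-path>
    loose <oid>
    ok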
@@ -310,7 +353,7 @@ static struct gh_server__process *gh_client__find_long_running_process(
/*
* Find an existing long-running process with the above command
* line -or- create a new long-running process for this and
* subsequent 'get' requests.
* subsequent requests.
*/
if (!gh_server__subprocess_map_initialized) {
gh_server__subprocess_map_initialized = 1;
@@ -347,10 +390,14 @@ static struct gh_server__process *gh_client__find_long_running_process(
void gh_client__queue_oid(const struct object_id *oid)
{
// TODO consider removing this trace2. it is useful for interactive
// TODO debugging, but may generate way too much noise for a data
// TODO event.
trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
/*
* Keep this trace as a printf only, so that it goes to the
* perf log, but not the event log. It is useful for interactive
* debugging, but generates way too much (unuseful) noise for the
* database.
*/
if (trace2_is_enabled())
trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
if (!oidset_insert(&gh_client__oidset_queued, oid))
gh_client__oidset_count++;
@@ -431,10 +478,14 @@ int gh_client__get_immediate(const struct object_id *oid,
int nr_packfile = 0;
int err = 0;
// TODO consider removing this trace2. it is useful for interactive
// TODO debugging, but may generate way too much noise for a data
// TODO event.
trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
/*
* Keep this trace as a printf only, so that it goes to the
* perf log, but not the event log. It is useful for interactive
* debugging, but generates way too much (unuseful) noise for the
* database.
*/
if (trace2_is_enabled())
trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
entry = gh_client__find_long_running_process(CAP_OBJECTS);
if (!entry)
@@ -463,3 +514,55 @@ int gh_client__get_immediate(const struct object_id *oid,
return err;
}
/*
* Ask gvfs-helper to prefetch commits-and-trees packfiles since a
* given timestamp.
*
* If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
* the last received prefetch and ask for ones newer than that.
*/
int gh_client__prefetch(timestamp_t seconds_since_epoch,
int *nr_packfiles_received)
{
struct gh_server__process *entry;
struct child_process *process;
enum gh_client__created ghc;
int nr_loose = 0;
int nr_packfile = 0;
int err = 0;
entry = gh_client__find_long_running_process(CAP_OBJECTS);
if (!entry)
return -1;
trace2_region_enter("gh-client", "objects/prefetch", the_repository);
trace2_data_intmax("gh-client", the_repository, "prefetch/since",
seconds_since_epoch);
process = &entry->subprocess.process;
sigchain_push(SIGPIPE, SIG_IGN);
err = gh_client__send__objects_prefetch(process, seconds_since_epoch);
if (!err)
err = gh_client__objects__receive_response(
process, &ghc, &nr_loose, &nr_packfile);
sigchain_pop(SIGPIPE);
if (err) {
subprocess_stop(&gh_server__subprocess_map,
(struct subprocess_entry *)entry);
FREE_AND_NULL(entry);
}
trace2_data_intmax("gh-client", the_repository,
"prefetch/packfile_count", nr_packfile);
trace2_region_leave("gh-client", "objects/prefetch", the_repository);
if (nr_packfiles_received)
*nr_packfiles_received = nr_packfile;
return err;
}

gvfs-helper-client.h

@@ -66,4 +66,22 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
*/
int gh_client__drain_queue(enum gh_client__created *p_ghc);
/*
* Ask `gvfs-helper server` to fetch any "prefetch packs"
* available on the server more recent than the requested time.
*
* If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
* the last received prefetch and ask for ones newer than that.
*
* A long-running background process is used to make subsequent
* requests (either prefetch or regular immediate/queued requests)
* more efficient.
*
* One or more packfiles will be created in the shared-cache ODB.
*
* Returns 0 on success, -1 on error. Optionally also returns the
* number of prefetch packs received.
*/
int gh_client__prefetch(timestamp_t seconds_since_epoch,
int *nr_packfiles_received);
#endif /* GVFS_HELPER_CLIENT_H */

gvfs-helper.c: file diff not shown because of its large size.

t/helper/test-gvfs-protocol.c

@@ -14,6 +14,7 @@
#include "json-writer.h"
#include "oidset.h"
#include "date.h"
#include "packfile.h"
#define TR2_CAT "test-gvfs-protocol"
@@ -541,9 +542,6 @@ static enum worker_result send_loose_object(const struct object_id *oid,
return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
}
trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT,
type, size, (const char *)content);
/*
* We are blending several somewhat independent concepts here:
*
@@ -856,7 +854,6 @@ static enum worker_result get_packfile_from_oids(
goto done;
}
trace2_printf("%s: pack-objects returned %d bytes", TR2_CAT, buf_packfile->len);
wr = WR_OK;
done:
@@ -1004,6 +1001,305 @@ done:
return wr;
}
/*
* bswap.h only defines big endian functions.
* The GVFS Protocol defines fields in little endian.
*/
static inline uint64_t my_get_le64(uint64_t le_val)
{
#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN
return le_val;
#else
return default_bswap64(le_val);
#endif
}
static inline uint16_t my_get_le16(uint16_t le_val)
{
#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN
return le_val;
#else
return default_bswap16(le_val);
#endif
}
/*
* GVFS Protocol headers for the multipack format
* All integer values are little-endian on the wire.
*
* Note: technically, the protocol defines the `ph` fields as signed, but
* that makes a mess of the bswap routines and we're not going to overflow
* them for a very long time.
*/
static unsigned char v1_h[6] = { 'G', 'P', 'R', 'E', ' ', 0x01 };
struct ph {
uint64_t timestamp;
uint64_t len_pack;
uint64_t len_idx;
};
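
To make the stream layout explicit, here is a sketch of the multipack
body as assembled by send_multipack() below (all integers are
little-endian on the wire; an all-ones len_idx is the "no idx"
sentinel):

    /*
     * <v1_h>            6 bytes: 'G' 'P' 'R' 'E' ' ' 0x01
     * <np>              le16, number of packfiles that follow
     * then, np times:
     *   <timestamp>     le64
     *   <len_pack>      le64
     *   <len_idx>       le64 (~0 means "no idx follows")
     *   <pack-data>     len_pack bytes
     *   [<idx-data>]    len_idx bytes, when present
     */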
/*
* Accumulate a list of commits-and-trees packfiles we have in the local ODB.
* The test script should have pre-created a set of "ct-<epoch>.pack" and .idx
* files for us. We serve these as is and DO NOT try to dynamically create
* new commits/trees packfiles (like the cache-server does). We are only
* testing if/whether gvfs-helper.exe can receive one or more packfiles and
* idx files over the protocol.
*/
struct ct_pack_item {
struct ph ph;
struct strbuf path_pack;
struct strbuf path_idx;
};
static void ct_pack_item__free(struct ct_pack_item *item)
{
if (!item)
return;
strbuf_release(&item->path_pack);
strbuf_release(&item->path_idx);
free(item);
}
struct ct_pack_data {
struct ct_pack_item **items;
size_t nr, alloc;
};
static void ct_pack_data__release(struct ct_pack_data *data)
{
int k;
if (!data)
return;
for (k = 0; k < data->nr; k++)
ct_pack_item__free(data->items[k]);
FREE_AND_NULL(data->items);
data->nr = 0;
data->alloc = 0;
}
static void cb_ct_pack(const char *full_path, size_t full_path_len,
const char *file_path, void *void_data)
{
struct ct_pack_data *data = void_data;
struct ct_pack_item *item = NULL;
struct stat st;
const char *v;
/*
* We only want "ct-<epoch>.pack" files. The test script creates
* cached commits-and-trees packfiles with this prefix to avoid
* confusion with prefetch packfiles received by gvfs-helper.
*/
if (!ends_with(file_path, ".pack"))
return;
if (!skip_prefix(file_path, "ct-", &v))
return;
item = (struct ct_pack_item *)xcalloc(1, sizeof(*item));
strbuf_init(&item->path_pack, 0);
strbuf_addstr(&item->path_pack, full_path);
strbuf_init(&item->path_idx, 0);
strbuf_addstr(&item->path_idx, full_path);
strbuf_strip_suffix(&item->path_idx, ".pack");
strbuf_addstr(&item->path_idx, ".idx");
item->ph.timestamp = (uint64_t)strtoul(v, NULL, 10);
lstat(item->path_pack.buf, &st);
item->ph.len_pack = (uint64_t)st.st_size;
if (string_list_has_string(&mayhem_list, "no_prefetch_idx"))
item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t);
else if (lstat(item->path_idx.buf, &st) < 0)
item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t);
else
item->ph.len_idx = (uint64_t)st.st_size;
ALLOC_GROW(data->items, data->nr + 1, data->alloc);
data->items[data->nr++] = item;
}
/*
* Sort by increasing EPOCH time.
*/
static int ct_pack_sort_compare(const void *_a, const void *_b)
{
const struct ct_pack_item *a = *(const struct ct_pack_item **)_a;
const struct ct_pack_item *b = *(const struct ct_pack_item **)_b;
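/* At this point a >= b, so (a != b) is 1 exactly when a > b; the result is -1, 0, or +1. */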
return (a->ph.timestamp < b->ph.timestamp) ? -1 : (a->ph.timestamp != b->ph.timestamp);
}
static enum worker_result send_ct_item(const struct ct_pack_item *item)
{
struct ph ph_le;
int fd_pack = -1;
int fd_idx = -1;
enum worker_result wr = WR_OK;
/* send per-packfile header. all fields are little-endian on the wire. */
ph_le.timestamp = my_get_le64(item->ph.timestamp);
ph_le.len_pack = my_get_le64(item->ph.len_pack);
ph_le.len_idx = my_get_le64(item->ph.len_idx);
if (write_in_full(1, &ph_le, sizeof(ph_le)) < 0) {
logerror("unable to write ph_le");
wr = WR_IO_ERROR;
goto done;
}
trace2_printf("%s: sending prefetch pack '%s'", TR2_CAT, item->path_pack.buf);
fd_pack = git_open_cloexec(item->path_pack.buf, O_RDONLY);
if (fd_pack == -1 || copy_fd(fd_pack, 1)) {
logerror("could not send packfile");
wr = WR_IO_ERROR;
goto done;
}
if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t)) {
trace2_printf("%s: sending prefetch idx '%s'", TR2_CAT, item->path_idx.buf);
fd_idx = git_open_cloexec(item->path_idx.buf, O_RDONLY);
if (fd_idx == -1 || copy_fd(fd_idx, 1)) {
logerror("could not send idx");
wr = WR_IO_ERROR;
goto done;
}
}
done:
if (fd_pack != -1)
close(fd_pack);
if (fd_idx != -1)
close(fd_idx);
return wr;
}
/*
* The GVFS Protocol defines the lastTimeStamp parameter as the value
* of the last prefetch pack that the client has. Therefore, we only
* want to send newer ones.
*/
static int want_ct_pack(const struct ct_pack_item *item, timestamp_t last_timestamp)
{
return item->ph.timestamp > last_timestamp;
}
static enum worker_result send_multipack(struct ct_pack_data *data,
timestamp_t last_timestamp)
{
struct strbuf response_header = STRBUF_INIT;
struct strbuf uuid = STRBUF_INIT;
enum worker_result wr;
size_t content_len = 0;
unsigned short np = 0;
unsigned short np_le;
int k;
/*
* Precompute the content-length so that we don't have to deal with
* chunking it.
*/
content_len += sizeof(v1_h) + sizeof(np);
for (k = 0; k < data->nr; k++) {
struct ct_pack_item *item = data->items[k];
if (!want_ct_pack(item, last_timestamp))
continue;
np++;
content_len += sizeof(struct ph);
content_len += item->ph.len_pack;
if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t))
content_len += item->ph.len_idx;
}
strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
strbuf_addstr(&response_header, "Cache-Control: private\r\n");
strbuf_addstr(&response_header,
"Content-Type: application/x-gvfs-timestamped-packfiles-indexes\r\n");
strbuf_addf( &response_header, "Content-Length: %d\r\n", (int)content_len);
strbuf_addf( &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string);
strbuf_addf( &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
gen_fake_uuid(&uuid);
strbuf_addf( &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
strbuf_addstr(&response_header, "\r\n");
if (write_in_full(1, response_header.buf, response_header.len) < 0) {
logerror("unable to write response header");
wr = WR_IO_ERROR;
goto done;
}
/* send protocol version header */
if (write_in_full(1, v1_h, sizeof(v1_h)) < 0) {
logerror("unabled to write v1_h");
wr = WR_IO_ERROR;
goto done;
}
/* send number of packfiles */
np_le = my_get_le16(np);
if (write_in_full(1, &np_le, sizeof(np_le)) < 0) {
logerror("unable to write np");
wr = WR_IO_ERROR;
goto done;
}
for (k = 0; k < data->nr; k++) {
if (!want_ct_pack(data->items[k], last_timestamp))
continue;
wr = send_ct_item(data->items[k]);
if (wr != WR_OK)
goto done;
}
wr = WR_OK;
done:
strbuf_release(&uuid);
strbuf_release(&response_header);
return wr;
}
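
As a worked example of the Content-Length precomputation above:
assuming sizeof(v1_h) == 6, sizeof(np) == 2, and sizeof(struct ph) ==
24 (three uint64_t fields, no padding), sending two packfiles of 1000
and 2000 pack bytes with 300- and 400-byte idx files gives
6 + 2 + (24 + 1000 + 300) + (24 + 2000 + 400) = 3756 bytes.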
static enum worker_result do__gvfs_prefetch__get(struct req *req)
{
struct ct_pack_data data;
timestamp_t last_timestamp = 0;
enum worker_result wr;
memset(&data, 0, sizeof(data));
if (req->quest_args.len) {
const char *key = strstr(req->quest_args.buf, "lastPackTimestamp=");
if (key) {
const char *val;
if (skip_prefix(key, "lastPackTimestamp=", &val)) {
last_timestamp = strtol(val, NULL, 10);
}
}
}
trace2_printf("%s: prefetch/since %"PRItime, TR2_CAT, last_timestamp);
for_each_file_in_pack_dir(get_object_directory(), cb_ct_pack, &data);
QSORT(data.items, data.nr, ct_pack_sort_compare);
wr = send_multipack(&data, last_timestamp);
ct_pack_data__release(&data);
return wr;
}
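
A hypothetical exchange for the handler above, using the loopback URL
layout from t5799 (response headers abbreviated):

    GET /servertype/origin/gvfs/prefetch?lastPackTimestamp=1000000000 HTTP/1.1
    Host: 127.0.0.1:<port>

    HTTP/1.1 200 OK
    Content-Type: application/x-gvfs-timestamped-packfiles-indexes
    Content-Length: <precomputed total>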
/*
* Read the HTTP request up to the start of the optional message-body.
* We do this byte-by-byte because we have keep-alive turned on and
@@ -1159,6 +1455,11 @@ static enum worker_result req__read(struct req *req, int fd)
* We let our caller read/chunk it in as appropriate.
*/
done:
#if 0
/*
* This is useful for debugging the request, but very noisy.
*/
if (trace2_is_enabled()) {
struct string_list_item *item;
trace2_printf("%s: %s", TR2_CAT, req->start_line.buf);
@@ -1173,6 +1474,7 @@ done:
for_each_string_list_item(item, &req->header_list)
trace2_printf("%s: Hdrs: %s", TR2_CAT, item->string);
}
#endif
return WR_OK;
}
@@ -1221,6 +1523,12 @@ static enum worker_result dispatch(struct req *req)
return do__gvfs_config__get(req);
}
if (!strcmp(req->gvfs_api.buf, "gvfs/prefetch")) {
if (!strcmp(method, "GET"))
return do__gvfs_prefetch__get(req);
}
return send_http_error(1, 501, "Not Implemented", -1,
WR_OK | WR_HANGUP);
}

t/t5799-gvfs-helper.sh

@@ -24,8 +24,8 @@ test_set_port GIT_TEST_GVFS_PROTOCOL_PORT
# actually use it). We are only testing explicit object
# fetching using gvfs-helper.exe in isolation.
#
REPO_SRC="$PWD"/repo_src
REPO_T1="$PWD"/repo_t1
REPO_SRC="$(pwd)"/repo_src
REPO_T1="$(pwd)"/repo_t1
# Setup some loopback URLs where test-gvfs-protocol.exe will be
# listening. We will spawn it directly inside the repo_src directory,
@@ -44,22 +44,22 @@ HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT
ORIGIN_URL=http://$HOST_PORT/servertype/origin
CACHE_URL=http://$HOST_PORT/servertype/cache
SHARED_CACHE_T1="$PWD"/shared_cache_t1
SHARED_CACHE_T1="$(pwd)"/shared_cache_t1
# The pid-file is created by test-gvfs-protocol.exe when it starts.
# The server will shut down if/when we delete it. (This is a little
# easier than killing it by PID.)
#
PID_FILE="$PWD"/pid-file.pid
SERVER_LOG="$PWD"/OUT.server.log
PID_FILE="$(pwd)"/pid-file.pid
SERVER_LOG="$(pwd)"/OUT.server.log
PATH="$GIT_BUILD_DIR/t/helper/:$PATH" && export PATH
OIDS_FILE="$PWD"/oid_list.txt
OIDS_CT_FILE="$PWD"/oid_ct_list.txt
OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt
OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt
OID_ONE_COMMIT_FILE="$PWD"/oid_one_commit_file.txt
OIDS_FILE="$(pwd)"/oid_list.txt
OIDS_CT_FILE="$(pwd)"/oid_ct_list.txt
OIDS_BLOBS_FILE="$(pwd)"/oids_blobs_file.txt
OID_ONE_BLOB_FILE="$(pwd)"/oid_one_blob_file.txt
OID_ONE_COMMIT_FILE="$(pwd)"/oid_one_commit_file.txt
# Get a list of available OIDs in repo_src so that we can try to fetch
# them and so that we don't have to hard-code a list of known OIDs.
@@ -108,6 +108,30 @@ get_one_commit_oid () {
return 0
}
# Create a commits-and-trees packfile for use with "prefetch"
# using the given range of commits.
#
create_commits_and_trees_packfile () {
if test $# -eq 2
then
epoch=$1
revs=$2
else
echo "create_commits_and_trees_packfile: Need 2 args"
return 1
fi
pack_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.pack
idx_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.idx
git -C "$REPO_SRC" pack-objects --stdout --revs --filter=blob:none \
>"$pack_file" <<-EOF
$revs
EOF
git -C "$REPO_SRC" index-pack -o "$idx_file" "$pack_file"
return 0
}
test_expect_success 'setup repos' '
test_create_repo "$REPO_SRC" &&
git -C "$REPO_SRC" branch -M main &&
@@ -115,9 +139,16 @@ test_expect_success 'setup repos' '
# test_commit_bulk() does magic to create a packfile containing
# the new commits.
#
# We create branches in repo_src, but also remember the branch OIDs
# in files so that we can refer to them in repo_t1, which will not
# have the commits locally (because we do not clone or fetch).
#
test_commit_bulk -C "$REPO_SRC" --filename="batch_a.%s.t" 9 &&
git -C "$REPO_SRC" branch B1 &&
cp "$REPO_SRC"/.git/refs/heads/main m1.branch &&
#
test_commit_bulk -C "$REPO_SRC" --filename="batch_b.%s.t" 9 &&
git -C "$REPO_SRC" branch B2 &&
cp "$REPO_SRC"/.git/refs/heads/main m2.branch &&
#
# test_commit() creates commits, trees, tags, and blobs and leave
@@ -134,8 +165,16 @@ test_expect_success 'setup repos' '
test_commit -C "$REPO_SRC" file7.txt &&
test_commit -C "$REPO_SRC" file8.txt &&
test_commit -C "$REPO_SRC" file9.txt &&
git -C "$REPO_SRC" branch B3 &&
cp "$REPO_SRC"/.git/refs/heads/main m3.branch &&
#
# Create some commits-and-trees-only packfiles for testing prefetch.
# Set arbitrary EPOCH times to make it easier to test fetch-since.
#
create_commits_and_trees_packfile 1000000000 B1 &&
create_commits_and_trees_packfile 1100000000 B1..B2 &&
create_commits_and_trees_packfile 1200000000 B2..B3 &&
#
# gvfs-helper.exe writes downloaded objects to a shared-cache directory
# rather than the ODB inside the .git directory.
#
@@ -160,10 +199,10 @@ test_expect_success 'setup repos' '
EOF
cat <<-EOF >creds.sh &&
#!/bin/sh
cat "$PWD"/creds.txt
cat "$(pwd)"/creds.txt
EOF
chmod 755 creds.sh &&
git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$PWD\"/creds.txt; }; f" &&
git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$(pwd)\"/creds.txt; }; f" &&
#
# Create some test data sets.
#
@@ -554,8 +593,8 @@ test_expect_success 'basic: POST-request a single blob' '
# Request a single commit via POST. Per the GVFS Protocol, the server
# should implicitly send us a packfile containing the commit and the
# trees it references. Confirm that we properly handled the receipt of
# the packfile. (Here, we are testing that asking for a single object
# yields a packfile rather than a loose object.)
# the packfile. (Here, we are testing that asking for a single commit
# via POST yields a packfile rather than a loose object.)
#
# We DO NOT verify that the packfile contains commits/trees and no blobs
# because our test helper doesn't implement the filtering.
@@ -587,6 +626,105 @@ test_expect_success 'basic: POST-request a single commit' '
verify_connection_count 1
'
test_expect_success 'basic: PREFETCH w/o arg gets all' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server &&
# Without a "since" argument gives us all "ct-*.pack" since the EPOCH
# because we do not have any prefetch packs locally.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
--no-progress \
prefetch >OUT.output &&
# gvfs-helper prints a "packfile <path>" message for each received
# packfile.
#
verify_received_packfile_count 3 &&
stop_gvfs_protocol_server &&
verify_connection_count 1
'
test_expect_success 'basic: PREFETCH w/ arg' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server &&
# Ask for cached packfiles NEWER THAN the given time.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
--no-progress \
prefetch --since="1000000000" >OUT.output &&
# gvfs-helper prints a "packfile <path>" message for each received
# packfile.
#
verify_received_packfile_count 2 &&
stop_gvfs_protocol_server &&
verify_connection_count 1
'
test_expect_success 'basic: PREFETCH mayhem no_prefetch_idx' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server_with_mayhem no_prefetch_idx &&
# Request prefetch packs, but tell server to not send any
# idx files and force gvfs-helper to compute them.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
--no-progress \
prefetch --since="1000000000" >OUT.output &&
# gvfs-helper prints a "packfile <path>" message for each received
# packfile.
#
verify_received_packfile_count 2 &&
stop_gvfs_protocol_server &&
verify_connection_count 1
'
test_expect_success 'basic: PREFETCH up-to-date' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server &&
# Ask for cached packfiles NEWER THAN the given time.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
--no-progress \
prefetch --since="1000000000" >OUT.output &&
# gvfs-helper prints a "packfile <path>" message for each received
# packfile.
#
verify_received_packfile_count 2 &&
# Ask again for any packfiles newer than what we have cached locally.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
--no-progress \
prefetch >OUT.output &&
# gvfs-helper prints a "packfile <path>" message for each received
# packfile.
#
verify_received_packfile_count 0 &&
stop_gvfs_protocol_server &&
verify_connection_count 2
'
#################################################################
# Tests to see how gvfs-helper responds to network problems.
#
@@ -960,44 +1098,6 @@ test_expect_success 'HTTP GET Auth on Cache Server' '
# magically fetched whenever required.
#################################################################
test_expect_success 'integration: explicit commit/trees, implicit blobs: log file' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server &&
# We have a very empty repo. Seed it with all of the commits
# and trees. The purpose of this test is to demand-load the
# needed blobs only, so we prefetch the commits and trees.
#
git -C "$REPO_T1" gvfs-helper \
--cache-server=disable \
--remote=origin \
get \
<"$OIDS_CT_FILE" >OUT.output &&
# Confirm that we do not have the blobs locally.
# With gvfs-helper turned off, we should fail.
#
test_must_fail \
git -C "$REPO_T1" -c core.useGVFSHelper=false \
log $(cat m3.branch) -- file9.txt \
>OUT.output 2>OUT.stderr &&
# Turn on gvfs-helper and retry. This should implicitly fetch
# any needed blobs.
#
git -C "$REPO_T1" -c core.useGVFSHelper=true \
log $(cat m3.branch) -- file9.txt \
>OUT.output 2>OUT.stderr &&
# Verify that gvfs-helper wrote the fetched blobs to the
# local ODB, such that a second attempt with gvfs-helper
# turned off should succeed.
#
git -C "$REPO_T1" -c core.useGVFSHelper=false \
log $(cat m3.branch) -- file9.txt \
>OUT.output 2>OUT.stderr
'
test_expect_success 'integration: explicit commit/trees, implicit blobs: diff 2 commits' '
test_when_finished "per_test_cleanup" &&
start_gvfs_protocol_server &&