#include "cache.h"
#include "config.h"
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
#include "exec-cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "run-command.h"
#include "connect.h"
#include "sigchain.h"
#include "version.h"
#include "string-list.h"
#include "parse-options.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
#include "quote.h"

static const char * const upload_pack_usage[] = {
	N_("git upload-pack [<options>] <dir>"),
	NULL
};

/* Remember to update object flag allocation in object.h */
#define THEY_HAVE	(1u << 11)
#define OUR_REF		(1u << 12)
#define WANTED		(1u << 13)
#define COMMON_KNOWN	(1u << 14)
#define REACHABLE	(1u << 15)

#define SHALLOW		(1u << 16)
#define NOT_SHALLOW	(1u << 17)
#define CLIENT_SHALLOW	(1u << 18)
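/*
 * Set on refs matched by the transfer.hiderefs / uploadpack.hiderefs
 * configuration; such refs are omitted from the initial advertisement
 * and do not count as OUR_REF.
 */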
#define HIDDEN_REF	(1u << 19)

static timestamp_t oldest_have;

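/*
 * Set when the client asks for --deepen=<N>, i.e. to extend the shallow
 * boundary by N commits relative to its current boundary rather than
 * measuring depth from the remote tips.
 */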
static int deepen_relative;
static int multi_ack;
static int no_done;
static int use_thin_pack, use_ofs_delta, use_include_tag;
static int no_progress, daemon_mode;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
/* Allow request of any sha1. Implies ALLOW_TIP_SHA1 and ALLOW_REACHABLE_SHA1. */
#define ALLOW_ANY_SHA1	07
static unsigned int allow_unadvertised_object_request;
static int shallow_nr;
static struct object_array have_obj;
static struct object_array want_obj;
static struct object_array extra_edge_obj;
static unsigned int timeout;
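/*
 * Seconds to wait in poll() for pack-objects output before sending a
 * keepalive packet on the sideband; a negative value disables the
 * timeout.
 */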
static int keepalive = 5;
/* 0 for no sideband,
 * otherwise maximum packet size (up to 65520 bytes).
 */
static int use_sideband;
static int advertise_refs;
static int stateless_rpc;
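/*
 * Optional shell command (uploadpack.packObjectsHook; honored only from
 * config sources outside the repository itself) run in place of
 * "git pack-objects", e.g. to log or cache pack generation.
 */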
static const char *pack_objects_hook;

static int filter_capability_requested;
static int allow_filter;
static struct list_objects_filter_options filter_options;

static void reset_timeout(void)
{
	alarm(timeout);
}

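/*
 * Relay data to the client on logical channel "fd": multiplexed over the
 * sideband when one is in use, written directly otherwise.  Channel 1 is
 * pack data, 2 is progress, and 3 is the emergency abort channel (demoted
 * to stderr when there is no sideband).
 */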
static void send_client_data(int fd, const char *data, ssize_t sz)
{
	if (use_sideband) {
		send_sideband(1, fd, data, sz, use_sideband);
		return;
	}
	if (fd == 3)
		/* emergency quit */
		fd = 2;
	if (fd == 2) {
		/* XXX: are we happy to lose stuff here? */
		xwrite(fd, data, sz);
		return;
	}
	write_or_die(fd, data, sz);
}

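/*
 * for_each_commit_graft() callback: emit a "--shallow <oid>" line for
 * each shallow graft so pack-objects knows the history boundary.
 */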
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	FILE *fp = cb_data;
	if (graft->nr_parent == -1)
		fprintf(fp, "--shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

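/*
 * Spawn pack-objects (or the configured hook), feed it the want/have
 * boundaries and shallow information on stdin, and relay the resulting
 * pack data and progress output back to the client.
 */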
static void create_pack_file(void)
{
	struct child_process pack_objects = CHILD_PROCESS_INIT;
	char data[8193], progress[128];
	char abort_msg[] = "aborting due to possible repository "
		"corruption on the remote side.";
	int buffered = -1;
	ssize_t sz;
	int i;
	FILE *pipe_fd;

	if (!pack_objects_hook)
		pack_objects.git_cmd = 1;
	else {
		argv_array_push(&pack_objects.args, pack_objects_hook);
		argv_array_push(&pack_objects.args, "git");
		pack_objects.use_shell = 1;
	}

	if (shallow_nr) {
		argv_array_push(&pack_objects.args, "--shallow-file");
		argv_array_push(&pack_objects.args, "");
	}
	argv_array_push(&pack_objects.args, "pack-objects");
	argv_array_push(&pack_objects.args, "--revs");
	if (use_thin_pack)
		argv_array_push(&pack_objects.args, "--thin");

	argv_array_push(&pack_objects.args, "--stdout");
	if (shallow_nr)
		argv_array_push(&pack_objects.args, "--shallow");
	if (!no_progress)
		argv_array_push(&pack_objects.args, "--progress");
	if (use_ofs_delta)
		argv_array_push(&pack_objects.args, "--delta-base-offset");
	if (use_include_tag)
		argv_array_push(&pack_objects.args, "--include-tag");
	if (filter_options.filter_spec) {
		if (pack_objects.use_shell) {
			struct strbuf buf = STRBUF_INIT;
			sq_quote_buf(&buf, filter_options.filter_spec);
			argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf);
			strbuf_release(&buf);
		} else {
			argv_array_pushf(&pack_objects.args, "--filter=%s",
					 filter_options.filter_spec);
		}
	}

	pack_objects.in = -1;
	pack_objects.out = -1;
	pack_objects.err = -1;

	if (start_command(&pack_objects))
		die("git upload-pack: unable to fork git-pack-objects");

	pipe_fd = xfdopen(pack_objects.in, "w");

	if (shallow_nr)
		for_each_commit_graft(write_one_shallow, pipe_fd);

	for (i = 0; i < want_obj.nr; i++)
		fprintf(pipe_fd, "%s\n",
			oid_to_hex(&want_obj.objects[i].item->oid));
	fprintf(pipe_fd, "--not\n");
	for (i = 0; i < have_obj.nr; i++)
		fprintf(pipe_fd, "%s\n",
			oid_to_hex(&have_obj.objects[i].item->oid));
	for (i = 0; i < extra_edge_obj.nr; i++)
		fprintf(pipe_fd, "%s\n",
			oid_to_hex(&extra_edge_obj.objects[i].item->oid));
	fprintf(pipe_fd, "\n");
	fflush(pipe_fd);
	fclose(pipe_fd);

	/* We read from pack_objects.err to capture stderr output for
	 * progress bar, and pack_objects.out to capture the pack data.
	 */

	while (1) {
		struct pollfd pfd[2];
		int pe, pu, pollsize;
		int ret;

		reset_timeout();

		pollsize = 0;
		pe = pu = -1;

		if (0 <= pack_objects.out) {
			pfd[pollsize].fd = pack_objects.out;
			pfd[pollsize].events = POLLIN;
			pu = pollsize;
			pollsize++;
		}
		if (0 <= pack_objects.err) {
			pfd[pollsize].fd = pack_objects.err;
			pfd[pollsize].events = POLLIN;
			pe = pollsize;
			pollsize++;
		}

		if (!pollsize)
			break;

		ret = poll(pfd, pollsize,
			keepalive < 0 ? -1 : 1000 * keepalive);

		if (ret < 0) {
			if (errno != EINTR) {
				error_errno("poll failed, resuming");
				sleep(1);
			}
			continue;
		}
		if (0 <= pe && (pfd[pe].revents & (POLLIN|POLLHUP))) {
			/* Status ready; we ship that in the side-band
			 * or dump to the standard error.
			 */
			sz = xread(pack_objects.err, progress,
				  sizeof(progress));
			if (0 < sz)
				send_client_data(2, progress, sz);
			else if (sz == 0) {
				close(pack_objects.err);
				pack_objects.err = -1;
			}
			else
				goto fail;
			/* give priority to status messages */
			continue;
		}
		if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
			/* Data ready; we keep the last byte to ourselves
			 * in case we detect broken rev-list, so that we
			 * can leave the stream corrupted.  This is
			 * unfortunate -- unpack-objects would happily
			 * accept a valid packdata with trailing garbage,
			 * so appending garbage after we pass all the
			 * pack data is not good enough to signal
			 * breakage to downstream.
			 */
			char *cp = data;
			ssize_t outsz = 0;
			if (0 <= buffered) {
				*cp++ = buffered;
				outsz++;
			}
			sz = xread(pack_objects.out, cp,
				  sizeof(data) - outsz);
			if (0 < sz)
				;
			else if (sz == 0) {
				close(pack_objects.out);
				pack_objects.out = -1;
			}
			else
				goto fail;
			sz += outsz;
			if (1 < sz) {
				buffered = data[sz-1] & 0xFF;
				sz--;
			}
			else
				buffered = -1;
			send_client_data(1, data, sz);
		}

		/*
		 * We hit the keepalive timeout without saying anything; send
		 * an empty message on the data sideband just to let the other
		 * side know we're still working on it, but don't have any data
		 * yet.
		 *
		 * If we don't have a sideband channel, there's no room in the
		 * protocol to say anything, so those clients are just out of
		 * luck.
		 */
		if (!ret && use_sideband) {
			static const char buf[] = "0005\1";
			write_or_die(1, buf, 5);
		}
	}

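	/*
	 * Collect pack-objects' exit status; if it failed, the pack the
	 * client received cannot be trusted, so report the error and abort.
	 */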
	if (finish_command(&pack_objects)) {
		error("git upload-pack: git-pack-objects died with error.");
		goto fail;
	}

	/* flush the data */
	if (0 <= buffered) {
		data[0] = buffered;
		send_client_data(1, data, 1);
		fprintf(stderr, "flushed.\n");
	}
	if (use_sideband)
		packet_flush(1);
	return;

 fail:
	send_client_data(3, abort_msg, sizeof(abort_msg));
	die("git upload-pack: %s", abort_msg);
}

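/*
 * Parse one object name from the client's "have" line.  Returns -1 if we
 * do not have the object, 0 if the client had already told us about it,
 * and 1 for a newly learned common object (which is added to have_obj);
 * commits and their parents are marked THEY_HAVE along the way.
 */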
static int got_oid(const char *hex, struct object_id *oid)
{
	struct object *o;
	int we_knew_they_have = 0;

	if (get_oid_hex(hex, oid))
		die("git upload-pack: expected SHA1 object, got '%s'", hex);
	if (!has_object_file(oid))
		return -1;

|
|
|
o = parse_object(oid);
|
2006-07-06 05:00:02 +04:00
|
|
|
if (!o)
|
2017-05-07 01:10:28 +03:00
|
|
|
die("oops (%s)", oid_to_hex(oid));
|
2006-08-13 09:16:51 +04:00
|
|
|
if (o->type == OBJ_COMMIT) {
|
2006-07-06 05:00:02 +04:00
|
|
|
struct commit_list *parents;
|
2006-07-06 08:28:20 +04:00
|
|
|
struct commit *commit = (struct commit *)o;
|
2006-07-06 05:00:02 +04:00
|
|
|
if (o->flags & THEY_HAVE)
|
2006-07-06 08:28:20 +04:00
|
|
|
we_knew_they_have = 1;
|
|
|
|
else
|
|
|
|
o->flags |= THEY_HAVE;
|
|
|
|
if (!oldest_have || (commit->date < oldest_have))
|
|
|
|
oldest_have = commit->date;
|
|
|
|
for (parents = commit->parents;
|
2006-07-06 05:00:02 +04:00
|
|
|
parents;
|
|
|
|
parents = parents->next)
|
|
|
|
parents->item->object.flags |= THEY_HAVE;
|
2005-07-05 02:29:17 +04:00
|
|
|
}
|
2006-07-06 08:28:20 +04:00
|
|
|
if (!we_knew_they_have) {
|
|
|
|
add_object_array(o, NULL, &have_obj);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
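Read against the semantic patch above, the parse_object(oid) call in
got_oid() is already the converted form; a pre-conversion caller would
have passed the raw hash member instead. A tiny illustrative fragment
(hypothetical caller, not actual upload-pack code):

	struct object_id oid;
	struct object *o;

	/* before the conversion: callers passed the raw hash array */
	/* o = parse_object(oid.hash); */

	/* after the conversion: the struct object_id is passed directly */
	o = parse_object(&oid);
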
static int reachable(struct commit *want)
{
	struct prio_queue work = { compare_commits_by_commit_date };

	prio_queue_put(&work, want);
	while (work.nr) {
		struct commit_list *list;
		struct commit *commit = prio_queue_get(&work);

		if (commit->object.flags & THEY_HAVE) {
			want->object.flags |= COMMON_KNOWN;
			break;
		}
		if (!commit->object.parsed)
			parse_object(&commit->object.oid);
		if (commit->object.flags & REACHABLE)
			continue;
		commit->object.flags |= REACHABLE;
		if (commit->date < oldest_have)
			continue;
		for (list = commit->parents; list; list = list->next) {
			struct commit *parent = list->item;
			if (!(parent->object.flags & REACHABLE))
				prio_queue_put(&work, parent);
		}
	}
	want->object.flags |= REACHABLE;
	clear_commit_marks(want, REACHABLE);
	clear_prio_queue(&work);
	return (want->object.flags & COMMON_KNOWN);
}

static int ok_to_give_up(void)
{
	int i;

	if (!have_obj.nr)
		return 0;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *want = want_obj.objects[i].item;

		if (want->flags & COMMON_KNOWN)
			continue;
		want = deref_tag(want, "a want line", 0);
		if (!want || want->type != OBJ_COMMIT) {
			/* no way to tell if this is reachable by
			 * looking at the ancestry chain alone, so
			 * leave a note to ourselves not to worry about
			 * this object anymore.
			 */
			want_obj.objects[i].item->flags |= COMMON_KNOWN;
			continue;
		}
		if (!reachable((struct commit *)want))
			return 0;
	}
	return 1;
}

pkt-line: provide a LARGE_PACKET_MAX static buffer

Most of the callers of packet_read_line just read into a static 1000-byte
buffer (callers which handle arbitrary binary data already use
LARGE_PACKET_MAX). This works fine in practice, because:

1. The only variable-sized data in these lines is a ref name, and refs
   tend to be a lot shorter than 1000 characters.
2. When sending ref lines, git-core always limits itself to 1000 byte
   packets.

However, the only limit given in the protocol specification in
Documentation/technical/protocol-common.txt is LARGE_PACKET_MAX; the 1000
byte limit is mentioned only in pack-protocol.txt, and then only describing
what we write, not as a specific limit for readers.

This patch lets us bump the 1000-byte limit to LARGE_PACKET_MAX. Even
though git-core will never write a packet where this makes a difference,
there are two good reasons to do this:

1. Other git implementations may have followed protocol-common.txt and
   used a larger maximum size. We don't bump into it in practice because
   it would involve very long ref names.
2. We may want to increase the 1000-byte limit one day. Since packets are
   transferred before any capabilities, it's difficult to do this in a
   backwards-compatible way. But if we bump the size of buffer the readers
   can handle, eventually older versions of git will be obsolete enough
   that we can justify bumping the writers, as well. We don't have plans
   to do this anytime soon, but there is no reason not to start the clock
   ticking now.

Just bumping all of the reading bufs to LARGE_PACKET_MAX would waste
memory. Instead, since most readers just read into a temporary buffer
anyway, let's provide a single static buffer that all callers can use. We
can further wrap this detail away by having the packet_read_line wrapper
just use the buffer transparently and return a pointer to the static
storage. That covers most of the cases, and the remaining ones already
read into their own LARGE_PACKET_MAX buffers.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

static int get_common_commits(void)
{
	struct object_id oid;
	char last_hex[GIT_MAX_HEXSZ + 1];
	int got_common = 0;
	int got_other = 0;
	int sent_ready = 0;

	save_commit_buffer = 0;

	for (;;) {
		char *line = packet_read_line(0, NULL);
		const char *arg;

		reset_timeout();

		if (!line) {
			if (multi_ack == 2 && got_common
			    && !got_other && ok_to_give_up()) {
				sent_ready = 1;
				packet_write_fmt(1, "ACK %s ready\n", last_hex);
			}
			if (have_obj.nr == 0 || multi_ack)
				packet_write_fmt(1, "NAK\n");

			if (no_done && sent_ready) {
				packet_write_fmt(1, "ACK %s\n", last_hex);
				return 0;
			}
			if (stateless_rpc)
				exit(0);
			got_common = 0;
			got_other = 0;
			continue;
		}
		if (skip_prefix(line, "have ", &arg)) {
			switch (got_oid(arg, &oid)) {
			case -1: /* they have what we do not */
				got_other = 1;
				if (multi_ack && ok_to_give_up()) {
					const char *hex = oid_to_hex(&oid);
					if (multi_ack == 2) {
						sent_ready = 1;
						packet_write_fmt(1, "ACK %s ready\n", hex);
					} else
						packet_write_fmt(1, "ACK %s continue\n", hex);
				}
				break;
			default:
				got_common = 1;
				memcpy(last_hex, oid_to_hex(&oid), 41);
				if (multi_ack == 2)
					packet_write_fmt(1, "ACK %s common\n", last_hex);
				else if (multi_ack)
					packet_write_fmt(1, "ACK %s continue\n", last_hex);
				else if (have_obj.nr == 1)
					packet_write_fmt(1, "ACK %s\n", last_hex);
				break;
			}
			continue;
		}
		if (!strcmp(line, "done")) {
			if (have_obj.nr > 0) {
				if (multi_ack)
					packet_write_fmt(1, "ACK %s\n", last_hex);
				return 0;
			}
			packet_write_fmt(1, "NAK\n");
			return -1;
		}
		die("git upload-pack: expected SHA1 list, got '%s'", line);
	}
}

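A minimal sketch of the reading pattern from the pkt-line note above,
assumed to be compiled inside git with pkt-line.h available; the function
name is made up. packet_read_line() returns a pointer into the shared
LARGE_PACKET_MAX buffer, or NULL on a flush packet, so loops like the one
in get_common_commits() simply iterate until NULL:

/*
 * Hedged sketch of the packet_read_line() pattern; not the actual
 * negotiation loop.  The returned pointer aliases the shared static
 * buffer, so each line must be consumed before the next read.
 */
static void read_until_flush_sketch(int fd)
{
	for (;;) {
		char *line = packet_read_line(fd, NULL); /* NULL on flush-pkt */

		if (!line)
			break;
		fprintf(stderr, "got pkt-line: %s\n", line);
	}
}
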
static int is_our_ref(struct object *o)
{
	int allow_hidden_ref = (allow_unadvertised_object_request &
			(ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
	return o->flags & ((allow_hidden_ref ? HIDDEN_REF : 0) | OUR_REF);
}

/*
 * on successful case, it's up to the caller to close cmd->out
 */
static int do_reachable_revlist(struct child_process *cmd,
				struct object_array *src,
				struct object_array *reachable)
{
	static const char *argv[] = {
		"rev-list", "--stdin", NULL,
	};
	struct object *o;
	char namebuf[42]; /* ^ + SHA-1 + LF */
	int i;

	cmd->argv = argv;
	cmd->git_cmd = 1;
	cmd->no_stderr = 1;
	cmd->in = -1;
	cmd->out = -1;

	/*
	 * If the next rev-list --stdin encounters an unknown commit,
	 * it terminates, which will cause SIGPIPE in the write loop
	 * below.
	 */
	sigchain_push(SIGPIPE, SIG_IGN);

	if (start_command(cmd))
		goto error;

	namebuf[0] = '^';
	namebuf[GIT_SHA1_HEXSZ + 1] = '\n';
	for (i = get_max_object_index(); 0 < i; ) {
		o = get_indexed_object(--i);
		if (!o)
			continue;
		if (reachable && o->type == OBJ_COMMIT)
			o->flags &= ~TMP_MARK;
		if (!is_our_ref(o))
			continue;
		memcpy(namebuf + 1, oid_to_hex(&o->oid), GIT_SHA1_HEXSZ);
		if (write_in_full(cmd->in, namebuf, GIT_SHA1_HEXSZ + 2) < 0)
			goto error;
	}
	namebuf[GIT_SHA1_HEXSZ] = '\n';
	for (i = 0; i < src->nr; i++) {
		o = src->objects[i].item;
		if (is_our_ref(o)) {
			if (reachable)
				add_object_array(o, NULL, reachable);
			continue;
		}
		if (reachable && o->type == OBJ_COMMIT)
			o->flags |= TMP_MARK;
		memcpy(namebuf, oid_to_hex(&o->oid), GIT_SHA1_HEXSZ);
		if (write_in_full(cmd->in, namebuf, GIT_SHA1_HEXSZ + 1) < 0)
			goto error;
	}
	close(cmd->in);
	cmd->in = -1;
	sigchain_pop(SIGPIPE);

	return 0;

error:
	sigchain_pop(SIGPIPE);

	if (cmd->in >= 0)
		close(cmd->in);
	if (cmd->out >= 0)
		close(cmd->out);
	return -1;
}

static int get_reachable_list(struct object_array *src,
			      struct object_array *reachable)
{
	struct child_process cmd = CHILD_PROCESS_INIT;
	int i;
	struct object *o;
	char namebuf[42]; /* ^ + SHA-1 + LF */

	if (do_reachable_revlist(&cmd, src, reachable) < 0)
		return -1;

	while ((i = read_in_full(cmd.out, namebuf, 41)) == 41) {
		struct object_id sha1;

		if (namebuf[40] != '\n' || get_oid_hex(namebuf, &sha1))
			break;

		o = lookup_object(sha1.hash);
		if (o && o->type == OBJ_COMMIT) {
			o->flags &= ~TMP_MARK;
		}
	}
	for (i = get_max_object_index(); 0 < i; i--) {
		o = get_indexed_object(i - 1);
		if (o && o->type == OBJ_COMMIT &&
		    (o->flags & TMP_MARK)) {
			add_object_array(o, NULL, reachable);
			o->flags &= ~TMP_MARK;
		}
	}
	close(cmd.out);

	if (finish_command(&cmd))
		return -1;

	return 0;
}

static int has_unreachable(struct object_array *src)
{
	struct child_process cmd = CHILD_PROCESS_INIT;
	char buf[1];
	int i;

	if (do_reachable_revlist(&cmd, src, NULL) < 0)
		return 1;

	/*
	 * The commits out of the rev-list are not ancestors of
	 * our ref.
	 */
	i = read_in_full(cmd.out, buf, 1);
	if (i)
		goto error;
	close(cmd.out);
	cmd.out = -1;

	/*
	 * rev-list may have died by encountering a bad commit
	 * in the history, in which case we do want to bail out
	 * even when it showed no commit.
	 */
	if (finish_command(&cmd))
		goto error;

	/* All the non-tip ones are ancestors of what we advertised */
	return 0;

error:
	sigchain_pop(SIGPIPE);
	if (cmd.out >= 0)
		close(cmd.out);
	return 1;
}

static void check_non_tip(void)
{
	int i;

	/*
	 * In the normal in-process case without
	 * uploadpack.allowReachableSHA1InWant,
	 * non-tip requests can never happen.
	 */
	if (!stateless_rpc && !(allow_unadvertised_object_request & ALLOW_REACHABLE_SHA1))
		goto error;
	if (!has_unreachable(&want_obj))
		/* All the non-tip ones are ancestors of what we advertised */
		return;

error:
	/* Pick one of them (we know there at least is one) */
	for (i = 0; i < want_obj.nr; i++) {
		struct object *o = want_obj.objects[i].item;
		if (!is_our_ref(o))
			die("git upload-pack: not our ref %s",
			    oid_to_hex(&o->oid));
	}
}

static void send_shallow(struct commit_list *result)
{
	while (result) {
		struct object *object = &result->item->object;
		if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
			packet_write_fmt(1, "shallow %s",
					 oid_to_hex(&object->oid));
			register_shallow(&object->oid);
			shallow_nr++;
		}
		result = result->next;
	}
}

static void send_unshallow(const struct object_array *shallows)
{
	int i;

	for (i = 0; i < shallows->nr; i++) {
		struct object *object = shallows->objects[i].item;
		if (object->flags & NOT_SHALLOW) {
			struct commit_list *parents;
			packet_write_fmt(1, "unshallow %s",
					 oid_to_hex(&object->oid));
			object->flags &= ~CLIENT_SHALLOW;
			/*
			 * We want to _register_ "object" as shallow, but we
			 * also need to traverse object's parents to deepen a
			 * shallow clone. Unregister it for now so we can
			 * parse and add the parents to the want list, then
			 * re-register it.
			 */
			unregister_shallow(&object->oid);
			object->parsed = 0;
			parse_commit_or_die((struct commit *)object);
			parents = ((struct commit *)object)->parents;
			while (parents) {
				add_object_array(&parents->item->object,
						 NULL, &want_obj);
				parents = parents->next;
			}
			add_object_array(object, NULL, &extra_edge_obj);
		}
		/* make sure commit traversal conforms to client */
		register_shallow(&object->oid);
	}
}

fetch, upload-pack: --deepen=N extends shallow boundary by N commits

In git-fetch, the --depth argument is always relative to the latest remote
refs. This makes it a bit difficult to cover this use case, where the user
wants to make the shallow history, say, 3 levels deeper. It would work if
remote refs have not moved yet, but nobody can guarantee that, especially
when that use case is performed a couple months after the last clone or
"git fetch --depth". Also, modifying the shallow boundary using --depth
does not work well with clones created by --since or --not.

This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs are.

Have/Want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another to
extend shallow history. In theory, the client could send no "want"s in
order to get the second chunk only. But the protocol does not allow that.
Either you send no want lines, which means ls-remote; or you have to send
at least one want line that carries deep-relative to the server.

The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.

(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with the remote
side (and is more complicated to implement as a result).

Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
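In practice this means an existing shallow clone can be deepened in place:
for example, running "git fetch --deepen=3" asks the server for three more
generations of parents below the client's current shallow boundary,
regardless of how far the remote branch tips have advanced since the
original clone.
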
static void deepen(int depth, int deepen_relative,
		   struct object_array *shallows)
{
	if (depth == INFINITE_DEPTH && !is_repository_shallow()) {
		int i;

		for (i = 0; i < shallows->nr; i++) {
			struct object *object = shallows->objects[i].item;
			object->flags |= NOT_SHALLOW;
		}
	} else if (deepen_relative) {
		struct object_array reachable_shallows = OBJECT_ARRAY_INIT;
		struct commit_list *result;

		get_reachable_list(shallows, &reachable_shallows);
		result = get_shallow_commits(&reachable_shallows,
					     depth + 1,
					     SHALLOW, NOT_SHALLOW);
		send_shallow(result);
		free_commit_list(result);
		object_array_clear(&reachable_shallows);
	} else {
		struct commit_list *result;

		result = get_shallow_commits(&want_obj, depth,
					     SHALLOW, NOT_SHALLOW);
		send_shallow(result);
		free_commit_list(result);
	}

	send_unshallow(shallows);
	packet_flush(1);
}

static void deepen_by_rev_list(int ac, const char **av,
			       struct object_array *shallows)
{
	struct commit_list *result;

	result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW);
	send_shallow(result);
	free_commit_list(result);
	send_unshallow(shallows);
	packet_flush(1);
}

allow cloning a repository "shallowly"

By specifying a depth, you can now clone a repository such that all
fetched ancestor-chains' length is at most "depth". For example, if the
upstream repository has only 2 branches ("A" and "B"), which are linear,
and you specify depth 3, you will get A, A~1, A~2, A~3, B, B~1, B~2, and
B~3. The ends are automatically made shallow commits.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <junkio@cox.net>

static void receive_needs(void)
{
	struct object_array shallows = OBJECT_ARRAY_INIT;
	struct string_list deepen_not = STRING_LIST_INIT_DUP;
	int depth = 0;
	int has_non_tip = 0;
	timestamp_t deepen_since = 0;
	int deepen_rev_list = 0;

	shallow_nr = 0;
	for (;;) {
		struct object *o;
		const char *features;
		struct object_id oid_buf;
		char *line = packet_read_line(0, NULL);
		const char *arg;

		reset_timeout();
		if (!line)
			break;

		if (skip_prefix(line, "shallow ", &arg)) {
			struct object_id oid;
			struct object *object;
			if (get_oid_hex(arg, &oid))
				die("invalid shallow line: %s", line);
			object = parse_object(&oid);
			if (!object)
				continue;
			if (object->type != OBJ_COMMIT)
				die("invalid shallow object %s", oid_to_hex(&oid));
			if (!(object->flags & CLIENT_SHALLOW)) {
				object->flags |= CLIENT_SHALLOW;
				add_object_array(object, NULL, &shallows);
			}
			continue;
		}
		if (skip_prefix(line, "deepen ", &arg)) {
			char *end = NULL;
			depth = strtol(arg, &end, 0);
			if (!end || *end || depth <= 0)
				die("Invalid deepen: %s", line);
			continue;
		}
		if (skip_prefix(line, "deepen-since ", &arg)) {
			char *end = NULL;
			deepen_since = parse_timestamp(arg, &end, 0);
			if (!end || *end || !deepen_since ||
			    /* revisions.c's max_age -1 is special */
			    deepen_since == -1)
				die("Invalid deepen-since: %s", line);
			deepen_rev_list = 1;
			continue;
		}
		if (skip_prefix(line, "deepen-not ", &arg)) {
			char *ref = NULL;
			struct object_id oid;
			if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
				die("git upload-pack: ambiguous deepen-not: %s", line);
			string_list_append(&deepen_not, ref);
			free(ref);
			deepen_rev_list = 1;
			continue;
		}
		if (skip_prefix(line, "filter ", &arg)) {
			if (!filter_capability_requested)
				die("git upload-pack: filtering capability not negotiated");
			parse_list_objects_filter(&filter_options, arg);
			continue;
		}
		if (!skip_prefix(line, "want ", &arg) ||
		    get_oid_hex(arg, &oid_buf))
			die("git upload-pack: protocol error, "
			    "expected to get sha, not '%s'", line);

		features = arg + 40;

		if (parse_feature_request(features, "deepen-relative"))
			deepen_relative = 1;
		if (parse_feature_request(features, "multi_ack_detailed"))
			multi_ack = 2;
		else if (parse_feature_request(features, "multi_ack"))
			multi_ack = 1;
		if (parse_feature_request(features, "no-done"))
			no_done = 1;
		if (parse_feature_request(features, "thin-pack"))
			use_thin_pack = 1;
		if (parse_feature_request(features, "ofs-delta"))
			use_ofs_delta = 1;
		if (parse_feature_request(features, "side-band-64k"))
			use_sideband = LARGE_PACKET_MAX;
		else if (parse_feature_request(features, "side-band"))
			use_sideband = DEFAULT_PACKET_MAX;
		if (parse_feature_request(features, "no-progress"))
			no_progress = 1;
		if (parse_feature_request(features, "include-tag"))
			use_include_tag = 1;
		if (allow_filter && parse_feature_request(features, "filter"))
			filter_capability_requested = 1;

		o = parse_object(&oid_buf);
		if (!o) {
			packet_write_fmt(1,
					 "ERR upload-pack: not our ref %s",
					 oid_to_hex(&oid_buf));
			die("git upload-pack: not our ref %s",
			    oid_to_hex(&oid_buf));
		}
		if (!(o->flags & WANTED)) {
			o->flags |= WANTED;
			if (!((allow_unadvertised_object_request & ALLOW_ANY_SHA1) == ALLOW_ANY_SHA1
			      || is_our_ref(o)))
				has_non_tip = 1;
			add_object_array(o, NULL, &want_obj);
		}
	}

	/*
	 * We have sent all our refs already, and the other end
	 * should have chosen out of them. When we are operating
	 * in the stateless RPC mode, however, their choice may
	 * have been based on the set of older refs advertised
	 * by another process that handled the initial request.
	 */
	if (has_non_tip)
		check_non_tip();

	if (!use_sideband && daemon_mode)
		no_progress = 1;

	if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
		return;
	if (depth > 0 && deepen_rev_list)
		die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
	if (depth > 0)
		deepen(depth, deepen_relative, &shallows);
	else if (deepen_rev_list) {
		struct argv_array av = ARGV_ARRAY_INIT;
		int i;

		argv_array_push(&av, "rev-list");
		if (deepen_since)
			argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
		if (deepen_not.nr) {
			argv_array_push(&av, "--not");
			for (i = 0; i < deepen_not.nr; i++) {
				struct string_list_item *s = deepen_not.items + i;
				argv_array_push(&av, s->string);
			}
			argv_array_push(&av, "--not");
		}
		for (i = 0; i < want_obj.nr; i++) {
			struct object *o = want_obj.objects[i].item;
			argv_array_push(&av, oid_to_hex(&o->oid));
		}
		deepen_by_rev_list(av.argc, av.argv, &shallows);
		argv_array_clear(&av);
	}
	else
		if (shallows.nr > 0) {
			int i;
			for (i = 0; i < shallows.nr; i++)
				register_shallow(&shallows.objects[i].item->oid);
		}

	shallow_nr += shallows.nr;
	object_array_clear(&shallows);
}

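The first "want" line from the client carries the capability list after the
object name, which is why the loop above indexes features = arg + 40 past
the 40-character hex ID. A hedged, stand-alone sketch of that parse, using
the same skip_prefix()/get_oid_hex()/parse_feature_request() helpers (the
function name is made up, and a 40-character SHA-1 hex name is assumed, as
in the code above):

/*
 * Hedged sketch: parse one hypothetical first "want" line the way
 * receive_needs() does and report whether side-band-64k was requested.
 */
static int wants_sideband_64k_sketch(const char *line)
{
	const char *arg;
	struct object_id oid;

	if (!skip_prefix(line, "want ", &arg) || get_oid_hex(arg, &oid))
		return 0; /* not a well-formed want line */
	return parse_feature_request(arg + 40, "side-band-64k");
}
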
/* return non-zero if the ref is hidden, otherwise 0 */
static int mark_our_ref(const char *refname, const char *refname_full,
			const struct object_id *oid)
{
	struct object *o = lookup_unknown_object(oid->hash);

	if (ref_is_hidden(refname, refname_full)) {
		o->flags |= HIDDEN_REF;
		return 1;
	}
	o->flags |= OUR_REF;
	return 0;
}

2015-11-03 10:58:16 +03:00
|
|
|
static int check_ref(const char *refname_full, const struct object_id *oid,
|
2015-05-25 21:39:12 +03:00
|
|
|
int flag, void *cb_data)
|
upload-pack: fix transfer.hiderefs over smart-http
When upload-pack advertises the refs (either for a normal,
non-stateless request, or for the initial contact in a
stateless one), we call for_each_ref with the send_ref
function as its callback. send_ref, in turn, calls
mark_our_ref, which checks whether the ref is hidden, and
sets OUR_REF or HIDDEN_REF on the object as appropriate. If
it is hidden, mark_our_ref also returns "1" to signal
send_ref that the ref should not be advertised.
If we are not advertising refs, (i.e., the follow-up
invocation by an http client to send its "want" lines), we
use mark_our_ref directly as a callback to for_each_ref. Its
marking does the right thing, but when it then returns "1"
to for_each_ref, the latter interprets this as an error and
stops iterating. As a result, we skip marking all of the
refs that come lexicographically after it. Any "want" lines
from the client asking for those objects will fail, as they
were not properly marked with OUR_REF.
To solve this, we introduce a wrapper callback around
mark_our_ref which always returns 0 (even if the ref is
hidden, we want to keep iterating). We also tweak the
signature of mark_our_ref to exclude unnecessary parameters
that were present only to conform to the callback interface.
This should make it less likely for somebody to accidentally
use it as a callback in the future.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-13 07:42:12 +03:00
|
|
|
{
|
2015-11-03 10:58:16 +03:00
|
|
|
const char *refname = strip_namespace(refname_full);
|
|
|
|
|
|
|
|
mark_our_ref(refname, refname_full, oid);
|
	return 0;
}

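To make the iteration contract described above concrete, here is a minimal, self-contained sketch; the walker, callback, and names are hypothetical stand-ins rather than git's ref API. A for_each-style walker stops as soon as a callback returns non-zero, which is exactly why check_ref() above wraps mark_our_ref() and always returns 0.

#include <stdio.h>

typedef int (*visit_fn)(const char *name, void *cb_data);

/* Walks the list and stops early if the callback returns non-zero,
 * mirroring the for_each_ref() contract described above. */
static int visit_all(const char **names, visit_fn fn, void *cb_data)
{
	for (; *names; names++) {
		int ret = fn(*names, cb_data);
		if (ret)
			return ret;	/* a non-zero return aborts the walk */
	}
	return 0;
}

/* Counts every non-"hidden" name; returning 1 for hidden entries
 * instead of 0 would leave all later names unvisited. */
static int count_visible(const char *name, void *cb_data)
{
	int *count = cb_data;
	if (name[0] != '.')
		(*count)++;
	return 0;
}

int main(void)
{
	const char *names[] = { "refs/heads/master", ".hidden", "refs/tags/v1.0", NULL };
	int count = 0;

	visit_all(names, count_visible, &count);
	printf("visible: %d\n", count);	/* prints "visible: 2" */
	return 0;
}
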
static void format_symref_info(struct strbuf *buf, struct string_list *symref)
{
	struct string_list_item *item;

	if (!symref->nr)
		return;
	for_each_string_list_item(item, symref)
		strbuf_addf(buf, " symref=%s:%s", item->string, (char *)item->util);
}

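For reference, the token this helper appends per symref looks like " symref=HEAD:refs/heads/master". A tiny, self-contained sketch using the same format string (the HEAD and branch values are made up for the example):

#include <stdio.h>

int main(void)
{
	char buf[128];

	/* same format string as format_symref_info() uses per symref */
	snprintf(buf, sizeof(buf), " symref=%s:%s", "HEAD", "refs/heads/master");
	printf("%s\n", buf);	/* -> " symref=HEAD:refs/heads/master" */
	return 0;
}
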
fetch, upload-pack: --deepen=N extends shallow boundary by N commits
In git-fetch, the --depth argument is always relative to the latest
remote refs. This makes it a bit difficult to cover the use case
where the user wants to make the shallow history, say, 3 levels
deeper. It would work if remote refs have not moved yet, but nobody
can guarantee that, especially when that use case is performed a
couple of months after the last clone or "git fetch --depth". Also,
modifying the shallow boundary using --depth does not work well with
clones created by --since or --not.
This patch fixes that. A new argument --deepen=<N> will add <N> more (*)
parent commits to the current history regardless of where remote refs
are.
Have/want negotiation is still respected. So if remote refs move, the
server will send two chunks: one between "have" and "want" and another
to extend shallow history. In theory, the client could send no "want"s
in order to get the second chunk only. But the protocol does not allow
that. Either you send no want lines, which means ls-remote; or you
have to send at least one want line that carries deepen-relative to the
server.
The main work was done by Dongcan Jiang. I fixed it up here and there.
And of course all the bugs belong to me.
(*) We could even support --deepen=<N> where <N> is negative. In that
case we can cut some history from the shallow clone. This operation
(and --depth=<shorter depth>) does not require interaction with the
remote side (and is more complicated to implement as a result).
Helped-by: Duy Nguyen <pclouds@gmail.com>
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Dongcan Jiang <dongcan.jiang@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

static int send_ref(const char *refname, const struct object_id *oid,
		    int flag, void *cb_data)
{
	static const char *capabilities = "multi_ack thin-pack side-band"
		" side-band-64k ofs-delta shallow deepen-since deepen-not"
		" deepen-relative no-progress include-tag multi_ack_detailed";
	const char *refname_nons = strip_namespace(refname);
	struct object_id peeled;

	if (mark_our_ref(refname_nons, refname, oid))
		return 0;

	if (capabilities) {
		struct strbuf symref_info = STRBUF_INIT;

		format_symref_info(&symref_info, cb_data);
		packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
			     oid_to_hex(oid), refname_nons,
			     0, capabilities,
			     (allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
				     " allow-tip-sha1-in-want" : "",
			     (allow_unadvertised_object_request & ALLOW_REACHABLE_SHA1) ?
				     " allow-reachable-sha1-in-want" : "",
			     stateless_rpc ? " no-done" : "",
			     symref_info.buf,
			     allow_filter ? " filter" : "",
			     git_user_agent_sanitized());
		strbuf_release(&symref_info);
	} else {
		packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), refname_nons);
	}
	capabilities = NULL;
	if (!peel_ref(refname, &peeled))
		packet_write_fmt(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons);
	return 0;
}

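Each of these advertisement lines goes out through packet_write_fmt(), i.e. in pkt-line framing: a four-hex-digit length that counts the four prefix bytes themselves, followed by the payload, with a bare "0000" flush-pkt ending the advertisement. Below is a minimal, self-contained sketch of just that framing; it is not git's pkt-line.c, the object id and ref name are made up, and the NUL-separated capability list carried on the first advertised ref is omitted.

#include <stdio.h>
#include <string.h>

/* Prints one pkt-line: a 4-hex-digit length (which includes the 4
 * prefix bytes) followed by the payload. */
static void pkt_line(const char *payload)
{
	printf("%04x%s", (unsigned int)(strlen(payload) + 4), payload);
}

int main(void)
{
	pkt_line("95dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/master\n");
	fputs("0000", stdout);	/* flush-pkt, as sent by packet_flush(1) */
	return 0;
}
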
static int find_symref(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	const char *symref_target;
	struct string_list_item *item;

	if ((flag & REF_ISSYMREF) == 0)
		return 0;
	symref_target = resolve_ref_unsafe(refname, 0, NULL, &flag);
	if (!symref_target || (flag & REF_ISSYMREF) == 0)
		die("'%s' is a symref but it is not?", refname);
	item = string_list_append(cb_data, refname);
	item->util = xstrdup(symref_target);
	return 0;
}

static void upload_pack(void)
{
	struct string_list symref = STRING_LIST_INIT_DUP;

	head_ref_namespaced(find_symref, &symref);

	if (advertise_refs || !stateless_rpc) {
		reset_timeout();
		head_ref_namespaced(send_ref, &symref);
		for_each_namespaced_ref(send_ref, &symref);
		advertise_shallow_grafts(1);
		packet_flush(1);
	} else {
		head_ref_namespaced(check_ref, NULL);
		for_each_namespaced_ref(check_ref, NULL);
	}
	string_list_clear(&symref, 1);
	if (advertise_refs)
		return;

	receive_needs();
	if (want_obj.nr) {
		get_common_commits();
		create_pack_file();
	}
}

upload-pack: provide a hook for running pack-objects
When upload-pack serves a client request, it turns to
pack-objects to do the heavy lifting of creating a
packfile. There's no easy way to intercept the call to
pack-objects, but there are a few good reasons to want to do
so:
1. If you're debugging a client or server issue with
fetching, you may want to store a copy of the generated
packfile.
2. If you're gathering data from real-world fetches for
performance analysis or debugging, storing a copy of
the arguments and stdin lets you replay the pack
generation at your leisure.
3. You may want to insert a caching layer around
pack-objects; it is the most CPU- and memory-intensive
part of serving a fetch, and its output is a pure
function[1] of its input, making it an ideal place to
consolidate identical requests.
This patch adds a simple "hook" interface to intercept calls
to pack-objects. The new test demonstrates how it can be
used for debugging (using it for caching is a
straightforward extension; the tricky part is writing the
actual caching layer).
This hook is unlike the normal hook scripts found in the
"hooks/" directory of a repository. Because we promise that
upload-pack is safe to run in an untrusted repository, we
cannot execute arbitrary code or commands found in the
repository (neither in hooks/, nor in the config). So
instead, this hook is triggered from a config variable that
is explicitly ignored in the per-repo config.
The config variable holds the actual shell command to run as
the hook. Another approach would be to simply treat it as a
boolean: "should I respect the upload-pack hooks in this
repo?", and then run the script from "hooks/" as we usually
do. However, that isn't as flexible; there's no way to run a
hook approved by the site administrator (e.g., in
"/etc/gitconfig") on a repository whose contents are not
trusted. The approach taken by this patch is more
fine-grained, if a little less conventional for git hooks
(it does behave similarly to other configured commands like
diff.external, etc).
[1] Pack-objects isn't _actually_ a pure function. Its
output depends on the exact packing of the object
database, and if multi-threading is used for delta
compression, can even differ racily. But for the
purposes of caching, that's OK; of the many possible
outputs for a given input, it is sufficient only that we
output one of them.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

static int upload_pack_config(const char *var, const char *value, void *unused)
{
	if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
		if (git_config_bool(var, value))
			allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
		else
			allow_unadvertised_object_request &= ~ALLOW_TIP_SHA1;
	} else if (!strcmp("uploadpack.allowreachablesha1inwant", var)) {
		if (git_config_bool(var, value))
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
		else
			allow_unadvertised_object_request &= ~ALLOW_REACHABLE_SHA1;
	} else if (!strcmp("uploadpack.allowanysha1inwant", var)) {
		if (git_config_bool(var, value))
			allow_unadvertised_object_request |= ALLOW_ANY_SHA1;
		else
			allow_unadvertised_object_request &= ~ALLOW_ANY_SHA1;
	} else if (!strcmp("uploadpack.keepalive", var)) {
		keepalive = git_config_int(var, value);
		if (!keepalive)
			keepalive = -1;
	} else if (current_config_scope() != CONFIG_SCOPE_REPO) {
		if (!strcmp("uploadpack.packobjectshook", var))
			return git_config_string(&pack_objects_hook, var, value);
	} else if (!strcmp("uploadpack.allowfilter", var)) {
		allow_filter = git_config_bool(var, value);
	}
	return parse_hide_refs_config(var, value, "uploadpack");
}

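Because uploadpack.packObjectsHook (described before upload_pack_config() above) holds a literal shell command, a configured hook is ultimately run through the shell with the pack-objects command line handed to it. The following is only a rough, self-contained illustration of that shape, not git's actual run-command code; the hook path and the pack-objects arguments shown are hypothetical.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical configured value of uploadpack.packObjectsHook */
	const char *hook = "/usr/local/bin/log-pack-objects";
	char cmd[256];

	/* the hook receives the pack-objects invocation it is expected
	 * to run (or wrap); here we only show the shell-level shape */
	snprintf(cmd, sizeof(cmd), "%s git pack-objects --revs --stdout", hook);
	return system(cmd) != 0;
}
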
add an extra level of indirection to main()
There are certain startup tasks that we expect every git
process to do. In some cases this is just to improve the
quality of the program (e.g., setting up gettext()). In
others it is a requirement for using certain functions in
libgit.a (e.g., system_path() expects that you have called
git_extract_argv0_path()).
Most commands are builtins and are covered by the git.c
version of main(). However, there are still a few external
commands that use their own main(). Each of these has to
remember to include the correct startup sequence, and we are
not always consistent.
Rather than just fix the inconsistencies, let's make this
harder to get wrong by providing a common main() that can
run this standard startup.
We basically have two options to do this:
- the compat/mingw.h file already does something like this by
adding a #define that replaces the definition of main with a
wrapper that calls mingw_startup().
The upside is that the code in each program doesn't need
to be changed at all; it's rewritten on the fly by the
preprocessor.
The downside is that it may make debugging of the startup
sequence a bit more confusing, as the preprocessor is
quietly inserting new code.
- the builtin functions are all of the form cmd_foo(),
and git.c's main() calls them.
This is much more explicit, which may make things more
obvious to somebody reading the code. It's also more
flexible (because of course we have to figure out _which_
cmd_foo() to call).
The downside is that each of the builtins must define
cmd_foo(), instead of just main().
This patch chooses the latter option, preferring the more
explicit approach, even though it is more invasive. We
introduce a new file common-main.c, with the "real" main. It
expects to call cmd_main() from whatever other objects it is
linked against.
We link common-main.o against anything that links against
libgit.a, since we know that such programs will need to do
this setup. Note that common-main.o can't actually go inside
libgit.a, as the linker would not pick up its main()
function automatically (it has no callers).
The rest of the patch is just adjusting all of the various
external programs (mostly in t/helper) to use cmd_main().
I've provided a global declaration for cmd_main(), which
means that all of the programs also need to match its
signature. In particular, many functions need to switch to
"const char **" instead of "char **" for argv. This effect
ripples out to a few other variables and functions, as well.
This makes the patch even more invasive, but the end result
is much better. We should be treating argv strings as const
anyway, and now all programs conform to the same signature
(which also matches the way builtins are defined).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

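A minimal sketch of the indirection described above, under the assumption that the shared main() only needs to forward to cmd_main(); the real common-main.c also performs the common startup (argv0-path extraction, gettext setup, and so on) before dispatching.

int cmd_main(int argc, const char **argv);

int main(int argc, char **argv)
{
	/* common startup would run here before handing off */
	return cmd_main(argc, (const char **)argv);
}
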
int cmd_main(int argc, const char **argv)
{
	const char *dir;
	int strict = 0;
	struct option options[] = {
		OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
			 N_("quit after a single request/response exchange")),
		OPT_BOOL(0, "advertise-refs", &advertise_refs,
			 N_("exit immediately after initial ref advertisement")),
		OPT_BOOL(0, "strict", &strict,
			 N_("do not try <directory>/.git/ if <directory> is no Git directory")),
		OPT_INTEGER(0, "timeout", &timeout,
			    N_("interrupt transfer after <n> seconds of inactivity")),
		OPT_END()
	};

	packet_trace_identity("upload-pack");
	check_replace_refs = 0;

	argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);

	if (argc != 1)
		usage_with_options(upload_pack_usage, options);

	if (timeout)
		daemon_mode = 1;

	setup_path();

	dir = argv[0];

	if (!enter_repo(dir, strict))
		die("'%s' does not appear to be a git repository", dir);

	git_config(upload_pack_config, NULL);

	switch (determine_protocol_version_server()) {
	case protocol_v1:
		/*
		 * v1 is just the original protocol with a version string,
		 * so just fall through after writing the version string.
		 */
		if (advertise_refs || !stateless_rpc)
			packet_write_fmt(1, "version 1\n");

		/* fallthrough */
	case protocol_v0:
		upload_pack();
		break;
	case protocol_unknown_version:
		BUG("unknown protocol version");
	}

	return 0;
}