#include "builtin.h"
#include "cache.h"
#include "bulk-checkin.h"
#include "config.h"
#include "object-store.h"
#include "object.h"
#include "delta.h"
#include "pack.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "tree-walk.h"
#include "progress.h"
#include "decorate.h"
#include "fsck.h"

static int dry_run, quiet, recover, has_errors, strict;
static const char unpack_usage[] = "git unpack-objects [-n] [-q] [-r] [--strict]";

/* We always read in 4kB chunks. */
static unsigned char buffer[4096];
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
static struct progress *progress;

/*
 * When running under --strict mode, objects whose reachability is
 * suspect are kept in core without getting written to the object
 * store.
 */
struct obj_buffer {
	char *buffer;
	unsigned long size;
};

static struct decoration obj_decorate;

static struct obj_buffer *lookup_object_buffer(struct object *base)
{
	return lookup_decoration(&obj_decorate, base);
}

static void add_object_buffer(struct object *object, char *buffer, unsigned long size)
{
	struct obj_buffer *obj;

	CALLOC_ARRAY(obj, 1);
	obj->buffer = buffer;
	obj->size = size;
	if (add_decoration(&obj_decorate, object, obj))
		die("object %s tried to add buffer twice!", oid_to_hex(&object->oid));
}

/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 */
static void *fill(int min)
{
	if (min <= len)
		return buffer + offset;
	if (min > sizeof(buffer))
		die("cannot fill %d bytes", min);
	if (offset) {
		the_hash_algo->update_fn(&ctx, buffer, offset);
		memmove(buffer, buffer + offset, len);
		offset = 0;
	}
	do {
		ssize_t ret = xread(0, buffer + len, sizeof(buffer) - len);
		if (ret <= 0) {
			if (!ret)
				die("early EOF");
			die_errno("read error on input");
		}
		len += ret;
	} while (len < min);
	return buffer;
}
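
/*
 * Consume "bytes" bytes that fill() made available: advance the buffer
 * window and the running total used for the off_t overflow and
 * --max-input-size checks.
 */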
static void use(int bytes)
{
	if (bytes > len)
		die("used more bytes than were available");
	len -= bytes;
	offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die("pack too large for current definition of off_t");
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size)
		die(_("pack exceeds maximum allowed size"));
	display_throughput(progress, consumed_bytes);
}

/*
 * Decompress a zlib stream from the standard input into a newly
 * allocated buffer of the specified size and return the buffer.
 * The caller is responsible for freeing the returned buffer.
 *
 * In dry_run mode, however, get_data() is only used to check the
 * integrity of the data, and the returned buffer is not used at all.
 * Therefore, in dry_run mode, get_data() releases the small allocated
 * buffer, which is only reused to hold temporary zstream output, and
 * returns NULL instead of returning garbage data.
 */
static void *get_data(unsigned long size)
{
	git_zstream stream;
	unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
	void *buf = xmallocz(bufsize);

	memset(&stream, 0, sizeof(stream));

	stream.next_out = buf;
	stream.avail_out = bufsize;
	stream.next_in = fill(1);
	stream.avail_in = len;
	git_inflate_init(&stream);

	for (;;) {
		int ret = git_inflate(&stream, 0);
		use(len - stream.avail_in);
		if (stream.total_out == size && ret == Z_STREAM_END)
			break;
		if (ret != Z_OK) {
			error("inflate returned %d", ret);
			FREE_AND_NULL(buf);
			if (!recover)
				exit(1);
			has_errors = 1;
			break;
		}
		stream.next_in = fill(1);
		stream.avail_in = len;
		if (dry_run) {
			/* reuse the buffer in dry_run mode */
			stream.next_out = buf;
			stream.avail_out = bufsize > size - stream.total_out ?
						   size - stream.total_out :
						   bufsize;
		}
	}
	git_inflate_end(&stream);
	if (dry_run)
		FREE_AND_NULL(buf);
	return buf;
}
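
/*
 * A delta whose base we have not seen yet: it waits on delta_list,
 * keyed by the base's object id (OBJ_REF_DELTA) or by the base's
 * offset in the pack (OBJ_OFS_DELTA).
 */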
struct delta_info {
	struct object_id base_oid;
	unsigned nr;
	off_t base_offset;
	unsigned long size;
	void *delta;
	struct delta_info *next;
};

static struct delta_info *delta_list;

static void add_delta_to_list(unsigned nr, const struct object_id *base_oid,
			      off_t base_offset,
			      void *delta, unsigned long size)
{
	struct delta_info *info = xmalloc(sizeof(*info));

	oidcpy(&info->base_oid, base_oid);
	info->base_offset = base_offset;
	info->size = size;
	info->delta = delta;
	info->nr = nr;
	info->next = delta_list;
	delta_list = info;
}
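
/*
 * One entry per object in the pack, in pack order; the recorded offset
 * is what lets an OBJ_OFS_DELTA find its base by binary search.
 */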
struct obj_info {
	off_t offset;
	struct object_id oid;
	struct object *obj;
};

/* Remember to update object flag allocation in object.h */
#define FLAG_OPEN (1u<<20)
#define FLAG_WRITTEN (1u<<21)

static struct obj_info *obj_list;
static unsigned nr_objects;

/*
 * Called only from check_object() after it verified this object
 * is Ok.
 */
static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
{
	struct object_id oid;

	if (write_object_file(obj_buf->buffer, obj_buf->size,
			      obj->type, &oid) < 0)
		die("failed to write object %s", oid_to_hex(&obj->oid));
	obj->flags |= FLAG_WRITTEN;
}

/*
 * At the very end of processing, write_rest() scans the objects
 * that have reachability requirements and calls this function.
 * Verify each one's reachability and validity recursively and
 * write it out.
 */
static int check_object(struct object *obj, enum object_type type,
			void *data, struct fsck_options *options)
{
	struct obj_buffer *obj_buf;

	if (!obj)
		return 1;

	if (obj->flags & FLAG_WRITTEN)
		return 0;

	if (type != OBJ_ANY && obj->type != type)
		die("object type mismatch");

	if (!(obj->flags & FLAG_OPEN)) {
		unsigned long size;
		int type = oid_object_info(the_repository, &obj->oid, &size);
		if (type != obj->type || type <= 0)
			die("object of unexpected type");
		obj->flags |= FLAG_WRITTEN;
		return 0;
	}

	obj_buf = lookup_object_buffer(obj);
	if (!obj_buf)
		die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid));
	if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options))
		die("fsck error in packed object");
	fsck_options.walk = check_object;
	if (fsck_walk(obj, NULL, &fsck_options))
		die("Error on reachable objects of %s", oid_to_hex(&obj->oid));
	write_cached_object(obj, obj_buf);
	return 0;
}
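
/* Write out the objects held in core, now the whole pack has been seen. */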
static void write_rest(void)
{
	unsigned i;
	for (i = 0; i < nr_objects; i++) {
		if (obj_list[i].obj)
			check_object(obj_list[i].obj, OBJ_ANY, NULL, NULL);
	}
}

static void added_object(unsigned nr, enum object_type type,
			 void *data, unsigned long size);

/*
 * Write out the nr-th object from the list, now that we know its
 * contents. Under --strict, this buffers structured objects in-core,
 * to be checked at the end.
 */
static void write_object(unsigned nr, enum object_type type,
			 void *buf, unsigned long size)
{
	if (!strict) {
		if (write_object_file(buf, size, type,
				      &obj_list[nr].oid) < 0)
			die("failed to write object");
		added_object(nr, type, buf, size);
		free(buf);
		obj_list[nr].obj = NULL;
	} else if (type == OBJ_BLOB) {
		struct blob *blob;
		if (write_object_file(buf, size, type,
				      &obj_list[nr].oid) < 0)
			die("failed to write object");
		added_object(nr, type, buf, size);
		free(buf);

		blob = lookup_blob(the_repository, &obj_list[nr].oid);
		if (blob)
			blob->object.flags |= FLAG_WRITTEN;
		else
			die("invalid blob object");
		obj_list[nr].obj = NULL;
	} else {
		struct object *obj;
		int eaten;
		hash_object_file(the_hash_algo, buf, size, type,
				 &obj_list[nr].oid);
		added_object(nr, type, buf, size);
		obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
					  type, size, buf,
					  &eaten);
		if (!obj)
			die("invalid %s", type_name(type));
		add_object_buffer(obj, buf, size);
		obj->flags |= FLAG_OPEN;
		obj_list[nr].obj = obj;
	}
}
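
/* Apply a delta on top of its now-known base and write out the result. */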
static void resolve_delta(unsigned nr, enum object_type type,
			  void *base, unsigned long base_size,
			  void *delta, unsigned long delta_size)
{
	void *result;
	unsigned long result_size;

	result = patch_delta(base, base_size,
			     delta, delta_size,
			     &result_size);
	if (!result)
		die("failed to apply delta");
	free(delta);
	write_object(nr, type, result, result_size);
}

/*
 * We now know the contents of an object (which is nr-th in the pack);
 * resolve all the deltified objects that are based on it.
 */
static void added_object(unsigned nr, enum object_type type,
			 void *data, unsigned long size)
{
	struct delta_info **p = &delta_list;
	struct delta_info *info;

	while ((info = *p) != NULL) {
		if (oideq(&info->base_oid, &obj_list[nr].oid) ||
		    info->base_offset == obj_list[nr].offset) {
			*p = info->next;
			p = &delta_list;
			resolve_delta(info->nr, type, data, size,
				      info->delta, info->size);
			free(info);
			continue;
		}
		p = &info->next;
	}
}

static void unpack_non_delta_entry(enum object_type type, unsigned long size,
				   unsigned nr)
{
	void *buf = get_data(size);

	if (buf)
		write_object(nr, type, buf, size);
}
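
/*
 * Input adapter for stream_loose_object(): inflates the blob's zstream
 * from the pack on standard input and hands it out in 8kB chunks.
 */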
struct input_zstream_data {
	git_zstream *zstream;
	unsigned char buf[8192];
	int status;
};

static const void *feed_input_zstream(struct input_stream *in_stream,
				      unsigned long *readlen)
{
	struct input_zstream_data *data = in_stream->data;
	git_zstream *zstream = data->zstream;
	void *in = fill(1);

	if (in_stream->is_finished) {
		*readlen = 0;
		return NULL;
	}

	zstream->next_out = data->buf;
	zstream->avail_out = sizeof(data->buf);
	zstream->next_in = in;
	zstream->avail_in = len;

	data->status = git_inflate(zstream, 0);

	in_stream->is_finished = data->status != Z_OK;
	use(len - zstream->avail_in);
	*readlen = sizeof(data->buf) - zstream->avail_out;

	return data->buf;
}
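
/*
 * Stream a blob of the given size from the pack directly to a loose
 * object file, so the whole (possibly huge) blob is never held in
 * memory at once.
 */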
static void stream_blob(unsigned long size, unsigned nr)
{
	git_zstream zstream = { 0 };
	struct input_zstream_data data = { 0 };
	struct input_stream in_stream = {
		.read = feed_input_zstream,
		.data = &data,
	};
	struct obj_info *info = &obj_list[nr];

	data.zstream = &zstream;
	git_inflate_init(&zstream);

	if (stream_loose_object(&in_stream, size, &info->oid))
		die(_("failed to write object in stream"));

	if (data.status != Z_STREAM_END)
		die(_("inflate returned (%d)"), data.status);
	git_inflate_end(&zstream);

	if (strict) {
		struct blob *blob = lookup_blob(the_repository, &info->oid);

		if (!blob)
			die(_("invalid blob object from stream"));
		blob->object.flags |= FLAG_WRITTEN;
	}
	info->obj = NULL;
}
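
/*
 * If the delta base is an object we are still holding in core (under
 * --strict), resolve the delta against its cached buffer; returns 1 if
 * the delta was handled that way.
 */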
static int resolve_against_held(unsigned nr, const struct object_id *base,
				void *delta_data, unsigned long delta_size)
{
	struct object *obj;
	struct obj_buffer *obj_buffer;
	obj = lookup_object(the_repository, base);
	if (!obj)
		return 0;
	obj_buffer = lookup_object_buffer(obj);
	if (!obj_buffer)
		return 0;
	resolve_delta(nr, obj->type, obj_buffer->buffer,
		      obj_buffer->size, delta_data, delta_size);
	return 1;
}
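
/*
 * An OBJ_REF_DELTA names its base by object id; an OBJ_OFS_DELTA names
 * it by a negative offset within the pack. Resolve the delta right away
 * if the base is already available, otherwise queue it on delta_list.
 */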
static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
			       unsigned nr)
{
	void *delta_data, *base;
	unsigned long base_size;
	struct object_id base_oid;

	if (type == OBJ_REF_DELTA) {
		oidread(&base_oid, fill(the_hash_algo->rawsz));
		use(the_hash_algo->rawsz);
		delta_data = get_data(delta_size);
		if (!delta_data)
			return;
		if (has_object_file(&base_oid))
			; /* Ok we have this one */
		else if (resolve_against_held(nr, &base_oid,
					      delta_data, delta_size))
			return; /* we are done */
		else {
			/* cannot resolve yet --- queue it */
			oidclr(&obj_list[nr].oid);
			add_delta_to_list(nr, &base_oid, 0, delta_data, delta_size);
			return;
		}
	} else {
		unsigned base_found = 0;
		unsigned char *pack, c;
		off_t base_offset;
		unsigned lo, mid, hi;

		pack = fill(1);
		c = *pack;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				die("offset value overflow for delta base object");
			pack = fill(1);
			c = *pack;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		base_offset = obj_list[nr].offset - base_offset;
		if (base_offset <= 0 || base_offset >= obj_list[nr].offset)
			die("offset value out of bound for delta base object");

		delta_data = get_data(delta_size);
		if (!delta_data)
			return;
		lo = 0;
		hi = nr;
		while (lo < hi) {
			mid = lo + (hi - lo) / 2;
			if (base_offset < obj_list[mid].offset) {
				hi = mid;
			} else if (base_offset > obj_list[mid].offset) {
				lo = mid + 1;
			} else {
				oidcpy(&base_oid, &obj_list[mid].oid);
				base_found = !is_null_oid(&base_oid);
				break;
			}
		}
		if (!base_found) {
			/*
			 * The delta base object is itself a delta that
			 * has not been resolved yet.
			 */
			oidclr(&obj_list[nr].oid);
			add_delta_to_list(nr, null_oid(), base_offset,
					  delta_data, delta_size);
			return;
		}
	}

	if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
		return;

	base = read_object_file(&base_oid, &type, &base_size);
	if (!base) {
		error("failed to read delta-pack base object %s",
		      oid_to_hex(&base_oid));
		if (!recover)
			exit(1);
		has_errors = 1;
		return;
	}
	resolve_delta(nr, type, base, base_size, delta_data, delta_size);
	free(base);
}
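
/*
 * Parse the nr-th object's header (3-bit type plus variable-length
 * size) and dispatch it to the matching unpacker.
 */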
static void unpack_one(unsigned nr)
{
	unsigned shift;
	unsigned char *pack;
	unsigned long size, c;
	enum object_type type;

	obj_list[nr].offset = consumed_bytes;

	pack = fill(1);
	c = *pack;
	use(1);
	type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		pack = fill(1);
		c = *pack;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}

	switch (type) {
	case OBJ_BLOB:
		if (!dry_run && size > big_file_threshold) {
			stream_blob(size, nr);
			return;
		}
		/* fallthrough */
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_TAG:
		unpack_non_delta_entry(type, size, nr);
		return;
	case OBJ_REF_DELTA:
	case OBJ_OFS_DELTA:
		unpack_delta_entry(type, size, nr);
		return;
	default:
		error("bad object type %d", type);
		has_errors = 1;
		if (recover)
			return;
		exit(1);
	}
}
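
/*
 * Read and validate the pack header, then unpack every object in pack
 * order inside a single object database transaction.
 */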
static void unpack_all(void)
{
	int i;
	struct pack_header *hdr = fill(sizeof(struct pack_header));

	nr_objects = ntohl(hdr->hdr_entries);

	if (ntohl(hdr->hdr_signature) != PACK_SIGNATURE)
		die("bad pack file");
	if (!pack_version_ok(hdr->hdr_version))
		die("unknown pack file version %"PRIu32,
		    ntohl(hdr->hdr_version));
	use(sizeof(struct pack_header));

	if (!quiet)
		progress = start_progress(_("Unpacking objects"), nr_objects);
	CALLOC_ARRAY(obj_list, nr_objects);
	begin_odb_transaction();
	for (i = 0; i < nr_objects; i++) {
		unpack_one(i);
		display_progress(progress, i + 1);
	}
	end_odb_transaction();
	stop_progress(&progress);

	if (delta_list)
		die("unresolved deltas left after unpacking");
}
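
/*
 * Reads a pack stream from standard input, hashing everything consumed
 * so that the trailing pack checksum can be verified; any bytes left in
 * the buffer after the pack ends are copied to standard output.
 */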
int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
{
	int i;
	struct object_id oid;

	read_replace_refs = 0;

	git_config(git_default_config, NULL);

	quiet = !isatty(2);

	for (i = 1 ; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg == '-') {
			if (!strcmp(arg, "-n")) {
				dry_run = 1;
				continue;
			}
			if (!strcmp(arg, "-q")) {
				quiet = 1;
				continue;
			}
			if (!strcmp(arg, "-r")) {
				recover = 1;
				continue;
			}
			if (!strcmp(arg, "--strict")) {
				strict = 1;
				continue;
			}
			if (skip_prefix(arg, "--strict=", &arg)) {
				strict = 1;
				fsck_set_msg_types(&fsck_options, arg);
				continue;
			}
			if (starts_with(arg, "--pack_header=")) {
				struct pack_header *hdr;
				char *c;

				hdr = (struct pack_header *)buffer;
				hdr->hdr_signature = htonl(PACK_SIGNATURE);
				hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
				if (*c != ',')
					die("bad %s", arg);
				hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
				if (*c)
					die("bad %s", arg);
				len = sizeof(*hdr);
				continue;
			}
			if (skip_prefix(arg, "--max-input-size=", &arg)) {
				max_input_size = strtoumax(arg, NULL, 10);
				continue;
			}
			usage(unpack_usage);
		}

		/* We don't take any non-flag arguments now... Maybe some day */
		usage(unpack_usage);
	}
	the_hash_algo->init_fn(&ctx);
	unpack_all();
	the_hash_algo->update_fn(&ctx, buffer, offset);
	the_hash_algo->final_oid_fn(&oid, &ctx);
	if (strict) {
		write_rest();
		if (fsck_finish(&fsck_options))
			die(_("fsck error in pack objects"));
	}
	if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
		die("final sha1 did not match");
	use(the_hash_algo->rawsz);

	/* Write the last part of the buffer to stdout */
	while (len) {
		int ret = xwrite(1, buffer + offset, len);
		if (ret <= 0)
			break;
		len -= ret;
		offset += ret;
	}

	/* All done */
	return has_errors;
}