Mirror of https://github.com/microsoft/git.git
Merge branch 'bc/hash-transition-16'
Conversion from unsigned char[20] to struct object_id continues.

* bc/hash-transition-16: (35 commits)
  gitweb: make hash size independent
  Git.pm: make hash size independent
  read-cache: read data in a hash-independent way
  dir: make untracked cache extension hash size independent
  builtin/difftool: use parse_oid_hex
  refspec: make hash size independent
  archive: convert struct archiver_args to object_id
  builtin/get-tar-commit-id: make hash size independent
  get-tar-commit-id: parse comment record
  hash: add a function to lookup hash algorithm by length
  remote-curl: make hash size independent
  http: replace sha1_to_hex
  http: compute hash of downloaded objects using the_hash_algo
  http: replace hard-coded constant with the_hash_algo
  http-walker: replace sha1_to_hex
  http-push: remove remaining uses of sha1_to_hex
  http-backend: allow 64-character hex names
  http-push: convert to use the_hash_algo
  builtin/pull: make hash-size independent
  builtin/am: make hash size independent
  ...
This commit is contained in:
Commit d4e568b2a3
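The series below follows one recurring pattern: code that held a raw unsigned char[20] buffer and used the SHA-1 constants (GIT_SHA1_RAWSZ, GIT_SHA1_HEXSZ, sha1_to_hex()) is switched to struct object_id, oid_to_hex()/hash_to_hex(), and the run-time sizes in the_hash_algo. As a rough illustration of the before/after shape, here is a minimal, self-contained sketch; the struct and the algorithm table are simplified stand-ins, not the real Git definitions:

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the real Git types (illustration only). */
#define MAX_RAWSZ 32                    /* large enough for SHA-256 */
struct object_id { unsigned char hash[MAX_RAWSZ]; };
struct hash_algo { size_t rawsz, hexsz; };
static const struct hash_algo sha1_algo   = { 20, 40 };
static const struct hash_algo sha256_algo = { 32, 64 };
static const struct hash_algo *the_algo = &sha1_algo;  /* chosen at run time in real Git */

/*
 * Hash-size-independent hex formatting: loop over the_algo->rawsz
 * instead of a hard-coded 20, so the same code serves both algorithms.
 */
static const char *oid_to_hex_sketch(const struct object_id *oid)
{
	static char buf[2 * MAX_RAWSZ + 1];
	for (size_t i = 0; i < the_algo->rawsz; i++)
		snprintf(buf + 2 * i, 3, "%02x", oid->hash[i]);
	return buf;
}

int main(void)
{
	struct object_id oid;
	memset(oid.hash, 0xab, sizeof(oid.hash));

	printf("%zu hex chars: %s\n", the_algo->hexsz, oid_to_hex_sketch(&oid));
	the_algo = &sha256_algo;	/* same code, longer hash */
	printf("%zu hex chars: %s\n", the_algo->hexsz, oid_to_hex_sketch(&oid));
	return 0;
}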
@@ -326,14 +326,15 @@ static int write_tar_entry(struct archiver_args *args,

 static void write_global_extended_header(struct archiver_args *args)
 {
-	const unsigned char *sha1 = args->commit_sha1;
+	const struct object_id *oid = args->commit_oid;
 	struct strbuf ext_header = STRBUF_INIT;
 	struct ustar_header header;
 	unsigned int mode;

-	if (sha1)
+	if (oid)
 		strbuf_append_ext_header(&ext_header, "comment",
-					 sha1_to_hex(sha1), 40);
+					 oid_to_hex(oid),
+					 the_hash_algo->hexsz);
 	if (args->time > USTAR_MAX_MTIME) {
 		strbuf_append_ext_header_uint(&ext_header, "mtime",
 					      args->time);
@@ -577,7 +577,7 @@ static void write_zip64_trailer(void)
 	write_or_die(1, &locator64, ZIP64_DIR_TRAILER_LOCATOR_SIZE);
 }

-static void write_zip_trailer(const unsigned char *sha1)
+static void write_zip_trailer(const struct object_id *oid)
 {
 	struct zip_dir_trailer trailer;
 	int clamped = 0;
@@ -590,14 +590,14 @@ static void write_zip_trailer(const unsigned char *sha1)
 	copy_le16_clamp(trailer.entries, zip_dir_entries, &clamped);
 	copy_le32(trailer.size, zip_dir.len);
 	copy_le32_clamp(trailer.offset, zip_offset, &clamped);
-	copy_le16(trailer.comment_length, sha1 ? GIT_SHA1_HEXSZ : 0);
+	copy_le16(trailer.comment_length, oid ? the_hash_algo->hexsz : 0);

 	write_or_die(1, zip_dir.buf, zip_dir.len);
 	if (clamped)
 		write_zip64_trailer();
 	write_or_die(1, &trailer, ZIP_DIR_TRAILER_SIZE);
-	if (sha1)
-		write_or_die(1, sha1_to_hex(sha1), GIT_SHA1_HEXSZ);
+	if (oid)
+		write_or_die(1, oid_to_hex(oid), the_hash_algo->hexsz);
 }

 static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time)
@@ -635,7 +635,7 @@ static int write_zip_archive(const struct archiver *ar,

 	err = write_archive_entries(args, write_zip_entry);
 	if (!err)
-		write_zip_trailer(args->commit_sha1);
+		write_zip_trailer(args->commit_oid);

 	strbuf_release(&zip_dir);
@@ -380,7 +380,7 @@ static void parse_treeish_arg(const char **argv,
 			      int remote)
 {
 	const char *name = argv[0];
-	const unsigned char *commit_sha1;
+	const struct object_id *commit_oid;
 	time_t archive_time;
 	struct tree *tree;
 	const struct commit *commit;
@@ -402,10 +402,10 @@ static void parse_treeish_arg(const char **argv,

 	commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1);
 	if (commit) {
-		commit_sha1 = commit->object.oid.hash;
+		commit_oid = &commit->object.oid;
 		archive_time = commit->date;
 	} else {
-		commit_sha1 = NULL;
+		commit_oid = NULL;
 		archive_time = time(NULL);
 	}

@@ -426,7 +426,7 @@ static void parse_treeish_arg(const char **argv,
 		tree = parse_tree_indirect(&tree_oid);
 	}
 	ar_args->tree = tree;
-	ar_args->commit_sha1 = commit_sha1;
+	ar_args->commit_oid = commit_oid;
 	ar_args->commit = commit;
 	ar_args->time = archive_time;
 }
@@ -11,7 +11,7 @@ struct archiver_args {
 	const char *base;
 	size_t baselen;
 	struct tree *tree;
-	const unsigned char *commit_sha1;
+	const struct object_id *commit_oid;
 	const struct commit *commit;
 	timestamp_t time;
 	struct pathspec pathspec;
@@ -486,23 +486,24 @@ static int copy_notes_for_rebase(const struct am_state *state)

 	while (!strbuf_getline_lf(&sb, fp)) {
 		struct object_id from_obj, to_obj;
+		const char *p;

-		if (sb.len != GIT_SHA1_HEXSZ * 2 + 1) {
+		if (sb.len != the_hash_algo->hexsz * 2 + 1) {
 			ret = error(invalid_line, sb.buf);
 			goto finish;
 		}

-		if (get_oid_hex(sb.buf, &from_obj)) {
+		if (parse_oid_hex(sb.buf, &from_obj, &p)) {
 			ret = error(invalid_line, sb.buf);
 			goto finish;
 		}

-		if (sb.buf[GIT_SHA1_HEXSZ] != ' ') {
+		if (*p != ' ') {
 			ret = error(invalid_line, sb.buf);
 			goto finish;
 		}

-		if (get_oid_hex(sb.buf + GIT_SHA1_HEXSZ + 1, &to_obj)) {
+		if (get_oid_hex(p + 1, &to_obj)) {
 			ret = error(invalid_line, sb.buf);
 			goto finish;
 		}
@@ -65,14 +65,12 @@ static int parse_index_info(char *p, int *mode1, int *mode2,
 	*mode2 = (int)strtol(p + 1, &p, 8);
 	if (*p != ' ')
 		return error("expected ' ', got '%c'", *p);
-	if (get_oid_hex(++p, oid1))
-		return error("expected object ID, got '%s'", p + 1);
-	p += GIT_SHA1_HEXSZ;
+	if (parse_oid_hex(++p, oid1, (const char **)&p))
+		return error("expected object ID, got '%s'", p);
 	if (*p != ' ')
 		return error("expected ' ', got '%c'", *p);
-	if (get_oid_hex(++p, oid2))
-		return error("expected object ID, got '%s'", p + 1);
-	p += GIT_SHA1_HEXSZ;
+	if (parse_oid_hex(++p, oid2, (const char **)&p))
+		return error("expected object ID, got '%s'", p);
 	if (*p != ' ')
 		return error("expected ' ', got '%c'", *p);
 	*status = *++p;
@@ -21,6 +21,8 @@ int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix)
 	char *content = buffer + RECORDSIZE;
 	const char *comment;
 	ssize_t n;
+	long len;
+	char *end;

 	if (argc != 1)
 		usage(builtin_get_tar_commit_id_usage);
@@ -32,10 +34,18 @@ int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix)
 		die_errno("git get-tar-commit-id: EOF before reading tar header");
 	if (header->typeflag[0] != 'g')
 		return 1;
-	if (!skip_prefix(content, "52 comment=", &comment))
+
+	len = strtol(content, &end, 10);
+	if (errno == ERANGE || end == content || len < 0)
+		return 1;
+	if (!skip_prefix(end, " comment=", &comment))
 		return 1;
+	len -= comment - content;
+	if (len < 1 || !(len % 2) ||
+	    hash_algo_by_length((len - 1) / 2) == GIT_HASH_UNKNOWN)
+		return 1;

-	if (write_in_full(1, comment, 41) < 0)
+	if (write_in_full(1, comment, len) < 0)
 		die_errno("git get-tar-commit-id: write error");

 	return 0;
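The hunk above parses a pax extended-header record of the form "<len> comment=<value>\n", where <len> is the decimal length of the whole record in bytes. As a hedged, standalone sketch of just that parsing step (not the builtin itself; the function name and sample record are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Print the value of a pax record "<len> comment=<value>\n".
 * <len> counts the whole record, so the value (plus its trailing
 * newline) is whatever is left after the length field and " comment=".
 */
static int print_tar_comment(const char *record)
{
	char *end;
	const char *comment;
	long len;

	errno = 0;
	len = strtol(record, &end, 10);
	if (errno == ERANGE || end == record || len < 0)
		return -1;
	if (strncmp(end, " comment=", 9))
		return -1;
	comment = end + 9;
	len -= comment - record;	/* bytes left: hex value + '\n' */
	if (len < 2)
		return -1;
	printf("%.*s", (int)len, comment);
	return 0;
}

int main(void)
{
	/* 52 = 11 bytes of "52 comment=" + 40 hex digits + '\n' */
	return print_tar_comment("52 comment="
		"0123456789abcdef0123456789abcdef01234567\n") ? 1 : 0;
}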
@@ -361,23 +361,25 @@ static char const * const name_rev_usage[] = {
 static void name_rev_line(char *p, struct name_ref_data *data)
 {
 	struct strbuf buf = STRBUF_INIT;
-	int forty = 0;
+	int counter = 0;
 	char *p_start;
+	const unsigned hexsz = the_hash_algo->hexsz;

 	for (p_start = p; *p; p++) {
 #define ishex(x) (isdigit((x)) || ((x) >= 'a' && (x) <= 'f'))
 		if (!ishex(*p))
-			forty = 0;
-		else if (++forty == GIT_SHA1_HEXSZ &&
+			counter = 0;
+		else if (++counter == hexsz &&
 			 !ishex(*(p+1))) {
 			struct object_id oid;
 			const char *name = NULL;
 			char c = *(p+1);
 			int p_len = p - p_start + 1;

-			forty = 0;
+			counter = 0;

 			*(p+1) = 0;
-			if (!get_oid(p - (GIT_SHA1_HEXSZ - 1), &oid)) {
+			if (!get_oid(p - (hexsz - 1), &oid)) {
 				struct object *o =
 					lookup_object(the_repository,
 						      oid.hash);
@@ -390,7 +392,7 @@ static void name_rev_line(char *p, struct name_ref_data *data)
 				continue;

 			if (data->name_only)
-				printf("%.*s%s", p_len - GIT_SHA1_HEXSZ, p_start, name);
+				printf("%.*s%s", p_len - hexsz, p_start, name);
 			else
 				printf("%.*s (%s)", p_len, p_start, name);
 			p_start = p + 1;
@@ -1487,6 +1487,7 @@ static int can_reuse_delta(const unsigned char *base_sha1,
 				 struct object_entry **base_out)
 {
 	struct object_entry *base;
+	struct object_id base_oid;

 	if (!base_sha1)
 		return 0;
@@ -1508,10 +1509,9 @@ static int can_reuse_delta(const unsigned char *base_sha1,
 	 * even if it was buried too deep in history to make it into the
 	 * packing list.
 	 */
-	if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
+	oidread(&base_oid, base_sha1);
+	if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, &base_oid)) {
 		if (use_delta_islands) {
-			struct object_id base_oid;
-			hashcpy(base_oid.hash, base_sha1);
 			if (!in_same_island(&delta->idx.oid, &base_oid))
 				return 0;
 		}
@@ -641,7 +641,7 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
 	pl = red = pack_list_difference(local_packs, min);
 	while (pl) {
 		printf("%s\n%s\n",
-		       sha1_pack_index_name(pl->pack->sha1),
+		       sha1_pack_index_name(pl->pack->hash),
 		       pl->pack->pack_name);
 		pl = pl->next;
 	}
@@ -369,9 +369,10 @@ static void get_merge_heads(struct oid_array *merge_heads)

 	fp = xfopen(filename, "r");
 	while (strbuf_getline_lf(&sb, fp) != EOF) {
-		if (get_oid_hex(sb.buf, &oid))
-			continue;  /* invalid line: does not start with SHA1 */
-		if (starts_with(sb.buf + GIT_SHA1_HEXSZ, "\tnot-for-merge\t"))
+		const char *p;
+		if (parse_oid_hex(sb.buf, &oid, &p))
+			continue;  /* invalid line: does not start with object ID */
+		if (starts_with(p, "\tnot-for-merge\t"))
 			continue;  /* ref is not-for-merge */
 		oid_array_append(merge_heads, &oid);
 	}
@@ -760,7 +761,7 @@ static int get_rebase_fork_point(struct object_id *fork_point, const char *repo,
 	cp.no_stderr = 1;
 	cp.git_cmd = 1;

-	ret = capture_command(&cp, &sb, GIT_SHA1_HEXSZ);
+	ret = capture_command(&cp, &sb, GIT_MAX_HEXSZ);
 	if (ret)
 		goto cleanup;

@@ -805,7 +806,7 @@ static int get_octopus_merge_base(struct object_id *merge_base,
 }

 /**
- * Given the current HEAD SHA1, the merge head returned from git-fetch and the
+ * Given the current HEAD oid, the merge head returned from git-fetch and the
 * fork point calculated by get_rebase_fork_point(), runs git-rebase with the
 * appropriate arguments and returns its exit status.
 */
dir.c (28 lines changed)

@@ -2544,13 +2544,9 @@ struct ondisk_untracked_cache {
 	struct stat_data info_exclude_stat;
 	struct stat_data excludes_file_stat;
 	uint32_t dir_flags;
-	unsigned char info_exclude_sha1[20];
-	unsigned char excludes_file_sha1[20];
-	char exclude_per_dir[FLEX_ARRAY];
 };

 #define ouc_offset(x) offsetof(struct ondisk_untracked_cache, x)
-#define ouc_size(len) (ouc_offset(exclude_per_dir) + len + 1)

 struct write_data {
 	int index;	   /* number of written untracked_cache_dir */
@@ -2633,20 +2629,21 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
 	struct write_data wd;
 	unsigned char varbuf[16];
 	int varint_len;
-	size_t len = strlen(untracked->exclude_per_dir);
+	const unsigned hashsz = the_hash_algo->rawsz;

-	FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
+	ouc = xcalloc(1, sizeof(*ouc));
 	stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
 	stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
-	hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
-	hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
 	ouc->dir_flags = htonl(untracked->dir_flags);

 	varint_len = encode_varint(untracked->ident.len, varbuf);
 	strbuf_add(out, varbuf, varint_len);
 	strbuf_addbuf(out, &untracked->ident);

-	strbuf_add(out, ouc, ouc_size(len));
+	strbuf_add(out, ouc, sizeof(*ouc));
+	strbuf_add(out, untracked->ss_info_exclude.oid.hash, hashsz);
+	strbuf_add(out, untracked->ss_excludes_file.oid.hash, hashsz);
+	strbuf_add(out, untracked->exclude_per_dir, strlen(untracked->exclude_per_dir) + 1);
 	FREE_AND_NULL(ouc);

 	if (!untracked->root) {
@@ -2833,6 +2830,9 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
 	int ident_len;
 	ssize_t len;
 	const char *exclude_per_dir;
+	const unsigned hashsz = the_hash_algo->rawsz;
+	const unsigned offset = sizeof(struct ondisk_untracked_cache);
+	const unsigned exclude_per_dir_offset = offset + 2 * hashsz;

 	if (sz <= 1 || end[-1] != '\0')
 		return NULL;
@@ -2844,7 +2844,7 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
 	ident = (const char *)next;
 	next += ident_len;

-	if (next + ouc_size(0) > end)
+	if (next + exclude_per_dir_offset + 1 > end)
 		return NULL;

 	uc = xcalloc(1, sizeof(*uc));
@@ -2852,15 +2852,15 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
 	strbuf_add(&uc->ident, ident, ident_len);
 	load_oid_stat(&uc->ss_info_exclude,
 		      next + ouc_offset(info_exclude_stat),
-		      next + ouc_offset(info_exclude_sha1));
+		      next + offset);
 	load_oid_stat(&uc->ss_excludes_file,
 		      next + ouc_offset(excludes_file_stat),
-		      next + ouc_offset(excludes_file_sha1));
+		      next + offset + hashsz);
 	uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
-	exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
+	exclude_per_dir = (const char *)next + exclude_per_dir_offset;
 	uc->exclude_per_dir = xstrdup(exclude_per_dir);
-	/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
-	next += ouc_size(strlen(exclude_per_dir));
+	next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
 	if (next >= end)
 		goto done2;
@@ -29,6 +29,13 @@
 */
 #define NO_DELTA S_ISUID

+/*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
 struct object_entry {
 	struct pack_idx_entry idx;
 	struct object_entry *next;
@@ -742,7 +749,8 @@ static const char *create_index(void)
 	if (c != last)
 		die("internal consistency error creating the index");

-	tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1);
+	tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
				 pack_data->hash);
 	free(idx);
 	return tmpfile;
 }
@@ -753,7 +761,7 @@ static char *keep_pack(const char *curr_index_name)
 	struct strbuf name = STRBUF_INIT;
 	int keep_fd;

-	odb_pack_name(&name, pack_data->sha1, "keep");
+	odb_pack_name(&name, pack_data->hash, "keep");
 	keep_fd = odb_pack_keep(name.buf);
 	if (keep_fd < 0)
 		die_errno("cannot create keep file");
@@ -761,11 +769,11 @@ static char *keep_pack(const char *curr_index_name)
 	if (close(keep_fd))
 		die_errno("failed to write keep file");

-	odb_pack_name(&name, pack_data->sha1, "pack");
+	odb_pack_name(&name, pack_data->hash, "pack");
 	if (finalize_object_file(pack_data->pack_name, name.buf))
 		die("cannot store pack file");

-	odb_pack_name(&name, pack_data->sha1, "idx");
+	odb_pack_name(&name, pack_data->hash, "idx");
 	if (finalize_object_file(curr_index_name, name.buf))
 		die("cannot store index file");
 	free((void *)curr_index_name);
@@ -779,7 +787,7 @@ static void unkeep_all_packs(void)

 	for (k = 0; k < pack_id; k++) {
 		struct packed_git *p = all_packs[k];
-		odb_pack_name(&name, p->sha1, "keep");
+		odb_pack_name(&name, p->hash, "keep");
 		unlink_or_warn(name.buf);
 	}
 	strbuf_release(&name);
@@ -821,9 +829,9 @@ static void end_packfile(void)

 		close_pack_windows(pack_data);
 		finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
-		fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
-					 pack_data->pack_name, object_count,
-					 cur_pack_oid.hash, pack_size);
+		fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+					 pack_data->pack_name, object_count,
+					 cur_pack_oid.hash, pack_size);

 		if (object_count <= unpack_limit) {
 			if (!loosen_small_pack(pack_data)) {
@@ -948,8 +956,9 @@ static int store_object(
 	git_deflate_end(&s);

 	/* Determine if we should auto-checkpoint. */
-	if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
-		|| (pack_size + 60 + s.total_out) < pack_size) {
+	if ((max_packsize
+		&& (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+		|| (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {

 		/* This new object needs to *not* have the current pack_id. */
 		e->pack_id = pack_id + 1;
@@ -1044,8 +1053,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
 	int status = Z_OK;

 	/* Determine if we should auto-checkpoint. */
-	if ((max_packsize && (pack_size + 60 + len) > max_packsize)
-		|| (pack_size + 60 + len) < pack_size)
+	if ((max_packsize
+		&& (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+		|| (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
 		cycle_packfile();

 	hashfile_checkpoint(pack_file, &checkpoint);
@@ -1240,7 +1250,7 @@ static void load_tree(struct tree_entry *root)
 		c += e->name->str_len + 1;
 		hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
 		hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
-		c += GIT_SHA1_RAWSZ;
+		c += the_hash_algo->rawsz;
 	}
 	free(buf);
 }
@@ -1287,7 +1297,7 @@ static void mktree(struct tree_content *t, int v, struct strbuf *b)
 		strbuf_addf(b, "%o %s%c",
 			(unsigned int)(e->versions[v].mode & ~NO_DELTA),
 			e->name->str_dat, '\0');
-		strbuf_add(b, e->versions[v].oid.hash, GIT_SHA1_RAWSZ);
+		strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
 	}
 }

@@ -2036,7 +2046,9 @@ static uintmax_t do_change_note_fanout(
 	unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
 	uintmax_t num_notes = 0;
 	struct object_id oid;
-	char realpath[60];
+	/* hex oid + '/' between each pair of hex digits + NUL */
+	char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+	const unsigned hexsz = the_hash_algo->hexsz;

 	if (!root->tree)
 		load_tree(root);
@@ -2056,7 +2068,7 @@ static uintmax_t do_change_note_fanout(
 		 * of 2 chars.
 		 */
 		if (!e->versions[1].mode ||
-		    tmp_hex_oid_len > GIT_SHA1_HEXSZ ||
+		    tmp_hex_oid_len > hexsz ||
 		    e->name->str_len % 2)
 			continue;

@@ -2070,7 +2082,7 @@ static uintmax_t do_change_note_fanout(
 		tmp_fullpath_len += e->name->str_len;
 		fullpath[tmp_fullpath_len] = '\0';

-		if (tmp_hex_oid_len == GIT_SHA1_HEXSZ && !get_oid_hex(hex_oid, &oid)) {
+		if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
 			/* This is a note entry */
 			if (fanout == 0xff) {
 				/* Counting mode, no rename */
@@ -2348,7 +2360,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
 	struct object_entry *oe;
 	struct branch *s;
 	struct object_id oid, commit_oid;
-	char path[60];
+	char path[GIT_MAX_RAWSZ * 3];
 	uint16_t inline_data = 0;
 	unsigned char new_fanout;

@@ -2401,7 +2413,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
 		char *buf = read_object_with_reference(&commit_oid,
 						       commit_type, &size,
 						       &commit_oid);
-		if (!buf || size < 46)
+		if (!buf || size < the_hash_algo->hexsz + 6)
 			die("Not a valid commit: %s", p);
 		free(buf);
 	} else
@@ -2452,7 +2464,7 @@ static void file_change_deleteall(struct branch *b)

 static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
 {
-	if (!buf || size < GIT_SHA1_HEXSZ + 6)
+	if (!buf || size < the_hash_algo->hexsz + 6)
 		die("Not a valid commit: %s", oid_to_hex(&b->oid));
 	if (memcmp("tree ", buf, 5)
 		|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
@@ -2551,7 +2563,7 @@ static struct hash_list *parse_merge(unsigned int *count)
 			char *buf = read_object_with_reference(&n->oid,
 							       commit_type,
 							       &size, &n->oid);
-			if (!buf || size < 46)
+			if (!buf || size < the_hash_algo->hexsz + 6)
 				die("Not a valid commit: %s", from);
 			free(buf);
 		} else
@@ -2840,7 +2852,7 @@ static void parse_get_mark(const char *p)
 		die("Unknown mark: %s", command_buf.buf);

 	xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
-	cat_blob_write(output, GIT_SHA1_HEXSZ + 1);
+	cat_blob_write(output, the_hash_algo->hexsz + 1);
 }

 static void parse_cat_blob(const char *p)
@@ -2870,6 +2882,8 @@ static struct object_entry *dereference(struct object_entry *oe,
 {
 	unsigned long size;
 	char *buf = NULL;
+	const unsigned hexsz = the_hash_algo->hexsz;
+
 	if (!oe) {
 		enum object_type type = oid_object_info(the_repository, oid,
 							NULL);
@@ -2903,12 +2917,12 @@ static struct object_entry *dereference(struct object_entry *oe,
 	/* Peel one layer. */
 	switch (oe->type) {
 	case OBJ_TAG:
-		if (size < GIT_SHA1_HEXSZ + strlen("object ") ||
+		if (size < hexsz + strlen("object ") ||
 		    get_oid_hex(buf + strlen("object "), oid))
 			die("Invalid SHA1 in tag: %s", command_buf.buf);
 		break;
 	case OBJ_COMMIT:
-		if (size < GIT_SHA1_HEXSZ + strlen("tree ") ||
+		if (size < hexsz + strlen("tree ") ||
 		    get_oid_hex(buf + strlen("tree "), oid))
 			die("Invalid SHA1 in commit: %s", command_buf.buf);
 	}
@@ -2940,7 +2954,7 @@ static struct object_entry *parse_treeish_dataref(const char **p)
 	return e;
 }

-static void print_ls(int mode, const unsigned char *sha1, const char *path)
+static void print_ls(int mode, const unsigned char *hash, const char *path)
 {
 	static struct strbuf line = STRBUF_INIT;

@@ -2960,7 +2974,7 @@ static void print_ls(int mode, const unsigned char *sha1, const char *path)
 		/* mode SP type SP object_name TAB path LF */
 		strbuf_reset(&line);
 		strbuf_addf(&line, "%06o %s %s\t",
-			    mode & ~NO_DELTA, type, sha1_to_hex(sha1));
+			    mode & ~NO_DELTA, type, hash_to_hex(hash));
 		quote_c_style(path, &line, NULL, 0);
 		strbuf_addch(&line, '\n');
 	}
@@ -788,6 +788,38 @@ sub check_loadavg {
 # ======================================================================
 # input validation and dispatch

+# Various hash size-related values.
+my $sha1_len = 40;
+my $sha256_extra_len = 24;
+my $sha256_len = $sha1_len + $sha256_extra_len;
+
+# A regex matching $len hex characters. $len may be a range (e.g. 7,64).
+sub oid_nlen_regex {
+	my $len = shift;
+	my $hchr = qr/[0-9a-fA-F]/;
+	return qr/(?:(?:$hchr){$len})/;
+}
+
+# A regex matching two sets of $nlen hex characters, prefixed by the literal
+# string $prefix and with the literal string $infix between them.
+sub oid_nlen_prefix_infix_regex {
+	my $nlen = shift;
+	my $prefix = shift;
+	my $infix = shift;
+
+	my $rx = oid_nlen_regex($nlen);
+
+	return qr/^\Q$prefix\E$rx\Q$infix\E$rx$/;
+}
+
+# A regex matching a valid object ID.
+our $oid_regex;
+{
+	my $x = oid_nlen_regex($sha1_len);
+	my $y = oid_nlen_regex($sha256_extra_len);
+	$oid_regex = qr/(?:$x(?:$y)?)/;
+}
+
 # input parameters can be collected from a variety of sources (presently, CGI
 # and PATH_INFO), so we define an %input_params hash that collects them all
 # together during validation: this allows subsequent uses (e.g. href()) to be
@@ -1516,7 +1548,7 @@ sub is_valid_refname {

 	return undef unless defined $input;
 	# textual hashes are O.K.
-	if ($input =~ m/^[0-9a-fA-F]{40}$/) {
+	if ($input =~ m/^$oid_regex$/) {
 		return 1;
 	}
 	# it must be correct pathname
@@ -2028,6 +2060,9 @@ sub file_type_long {
 sub format_log_line_html {
 	my $line = shift;

+	# Potentially abbreviated OID.
+	my $regex = oid_nlen_regex("7,64");
+
 	$line = esc_html($line, -nbsp=>1);
 	$line =~ s{
 		\b
@@ -2037,10 +2072,10 @@ sub format_log_line_html {
 			(?<!-) # see strbuf_check_tag_ref(). Tags can't start with -
 			[A-Za-z0-9.-]+
 			(?!\.) # refs can't end with ".", see check_refname_format()
-			-g[0-9a-fA-F]{7,40}
+			-g$regex
 			|
 			# Just a normal looking Git SHA1
-			[0-9a-fA-F]{7,40}
+			$regex
 		)
 		\b
 	}{
@@ -2286,7 +2321,8 @@ sub format_extended_diff_header_line {
 			')</span>';
 	}
 	# match <hash>
-	if ($line =~ m/^index [0-9a-fA-F]{40},[0-9a-fA-F]{40}/) {
+	if ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", ",") ||
+	    $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", ",")) {
 		# can match only for combined diff
 		$line = 'index ';
 		for (my $i = 0; $i < $diffinfo->{'nparents'}; $i++) {
@@ -2308,7 +2344,8 @@ sub format_extended_diff_header_line {
 			$line .= '0' x 7;
 		}

-	} elsif ($line =~ m/^index [0-9a-fA-F]{40}..[0-9a-fA-F]{40}/) {
+	} elsif ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", "..") ||
+		 $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", "..")) {
 		# can match only for ordinary diff
 		my ($from_link, $to_link);
 		if ($from->{'href'}) {
@@ -2834,7 +2871,7 @@ sub git_get_hash_by_path {
 	}

 	#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa	panic.c'
-	$line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/;
+	$line =~ m/^([0-9]+) (.+) ($oid_regex)\t/;
 	if (defined $type && $type ne $2) {
 		# type doesn't match
 		return undef;
@@ -3333,7 +3370,7 @@ sub git_get_references {

 	while (my $line = <$fd>) {
 		chomp $line;
-		if ($line =~ m!^([0-9a-fA-F]{40})\srefs/($type.*)$!) {
+		if ($line =~ m!^($oid_regex)\srefs/($type.*)$!) {
 			if (defined $refs{$1}) {
 				push @{$refs{$1}}, $2;
 			} else {
@@ -3407,7 +3444,7 @@ sub parse_tag {
 	$tag{'id'} = $tag_id;
 	while (my $line = <$fd>) {
 		chomp $line;
-		if ($line =~ m/^object ([0-9a-fA-F]{40})$/) {
+		if ($line =~ m/^object ($oid_regex)$/) {
 			$tag{'object'} = $1;
 		} elsif ($line =~ m/^type (.+)$/) {
 			$tag{'type'} = $1;
@@ -3451,15 +3488,15 @@ sub parse_commit_text {
 	}

 	my $header = shift @commit_lines;
-	if ($header !~ m/^[0-9a-fA-F]{40}/) {
+	if ($header !~ m/^$oid_regex/) {
 		return;
 	}
 	($co{'id'}, my @parents) = split ' ', $header;
 	while (my $line = shift @commit_lines) {
 		last if $line eq "\n";
-		if ($line =~ m/^tree ([0-9a-fA-F]{40})$/) {
+		if ($line =~ m/^tree ($oid_regex)$/) {
 			$co{'tree'} = $1;
-		} elsif ((!defined $withparents) && ($line =~ m/^parent ([0-9a-fA-F]{40})$/)) {
+		} elsif ((!defined $withparents) && ($line =~ m/^parent ($oid_regex)$/)) {
 			push @parents, $1;
 		} elsif ($line =~ m/^author (.*) ([0-9]+) (.*)$/) {
 			$co{'author'} = to_utf8($1);
@@ -3591,7 +3628,7 @@ sub parse_difftree_raw_line {

 	# ':100644 100644 03b218260e99b78c6df0ed378e59ed9205ccc96d 3b93d5e7cc7f7dd4ebed13a5cc1a4ad976fc94d8 M	ls-files.c'
 	# ':100644 100644 7f9281985086971d3877aca27704f2aaf9c448ce bc190ebc71bbd923f2b728e505408f5e54bd073a M	rev-tree.c'
-	if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ([0-9a-fA-F]{40}) ([0-9a-fA-F]{40}) (.)([0-9]{0,3})\t(.*)$/) {
+	if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ($oid_regex) ($oid_regex) (.)([0-9]{0,3})\t(.*)$/) {
 		$res{'from_mode'} = $1;
 		$res{'to_mode'} = $2;
 		$res{'from_id'} = $3;
@@ -3606,7 +3643,7 @@ sub parse_difftree_raw_line {
 	}
 	# '::100755 100755 100755 60e79ca1b01bc8b057abe17ddab484699a7f5fdb 94067cc5f73388f33722d52ae02f44692bc07490 94067cc5f73388f33722d52ae02f44692bc07490 MR	git-gui/git-gui.sh'
 	# combined diff (for merge commit)
-	elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:[0-9a-fA-F]{40} )+)([a-zA-Z]+)\t(.*)$//) {
+	elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:$oid_regex )+)([a-zA-Z]+)\t(.*)$//) {
 		$res{'nparents'}  = length($1);
 		$res{'from_mode'} = [ split(' ', $2) ];
 		$res{'to_mode'} = pop @{$res{'from_mode'}};
@@ -3616,7 +3653,7 @@ sub parse_difftree_raw_line {
 		$res{'to_file'} = unquote($5);
 	}
 	# 'c512b523472485aef4fff9e57b229d9d243c967f'
-	elsif ($line =~ m/^([0-9a-fA-F]{40})$/) {
+	elsif ($line =~ m/^($oid_regex)$/) {
 		$res{'commit'} = $1;
 	}

@@ -3644,7 +3681,7 @@ sub parse_ls_tree_line {

 	if ($opts{'-l'}) {
 		#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa   16717	panic.c'
-		$line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40}) +(-|[0-9]+)\t(.+)$/s;
+		$line =~ m/^([0-9]+) (.+) ($oid_regex) +(-|[0-9]+)\t(.+)$/s;

 		$res{'mode'} = $1;
 		$res{'type'} = $2;
@@ -3657,7 +3694,7 @@ sub parse_ls_tree_line {
 	} else {
 		#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa	panic.c'
-		$line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t(.+)$/s;
+		$line =~ m/^([0-9]+) (.+) ($oid_regex)\t(.+)$/s;

 		$res{'mode'} = $1;
 		$res{'type'} = $2;
@@ -4799,7 +4836,7 @@ sub fill_from_file_info {
 sub is_deleted {
 	my $diffinfo = shift;

-	return $diffinfo->{'to_id'} eq ('0' x 40);
+	return $diffinfo->{'to_id'} eq ('0' x 40) || $diffinfo->{'to_id'} eq ('0' x 64);
 }

 # does patch correspond to [previous] difftree raw line
@@ -6285,7 +6322,7 @@ sub git_search_changes {
 				        -class => "list subject"},
 				        chop_and_escape_str($co{'title'}, 50) . "<br/>");
 			} elsif (defined $set{'to_id'}) {
-				next if ($set{'to_id'} =~ m/^0{40}$/);
+				next if is_deleted(\%set);

 				print $cgi->a({-href => href(action=>"blob", hash_base=>$co{'id'},
 				              hash=>$set{'to_id'}, file_name=>$set{'to_file'}),
@@ -6829,7 +6866,7 @@ sub git_blame_common {
 			# the header: <SHA-1> <src lineno> <dst lineno> [<lines in group>]
 			# no <lines in group> for subsequent lines in group of lines
 			my ($full_rev, $orig_lineno, $lineno, $group_size) =
-			   ($line =~ /^([0-9a-f]{40}) (\d+) (\d+)(?: (\d+))?$/);
+			   ($line =~ /^($oid_regex) (\d+) (\d+)(?: (\d+))?$/);
 			if (!exists $metainfo{$full_rev}) {
 				$metainfo{$full_rev} = { 'nprevious' => 0 };
 			}
@@ -6879,7 +6916,7 @@ sub git_blame_common {
 			}
 			# 'previous' <sha1 of parent commit> <filename at commit>
 			if (exists $meta->{'previous'} &&
-			    $meta->{'previous'} =~ /^([a-fA-F0-9]{40}) (.*)$/) {
+			    $meta->{'previous'} =~ /^($oid_regex) (.*)$/) {
 				$meta->{'parent'} = $1;
 				$meta->{'file_parent'} = unquote($2);
 			}
@@ -6996,7 +7033,7 @@ sub git_blob_plain {
 		} else {
 			die_error(400, "No file name defined");
 		}
-	} elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+	} elsif ($hash =~ m/^$oid_regex$/) {
 		# blobs defined by non-textual hash id's can be cached
 		$expires = "+1d";
 	}
@@ -7057,7 +7094,7 @@ sub git_blob {
 		} else {
 			die_error(400, "No file name defined");
 		}
-	} elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+	} elsif ($hash =~ m/^$oid_regex$/) {
 		# blobs defined by non-textual hash id's can be cached
 		$expires = "+1d";
 	}
@@ -7515,7 +7552,7 @@ sub git_commit {

 	# non-textual hash id's can be cached
 	my $expires;
-	if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+	if ($hash =~ m/^$oid_regex$/) {
 		$expires = "+1d";
 	}
 	my $refs = git_get_references();
@@ -7609,7 +7646,7 @@ sub git_object {
 		close $fd;

 		#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa	panic.c'
-		unless ($line && $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/) {
+		unless ($line && $line =~ m/^([0-9]+) (.+) ($oid_regex)\t/) {
 			die_error(404, "File or directory for given base does not exist");
 		}
 		$type = $2;
@@ -7649,7 +7686,7 @@ sub git_blobdiff {
 			or die_error(404, "Blob diff not found");

 	} elsif (defined $hash &&
-	         $hash =~ /[0-9a-fA-F]{40}/) {
+	         $hash =~ $oid_regex) {
 		# try to find filename from $hash

 		# read filtered raw output
@@ -7659,7 +7696,7 @@ sub git_blobdiff {
 		@difftree =
 			# ':100644 100644 03b21826... 3b93d5e7... M	ls-files.c'
 			# $hash == to_id
-			grep { /^:[0-7]{6} [0-7]{6} [0-9a-fA-F]{40} $hash/ }
+			grep { /^:[0-7]{6} [0-7]{6} $oid_regex $hash/ }
 			map { chomp; $_ } <$fd>;
 		close $fd
 			or die_error(404, "Reading git-diff-tree failed");
@@ -7682,8 +7719,8 @@ sub git_blobdiff {
 		$hash ||= $diffinfo{'to_id'};

 		# non-textual hash id's can be cached
-		if ($hash_base =~ m/^[0-9a-fA-F]{40}$/ &&
-		    $hash_parent_base =~ m/^[0-9a-fA-F]{40}$/) {
+		if ($hash_base =~ m/^$oid_regex$/ &&
+		    $hash_parent_base =~ m/^$oid_regex$/) {
 			$expires = '+1d';
 		}

@@ -7819,7 +7856,7 @@ sub git_commitdiff {
 		    $hash_parent ne '-c' && $hash_parent ne '--cc') {
 			# commitdiff with two commits given
 			my $hash_parent_short = $hash_parent;
-			if ($hash_parent =~ m/^[0-9a-fA-F]{40}$/) {
+			if ($hash_parent =~ m/^$oid_regex$/) {
 				$hash_parent_short = substr($hash_parent, 0, 7);
 			}
 			$formats_nav .=
@@ -7928,7 +7965,7 @@ sub git_commitdiff {

 	# non-textual hash id's can be cached
 	my $expires;
-	if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+	if ($hash =~ m/^$oid_regex$/) {
 		$expires = "+1d";
 	}
hash.h (2 lines changed)

@@ -131,6 +131,8 @@ extern const struct git_hash_algo hash_algos[GIT_HASH_NALGOS];
 int hash_algo_by_name(const char *name);
 /* Identical, except based on the format ID. */
 int hash_algo_by_id(uint32_t format_id);
+/* Identical, except based on the length. */
+int hash_algo_by_length(int len);
 /* Identical, except for a pointer to struct git_hash_algo. */
 static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
 {
@@ -711,8 +711,11 @@ static struct service_cmd {
 	{"GET", "/objects/info/http-alternates$", get_text_file},
 	{"GET", "/objects/info/packs$", get_info_packs},
 	{"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{38}$", get_loose_object},
+	{"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{62}$", get_loose_object},
 	{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.pack$", get_pack_file},
+	{"GET", "/objects/pack/pack-[0-9a-f]{64}\\.pack$", get_pack_file},
 	{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.idx$", get_idx_file},
+	{"GET", "/objects/pack/pack-[0-9a-f]{64}\\.idx$", get_idx_file},

 	{"POST", "/git-upload-pack$", service_rpc},
 	{"POST", "/git-receive-pack$", service_rpc}
http-push.c (29 lines changed)

@@ -145,7 +145,7 @@ struct remote_lock {
 	char *url;
 	char *owner;
 	char *token;
-	char tmpfile_suffix[41];
+	char tmpfile_suffix[GIT_MAX_HEXSZ + 1];
 	time_t start_time;
 	long timeout;
 	int refreshing;
@@ -315,7 +315,8 @@ static void start_fetch_packed(struct transfer_request *request)
 		return;
 	}

-	fprintf(stderr,	"Fetching pack %s\n", sha1_to_hex(target->sha1));
+	fprintf(stderr,	"Fetching pack %s\n",
+		hash_to_hex(target->hash));
 	fprintf(stderr, " which contains %s\n", oid_to_hex(&request->obj->oid));

 	preq = new_http_pack_request(target, repo->url);
@@ -398,7 +399,7 @@ static void start_put(struct transfer_request *request)
 	request->dest = strbuf_detach(&buf, NULL);

 	append_remote_object_url(&buf, repo->url, hex, 0);
-	strbuf_add(&buf, request->lock->tmpfile_suffix, 41);
+	strbuf_add(&buf, request->lock->tmpfile_suffix, the_hash_algo->hexsz + 1);
 	request->url = strbuf_detach(&buf, NULL);

 	slot = get_active_slot();
@@ -757,8 +758,8 @@ static void handle_lockprop_ctx(struct xml_ctx *ctx, int tag_closed)
 static void handle_new_lock_ctx(struct xml_ctx *ctx, int tag_closed)
 {
 	struct remote_lock *lock = (struct remote_lock *)ctx->userData;
-	git_SHA_CTX sha_ctx;
-	unsigned char lock_token_sha1[20];
+	git_hash_ctx hash_ctx;
+	unsigned char lock_token_hash[GIT_MAX_RAWSZ];

 	if (tag_closed && ctx->cdata) {
 		if (!strcmp(ctx->name, DAV_ACTIVELOCK_OWNER)) {
@@ -770,12 +771,12 @@ static void handle_new_lock_ctx(struct xml_ctx *ctx, int tag_closed)
 		} else if (!strcmp(ctx->name, DAV_ACTIVELOCK_TOKEN)) {
 			lock->token = xstrdup(ctx->cdata);

-			git_SHA1_Init(&sha_ctx);
-			git_SHA1_Update(&sha_ctx, lock->token, strlen(lock->token));
-			git_SHA1_Final(lock_token_sha1, &sha_ctx);
+			the_hash_algo->init_fn(&hash_ctx);
+			the_hash_algo->update_fn(&hash_ctx, lock->token, strlen(lock->token));
+			the_hash_algo->final_fn(lock_token_hash, &hash_ctx);

 			lock->tmpfile_suffix[0] = '_';
-			memcpy(lock->tmpfile_suffix + 1, sha1_to_hex(lock_token_sha1), 40);
+			memcpy(lock->tmpfile_suffix + 1, hash_to_hex(lock_token_hash), the_hash_algo->hexsz);
 		}
 	}
 }
@@ -1017,7 +1018,7 @@ static void remote_ls(const char *path, int flags,
 /* extract hex from sharded "xx/x{38}" filename */
 static int get_oid_hex_from_objpath(const char *path, struct object_id *oid)
 {
-	if (strlen(path) != GIT_SHA1_HEXSZ + 1)
+	if (strlen(path) != the_hash_algo->hexsz + 1)
 		return -1;

 	if (hex_to_bytes(oid->hash, path, 1))
@@ -1025,7 +1026,7 @@ static int get_oid_hex_from_objpath(const char *path, struct object_id *oid)
 	path += 2;
 	path++; /* skip '/' */

-	return hex_to_bytes(oid->hash + 1, path, GIT_SHA1_RAWSZ - 1);
+	return hex_to_bytes(oid->hash + 1, path, the_hash_algo->rawsz - 1);
 }

 static void process_ls_object(struct remote_ls_ctx *ls)
@@ -1373,7 +1374,7 @@ static int get_delta(struct rev_info *revs, struct remote_lock *lock)
 	return count;
 }

-static int update_remote(unsigned char *sha1, struct remote_lock *lock)
+static int update_remote(const struct object_id *oid, struct remote_lock *lock)
 {
 	struct active_request_slot *slot;
 	struct slot_results results;
@@ -1382,7 +1383,7 @@ static int update_remote(unsigned char *sha1, struct remote_lock *lock)

 	dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF);

-	strbuf_addf(&out_buffer.buf, "%s\n", sha1_to_hex(sha1));
+	strbuf_addf(&out_buffer.buf, "%s\n", oid_to_hex(oid));

 	slot = get_active_slot();
 	slot->results = &results;
@@ -1947,7 +1948,7 @@ int cmd_main(int argc, const char **argv)
 		run_request_queue();

 		/* Update the remote branch if all went well */
-		if (aborted || !update_remote(ref->new_oid.hash, ref_lock))
+		if (aborted || !update_remote(&ref->new_oid, ref_lock))
 			rc = 1;

 		if (!rc)
@@ -442,9 +442,9 @@ static int http_fetch_pack(struct walker *walker, struct alt_base *repo, unsigne

 	if (walker->get_verbosely) {
 		fprintf(stderr, "Getting pack %s\n",
-			sha1_to_hex(target->sha1));
+			hash_to_hex(target->hash));
 		fprintf(stderr, " which contains %s\n",
-			sha1_to_hex(sha1));
+			hash_to_hex(sha1));
 	}

 	preq = new_http_pack_request(target, repo->base);
@@ -481,9 +481,9 @@ static void abort_object_request(struct object_request *obj_req)
 	release_object_request(obj_req);
 }

-static int fetch_object(struct walker *walker, unsigned char *sha1)
+static int fetch_object(struct walker *walker, unsigned char *hash)
 {
-	char *hex = sha1_to_hex(sha1);
+	char *hex = hash_to_hex(hash);
 	int ret = 0;
 	struct object_request *obj_req = NULL;
 	struct http_object_request *req;
@@ -491,7 +491,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)

 	list_for_each(pos, head) {
 		obj_req = list_entry(pos, struct object_request, node);
-		if (hasheq(obj_req->oid.hash, sha1))
+		if (hasheq(obj_req->oid.hash, hash))
 			break;
 	}
 	if (obj_req == NULL)
@@ -556,20 +556,20 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
 	return ret;
 }

-static int fetch(struct walker *walker, unsigned char *sha1)
+static int fetch(struct walker *walker, unsigned char *hash)
 {
 	struct walker_data *data = walker->data;
 	struct alt_base *altbase = data->alt;

-	if (!fetch_object(walker, sha1))
+	if (!fetch_object(walker, hash))
 		return 0;
 	while (altbase) {
-		if (!http_fetch_pack(walker, altbase, sha1))
+		if (!http_fetch_pack(walker, altbase, hash))
 			return 0;
 		fetch_alternates(walker, data->alt->base);
 		altbase = altbase->next;
 	}
-	return error("Unable to find %s under %s", sha1_to_hex(sha1),
+	return error("Unable to find %s under %s", hash_to_hex(hash),
 		     data->alt->base);
 }
http.c (33 lines changed)

@@ -2071,7 +2071,7 @@ int http_fetch_ref(const char *base, struct ref *ref)
 	url = quote_ref_url(base, ref->name);
 	if (http_get_strbuf(url, &buffer, &options) == HTTP_OK) {
 		strbuf_rtrim(&buffer);
-		if (buffer.len == 40)
+		if (buffer.len == the_hash_algo->hexsz)
 			ret = get_oid_hex(buffer.buf, &ref->old_oid);
 		else if (starts_with(buffer.buf, "ref: ")) {
 			ref->symref = xstrdup(buffer.buf + 5);
@@ -2085,19 +2085,19 @@ int http_fetch_ref(const char *base, struct ref *ref)
 }

 /* Helpers for fetching packs */
-static char *fetch_pack_index(unsigned char *sha1, const char *base_url)
+static char *fetch_pack_index(unsigned char *hash, const char *base_url)
 {
 	char *url, *tmp;
 	struct strbuf buf = STRBUF_INIT;

 	if (http_is_verbose)
-		fprintf(stderr, "Getting index for pack %s\n", sha1_to_hex(sha1));
+		fprintf(stderr, "Getting index for pack %s\n", hash_to_hex(hash));

 	end_url_with_slash(&buf, base_url);
-	strbuf_addf(&buf, "objects/pack/pack-%s.idx", sha1_to_hex(sha1));
+	strbuf_addf(&buf, "objects/pack/pack-%s.idx", hash_to_hex(hash));
 	url = strbuf_detach(&buf, NULL);

-	strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(sha1));
+	strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(hash));
 	tmp = strbuf_detach(&buf, NULL);

 	if (http_get_file(url, tmp, NULL) != HTTP_OK) {
@@ -2235,10 +2235,10 @@ int finish_http_pack_request(struct http_pack_request *preq)
 		return -1;
 	}

-	unlink(sha1_pack_index_name(p->sha1));
+	unlink(sha1_pack_index_name(p->hash));

-	if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->sha1))
-	    || finalize_object_file(tmp_idx, sha1_pack_index_name(p->sha1))) {
+	if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->hash))
+	    || finalize_object_file(tmp_idx, sha1_pack_index_name(p->hash))) {
 		free(tmp_idx);
 		return -1;
 	}

@@ -2261,10 +2261,10 @@ struct http_pack_request *new_http_pack_request(

 	end_url_with_slash(&buf, base_url);
 	strbuf_addf(&buf, "objects/pack/pack-%s.pack",
-		sha1_to_hex(target->sha1));
+		hash_to_hex(target->hash));
 	preq->url = strbuf_detach(&buf, NULL);

-	strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->sha1));
+	strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->hash));
 	preq->packfile = fopen(preq->tmpfile.buf, "a");
 	if (!preq->packfile) {
 		error("Unable to open local file %s for pack",
@@ -2288,7 +2288,8 @@ struct http_pack_request *new_http_pack_request(
 		if (http_is_verbose)
 			fprintf(stderr,
 				"Resuming fetch of pack %s at byte %"PRIuMAX"\n",
-				sha1_to_hex(target->sha1), (uintmax_t)prev_posn);
+				hash_to_hex(target->hash),
+				(uintmax_t)prev_posn);
 		http_opt_request_remainder(preq->slot->curl, prev_posn);
 	}

@@ -2335,8 +2336,8 @@ static size_t fwrite_sha1_file(char *ptr, size_t eltsize, size_t nmemb,
 		freq->stream.next_out = expn;
 		freq->stream.avail_out = sizeof(expn);
 		freq->zret = git_inflate(&freq->stream, Z_SYNC_FLUSH);
-		git_SHA1_Update(&freq->c, expn,
-				sizeof(expn) - freq->stream.avail_out);
+		the_hash_algo->update_fn(&freq->c, expn,
+					 sizeof(expn) - freq->stream.avail_out);
 	} while (freq->stream.avail_in && freq->zret == Z_OK);
 	return size;
 }
@@ -2394,7 +2395,7 @@ struct http_object_request *new_http_object_request(const char *base_url,

 	git_inflate_init(&freq->stream);

-	git_SHA1_Init(&freq->c);
+	the_hash_algo->init_fn(&freq->c);

 	freq->url = get_remote_object_url(base_url, hex, 0);

@@ -2429,7 +2430,7 @@ struct http_object_request *new_http_object_request(const char *base_url,
 		if (prev_read == -1) {
 			memset(&freq->stream, 0, sizeof(freq->stream));
 			git_inflate_init(&freq->stream);
-			git_SHA1_Init(&freq->c);
+			the_hash_algo->init_fn(&freq->c);
 			if (prev_posn>0) {
 				prev_posn = 0;
 				lseek(freq->localfile, 0, SEEK_SET);
@@ -2500,7 +2501,7 @@ int finish_http_object_request(struct http_object_request *freq)
 	}

 	git_inflate_end(&freq->stream);
-	git_SHA1_Final(freq->real_oid.hash, &freq->c);
+	the_hash_algo->final_fn(freq->real_oid.hash, &freq->c);
 	if (freq->zret != Z_STREAM_END) {
 		unlink_or_warn(freq->tmpfile.buf);
 		return -1;
http.h (2 lines changed)

@@ -234,7 +234,7 @@ struct http_object_request {
 	long http_code;
 	struct object_id oid;
 	struct object_id real_oid;
-	git_SHA_CTX c;
+	git_hash_ctx c;
 	git_zstream stream;
 	int zret;
 	int rename;
khash.h (18 lines changed)

@@ -332,4 +332,22 @@ typedef kh_sha1_t khash_sha1;
 KHASH_INIT(sha1_pos, const unsigned char *, int, 1, sha1hash, __kh_oid_cmp)
 typedef kh_sha1_pos_t khash_sha1_pos;

+static inline unsigned int oid_hash(struct object_id oid)
+{
+	return sha1hash(oid.hash);
+}
+
+static inline int oid_equal(struct object_id a, struct object_id b)
+{
+	return oideq(&a, &b);
+}
+
+KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)
+
+KHASH_INIT(oid_map, struct object_id, void *, 1, oid_hash, oid_equal)
+typedef kh_oid_t khash_oid_map;
+
+KHASH_INIT(oid_pos, struct object_id, int, 1, oid_hash, oid_equal)
+typedef kh_oid_pos_t khash_oid_pos;
+
 #endif /* __AC_KHASH_H */
@@ -1122,7 +1122,7 @@ static int find_first_merges(struct repository *repo,
 	struct commit *commit;
 	int contains_another;

-	char merged_revision[42];
+	char merged_revision[GIT_MAX_HEXSZ + 2];
 	const char *rev_args[] = { "rev-list", "--merges", "--ancestry-path",
 				   "--all", merged_revision, NULL };
 	struct rev_info revs;
@@ -29,14 +29,14 @@ void init_notes_merge_options(struct repository *r,

 static int path_to_oid(const char *path, struct object_id *oid)
 {
-	char hex_oid[GIT_SHA1_HEXSZ];
+	char hex_oid[GIT_MAX_HEXSZ];
 	int i = 0;
-	while (*path && i < GIT_SHA1_HEXSZ) {
+	while (*path && i < the_hash_algo->hexsz) {
 		if (*path != '/')
 			hex_oid[i++] = *path;
 		path++;
 	}
-	if (*path || i != GIT_SHA1_HEXSZ)
+	if (*path || i != the_hash_algo->hexsz)
 		return -1;
 	return get_oid_hex(hex_oid, oid);
 }
notes.c (44 lines changed)

@@ -67,8 +67,9 @@ struct non_note {

 #define GET_NIBBLE(n, sha1) ((((sha1)[(n) >> 1]) >> ((~(n) & 0x01) << 2)) & 0x0f)

-#define KEY_INDEX (GIT_SHA1_RAWSZ - 1)
-#define FANOUT_PATH_SEPARATORS ((GIT_SHA1_HEXSZ / 2) - 1)
+#define KEY_INDEX (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS_MAX ((GIT_MAX_HEXSZ / 2) - 1)
 #define SUBTREE_SHA1_PREFIXCMP(key_sha1, subtree_sha1) \
 	(memcmp(key_sha1, subtree_sha1, subtree_sha1[KEY_INDEX]))

@@ -198,7 +199,7 @@ static void note_tree_remove(struct notes_tree *t,
			     struct leaf_node *entry)
 {
 	struct leaf_node *l;
-	struct int_node *parent_stack[GIT_SHA1_RAWSZ];
+	struct int_node *parent_stack[GIT_MAX_RAWSZ];
 	unsigned char i, j;
 	void **p = note_tree_search(t, &tree, &n, entry->key_oid.hash);

@@ -394,6 +395,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
 	void *buf;
 	struct tree_desc desc;
 	struct name_entry entry;
+	const unsigned hashsz = the_hash_algo->rawsz;

 	buf = fill_tree_descriptor(&desc, &subtree->val_oid);
 	if (!buf)
@@ -401,7 +403,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
 		    oid_to_hex(&subtree->val_oid));

 	prefix_len = subtree->key_oid.hash[KEY_INDEX];
-	if (prefix_len >= GIT_SHA1_RAWSZ)
+	if (prefix_len >= hashsz)
 		BUG("prefix_len (%"PRIuMAX") is out of range", (uintmax_t)prefix_len);
 	if (prefix_len * 2 < n)
 		BUG("prefix_len (%"PRIuMAX") is too small", (uintmax_t)prefix_len);
@@ -411,7 +413,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
 		struct leaf_node *l;
 		size_t path_len = strlen(entry.path);

-		if (path_len == 2 * (GIT_SHA1_RAWSZ - prefix_len)) {
+		if (path_len == 2 * (hashsz - prefix_len)) {
 			/* This is potentially the remainder of the SHA-1 */

 			if (!S_ISREG(entry.mode))
@@ -419,7 +421,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
				goto handle_non_note;

 			if (hex_to_bytes(object_oid.hash + prefix_len, entry.path,
-					 GIT_SHA1_RAWSZ - prefix_len))
+					 hashsz - prefix_len))
				goto handle_non_note; /* entry.path is not a SHA1 */

 			type = PTR_TYPE_NOTE;
@@ -439,7 +441,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
			 * except for the last byte, where we write
			 * the length:
			 */
-			memset(object_oid.hash + len, 0, GIT_SHA1_RAWSZ - len - 1);
+			memset(object_oid.hash + len, 0, hashsz - len - 1);
			object_oid.hash[KEY_INDEX] = (unsigned char)len;

			type = PTR_TYPE_SUBTREE;
@@ -527,22 +529,22 @@ static unsigned char determine_fanout(struct int_node *tree, unsigned char n,
 	return fanout + 1;
 }

-/* hex SHA1 + 19 * '/' + NUL */
-#define FANOUT_PATH_MAX GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS + 1
+/* hex oid + '/' between each pair of hex digits + NUL */
+#define FANOUT_PATH_MAX GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS_MAX + 1

-static void construct_path_with_fanout(const unsigned char *sha1,
+static void construct_path_with_fanout(const unsigned char *hash,
		unsigned char fanout, char *path)
 {
 	unsigned int i = 0, j = 0;
-	const char *hex_sha1 = sha1_to_hex(sha1);
-	assert(fanout < GIT_SHA1_RAWSZ);
+	const char *hex_hash = hash_to_hex(hash);
+	assert(fanout < the_hash_algo->rawsz);
 	while (fanout) {
-		path[i++] = hex_sha1[j++];
-		path[i++] = hex_sha1[j++];
+		path[i++] = hex_hash[j++];
+		path[i++] = hex_hash[j++];
 		path[i++] = '/';
 		fanout--;
 	}
-	xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_sha1 + j);
+	xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_hash + j);
 }

 static int for_each_note_helper(struct notes_tree *t, struct int_node *tree,
@@ -637,10 +639,10 @@ static inline int matches_tree_write_stack(struct tree_write_stack *tws,

 static void write_tree_entry(struct strbuf *buf, unsigned int mode,
		const char *path, unsigned int path_len, const
-		unsigned char *sha1)
+		unsigned char *hash)
 {
 	strbuf_addf(buf, "%o %.*s%c", mode, path_len, path, '\0');
-	strbuf_add(buf, sha1, GIT_SHA1_RAWSZ);
+	strbuf_add(buf, hash, the_hash_algo->rawsz);
 }

 static void tree_write_stack_init_subtree(struct tree_write_stack *tws,
@@ -652,7 +654,7 @@ static void tree_write_stack_init_subtree(struct tree_write_stack *tws,
 	n = (struct tree_write_stack *)
		xmalloc(sizeof(struct tree_write_stack));
 	n->next = NULL;
-	strbuf_init(&n->buf, 256 * (32 + GIT_SHA1_HEXSZ));	/* assume 256 entries per tree */
+	strbuf_init(&n->buf, 256 * (32 + the_hash_algo->hexsz));	/* assume 256 entries per tree */
 	n->path[0] = n->path[1] = '\0';
 	tws->next = n;
 	tws->path[0] = path[0];
@@ -757,7 +759,7 @@ static int write_each_note(const struct object_id *object_oid,
		note_path[note_path_len] = '\0';
		mode = 040000;
 	}
-	assert(note_path_len <= GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS);
+	assert(note_path_len <= GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS);

 	/* Weave non-note entries into note entries */
 	return  write_each_non_note_until(note_path, d) ||
@@ -1137,7 +1139,7 @@ int write_notes_tree(struct notes_tree *t, struct object_id *result)

 	/* Prepare for traversal of current notes tree */
 	root.next = NULL; /* last forward entry in list is grounded */
-	strbuf_init(&root.buf, 256 * (32 + GIT_SHA1_HEXSZ));	/* assume 256 entries */
+	strbuf_init(&root.buf, 256 * (32 + the_hash_algo->hexsz));	/* assume 256 entries */
 	root.path[0] = root.path[1] = '\0';
 	cb_data.root = &root;
 	cb_data.next_non_note = t->first_non_note;
@@ -1165,7 +1167,7 @@ void prune_notes(struct notes_tree *t, int flags)

 	while (l) {
 		if (flags & NOTES_PRUNE_VERBOSE)
-			printf("%s\n", sha1_to_hex(l->sha1));
+			printf("%s\n", hash_to_hex(l->sha1));
 		if (!(flags & NOTES_PRUNE_DRYRUN))
 			remove_note(t, l->sha1);
 		l = l->next;
@@ -77,7 +77,7 @@ struct packed_git {
freshened:1,
do_not_close:1,
pack_promisor:1;
unsigned char sha1[20];
unsigned char hash[GIT_MAX_RAWSZ];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
char pack_name[FLEX_ARRAY]; /* more */
oidset.h | 12

@@ -16,18 +16,6 @@
* table overhead.
*/

static inline unsigned int oid_hash(struct object_id oid)
{
return sha1hash(oid.hash);
}

static inline int oid_equal(struct object_id a, struct object_id b)
{
return oideq(&a, &b);
}

KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)

/**
* A single oidset; should be zero-initialized (or use OIDSET_INIT).
*/
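The hunk above removes oidset.h's private oid_hash()/oid_equal() helpers in favour of shared, hash-size-independent equivalents used elsewhere in this series. The standalone sketch below (hypothetical toy types, not git's oidhash()/oideq()) only illustrates the underlying idea: object IDs are already uniformly distributed, so a hash table can simply reuse their leading bytes as the hash value.

```c
#include <stdio.h>
#include <string.h>

#define TOY_MAX_RAWSZ 32	/* large enough for a SHA-256 digest */

struct toy_object_id {
	unsigned char hash[TOY_MAX_RAWSZ];
};

/* Reuse the leading bytes of the (already well-mixed) object id. */
static unsigned int toy_oidhash(const struct toy_object_id *oid)
{
	unsigned int h;
	memcpy(&h, oid->hash, sizeof(h));
	return h;
}

static int toy_oideq(const struct toy_object_id *a, const struct toy_object_id *b)
{
	return !memcmp(a->hash, b->hash, TOY_MAX_RAWSZ);
}

int main(void)
{
	struct toy_object_id a = { { 0xab, 0xcd, 0xef, 0x01 } };
	struct toy_object_id b = a;

	printf("hash=%08x equal=%d\n", toy_oidhash(&a), toy_oideq(&a, &b));
	return 0;
}
```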
@@ -142,13 +142,13 @@ static inline void reset_all_seen(void)
seen_objects_nr = 0;
}

static uint32_t find_object_pos(const unsigned char *sha1)
static uint32_t find_object_pos(const unsigned char *hash)
{
struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL);
struct object_entry *entry = packlist_find(writer.to_pack, hash, NULL);

if (!entry) {
die("Failed to write bitmap index. Packfile doesn't have full closure "
"(object %s is missing)", sha1_to_hex(sha1));
"(object %s is missing)", hash_to_hex(hash));
}

return oe_in_pack_pos(writer.to_pack, entry);

@@ -535,7 +535,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);

hashwrite(f, &header, sizeof(header));
hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz);
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
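Because bitmap_disk_header now reserves GIT_MAX_RAWSZ bytes for its checksum, the writer above trims the in-memory struct back to the active algorithm's size before writing it out. A small sketch of that arithmetic, using hypothetical stand-in types rather than git's real header layout:

```c
#include <stdio.h>
#include <stdint.h>

#define TOY_MAX_RAWSZ 32	/* room for the largest supported hash (SHA-256) */

struct toy_bitmap_header {
	char magic[4];
	uint16_t version;
	uint16_t options;
	uint32_t entry_count;
	unsigned char checksum[TOY_MAX_RAWSZ];
};

/* Bytes actually written for a repository whose hash is `rawsz` bytes wide. */
static size_t on_disk_size(size_t rawsz)
{
	return sizeof(struct toy_bitmap_header) - TOY_MAX_RAWSZ + rawsz;
}

int main(void)
{
	printf("SHA-1 header:   %zu bytes\n", on_disk_size(20));
	printf("SHA-256 header: %zu bytes\n", on_disk_size(32));
	return 0;
}
```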
@@ -18,7 +18,7 @@
* commit.
*/
struct stored_bitmap {
unsigned char sha1[20];
struct object_id oid;
struct ewah_bitmap *root;
struct stored_bitmap *xor;
int flags;

@@ -60,8 +60,8 @@ struct bitmap_index {
struct ewah_bitmap *blobs;
struct ewah_bitmap *tags;

/* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
khash_sha1 *bitmaps;
/* Map from object ID -> `stored_bitmap` for all the bitmapped commits */
kh_oid_map_t *bitmaps;

/* Number of bitmapped commits */
uint32_t entry_count;

@@ -80,7 +80,7 @@ struct bitmap_index {
struct object **objects;
uint32_t *hashes;
uint32_t count, alloc;
khash_sha1_pos *positions;
kh_oid_pos_t *positions;
} ext_index;

/* Bitmap result of the last performed walk */

@@ -138,7 +138,7 @@ static int load_bitmap_header(struct bitmap_index *index)
{
struct bitmap_disk_header *header = (void *)index->map;

if (index->map_size < sizeof(*header) + 20)
if (index->map_size < sizeof(*header) + the_hash_algo->rawsz)
return error("Corrupted bitmap index (missing header data)");

if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)

@@ -157,19 +157,19 @@ static int load_bitmap_header(struct bitmap_index *index)
"(Git requires BITMAP_OPT_FULL_DAG)");

if (flags & BITMAP_OPT_HASH_CACHE) {
unsigned char *end = index->map + index->map_size - 20;
unsigned char *end = index->map + index->map_size - the_hash_algo->rawsz;
index->hashes = ((uint32_t *)end) - index->pack->num_objects;
}
}

index->entry_count = ntohl(header->entry_count);
index->map_pos += sizeof(*header);
index->map_pos += sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
return 0;
}

static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
struct ewah_bitmap *root,
const unsigned char *sha1,
const unsigned char *hash,
struct stored_bitmap *xor_with,
int flags)
{

@@ -181,15 +181,15 @@ static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
stored->root = root;
stored->xor = xor_with;
stored->flags = flags;
hashcpy(stored->sha1, sha1);
oidread(&stored->oid, hash);

hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);
hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret);

/* a 0 return code means the insertion succeeded with no changes,
* because the SHA1 already existed on the map. this is bad, there
* shouldn't be duplicated commits in the index */
if (ret == 0) {
error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
error("Duplicate entry in bitmap index: %s", hash_to_hex(hash));
return NULL;
}
@@ -306,8 +306,8 @@ static int load_pack_bitmap(struct bitmap_index *bitmap_git)
{
assert(bitmap_git->map);

bitmap_git->bitmaps = kh_init_sha1();
bitmap_git->ext_index.positions = kh_init_sha1_pos();
bitmap_git->bitmaps = kh_init_oid_map();
bitmap_git->ext_index.positions = kh_init_oid_pos();
if (load_pack_revindex(bitmap_git->pack))
goto failed;

@@ -363,10 +363,10 @@ struct include_data {
};

static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
const unsigned char *sha1)
const struct object_id *oid)
{
khash_sha1_pos *positions = bitmap_git->ext_index.positions;
khiter_t pos = kh_get_sha1_pos(positions, sha1);
khash_oid_pos *positions = bitmap_git->ext_index.positions;
khiter_t pos = kh_get_oid_pos(positions, *oid);

if (pos < kh_end(positions)) {
int bitmap_pos = kh_value(positions, pos);

@@ -377,9 +377,9 @@ static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
}

static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
const unsigned char *sha1)
const struct object_id *oid)
{
off_t offset = find_pack_entry_one(sha1, bitmap_git->pack);
off_t offset = find_pack_entry_one(oid->hash, bitmap_git->pack);
if (!offset)
return -1;

@@ -387,10 +387,10 @@ static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
}

static int bitmap_position(struct bitmap_index *bitmap_git,
const unsigned char *sha1)
const struct object_id *oid)
{
int pos = bitmap_position_packfile(bitmap_git, sha1);
return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, sha1);
int pos = bitmap_position_packfile(bitmap_git, oid);
return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, oid);
}

static int ext_index_add_object(struct bitmap_index *bitmap_git,
@@ -402,7 +402,7 @@ static int ext_index_add_object(struct bitmap_index *bitmap_git,
int hash_ret;
int bitmap_pos;

hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret);
hash_pos = kh_put_oid_pos(eindex->positions, object->oid, &hash_ret);
if (hash_ret > 0) {
if (eindex->count >= eindex->alloc) {
eindex->alloc = (eindex->alloc + 16) * 3 / 2;

@@ -432,7 +432,7 @@ static void show_object(struct object *object, const char *name, void *data_)
struct bitmap_show_data *data = data_;
int bitmap_pos;

bitmap_pos = bitmap_position(data->bitmap_git, object->oid.hash);
bitmap_pos = bitmap_position(data->bitmap_git, &object->oid);

if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git, object,

@@ -447,7 +447,7 @@ static void show_commit(struct commit *commit, void *data)

static int add_to_include_set(struct bitmap_index *bitmap_git,
struct include_data *data,
const unsigned char *sha1,
const struct object_id *oid,
int bitmap_pos)
{
khiter_t hash_pos;

@@ -458,7 +458,7 @@ static int add_to_include_set(struct bitmap_index *bitmap_git,
if (bitmap_get(data->base, bitmap_pos))
return 0;

hash_pos = kh_get_sha1(bitmap_git->bitmaps, sha1);
hash_pos = kh_get_oid_map(bitmap_git->bitmaps, *oid);
if (hash_pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, hash_pos);
bitmap_or_ewah(data->base, lookup_stored_bitmap(st));

@@ -474,13 +474,13 @@ static int should_include(struct commit *commit, void *_data)
struct include_data *data = _data;
int bitmap_pos;

bitmap_pos = bitmap_position(data->bitmap_git, commit->object.oid.hash);
bitmap_pos = bitmap_position(data->bitmap_git, &commit->object.oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git,
(struct object *)commit,
NULL);

if (!add_to_include_set(data->bitmap_git, data, commit->object.oid.hash,
if (!add_to_include_set(data->bitmap_git, data, &commit->object.oid,
bitmap_pos)) {
struct commit_list *parent = commit->parents;

@@ -518,7 +518,7 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git,
roots = roots->next;

if (object->type == OBJ_COMMIT) {
khiter_t pos = kh_get_sha1(bitmap_git->bitmaps, object->oid.hash);
khiter_t pos = kh_get_oid_map(bitmap_git->bitmaps, object->oid);

if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);

@@ -560,7 +560,7 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git,
int pos;

roots = roots->next;
pos = bitmap_position(bitmap_git, object->oid.hash);
pos = bitmap_position(bitmap_git, &object->oid);

if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
object->flags &= ~UNINTERESTING;

@@ -806,7 +806,7 @@ int reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,

fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
reuse_objects, result->words[i]);
fprintf(stderr, " %s\n", sha1_to_hex(sha1));
fprintf(stderr, " %s\n", hash_to_hex(sha1));
}
#endif
@@ -926,7 +926,7 @@ static void test_show_object(struct object *object, const char *name,
struct bitmap_test_data *tdata = data;
int bitmap_pos;

bitmap_pos = bitmap_position(tdata->bitmap_git, object->oid.hash);
bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));

@@ -940,7 +940,7 @@ static void test_show_commit(struct commit *commit, void *data)
int bitmap_pos;

bitmap_pos = bitmap_position(tdata->bitmap_git,
commit->object.oid.hash);
&commit->object.oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));

@@ -967,7 +967,7 @@ void test_bitmap_walk(struct rev_info *revs)
bitmap_git->version, bitmap_git->entry_count);

root = revs->pending.objects[0].item;
pos = kh_get_sha1(bitmap_git->bitmaps, root->oid.hash);
pos = kh_get_oid_map(bitmap_git->bitmaps, root->oid);

if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);

@@ -1081,7 +1081,7 @@ int rebuild_existing_bitmaps(struct bitmap_index *bitmap_git,
lookup_stored_bitmap(stored),
rebuild)) {
hash_pos = kh_put_sha1(reused_bitmaps,
stored->sha1,
stored->oid.hash,
&hash_ret);
kh_value(reused_bitmaps, hash_pos) =
bitmap_to_ewah(rebuild);

@@ -1109,7 +1109,7 @@ void free_bitmap_index(struct bitmap_index *b)
ewah_pool_free(b->trees);
ewah_pool_free(b->blobs);
ewah_pool_free(b->tags);
kh_destroy_sha1(b->bitmaps);
kh_destroy_oid_map(b->bitmaps);
free(b->ext_index.objects);
free(b->ext_index.hashes);
bitmap_free(b->result);

@@ -1117,8 +1117,8 @@ void free_bitmap_index(struct bitmap_index *b)
free(b);
}

int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
const unsigned char *sha1)
int bitmap_has_oid_in_uninteresting(struct bitmap_index *bitmap_git,
const struct object_id *oid)
{
int pos;

@@ -1127,7 +1127,7 @@ int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
if (!bitmap_git->haves)
return 0; /* walk had no "haves" */

pos = bitmap_position_packfile(bitmap_git, sha1);
pos = bitmap_position_packfile(bitmap_git, oid);
if (pos < 0)
return 0;
@@ -14,7 +14,7 @@ struct bitmap_disk_header {
uint16_t version;
uint16_t options;
uint32_t entry_count;
unsigned char checksum[20];
unsigned char checksum[GIT_MAX_RAWSZ];
};

static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};

@@ -59,7 +59,7 @@ void free_bitmap_index(struct bitmap_index *);
* queried to see if a particular object was reachable from any of the
* objects flagged as UNINTERESTING.
*/
int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1);
int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_id *oid);

void bitmap_writer_show_progress(int show);
void bitmap_writer_set_checksum(unsigned char *sha1);
@@ -235,7 +235,7 @@ struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path)
struct packed_git *p = alloc_packed_git(alloc);

memcpy(p->pack_name, path, alloc); /* includes NUL */
hashcpy(p->sha1, sha1);
hashcpy(p->hash, sha1);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;

@@ -732,8 +732,8 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
p->pack_local = local;
p->mtime = st.st_mtime;
if (path_len < the_hash_algo->hexsz ||
get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->sha1))
hashclr(p->sha1);
get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->hash))
hashclr(p->hash);
return p;
}
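add_packed_git() now takes the pack checksum from the trailing the_hash_algo->hexsz characters of the file name instead of a hard-coded 40. A simplified, hypothetical version of that parsing step, with no git internals:

```c
#include <stdio.h>
#include <string.h>

/* Decode one hex digit, or return -1 if it is not hex. */
static int hexval(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

/*
 * Extract the trailing `hexsz`-character object name from a pack path
 * (".pack" already stripped), writing the raw bytes into `hash`.
 * Returns 0 on success, -1 on malformed input.
 */
static int hash_from_pack_name(const char *path, size_t path_len,
			       size_t hexsz, unsigned char *hash)
{
	const char *hex;
	size_t i;

	if (path_len < hexsz)
		return -1;
	hex = path + path_len - hexsz;
	for (i = 0; i < hexsz; i += 2) {
		int hi = hexval(hex[i]), lo = hexval(hex[i + 1]);
		if (hi < 0 || lo < 0)
			return -1;
		hash[i / 2] = (unsigned char)(hi << 4 | lo);
	}
	return 0;
}

int main(void)
{
	unsigned char hash[32];
	const char *name = "pack-2e4f2f0d2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c";

	if (!hash_from_pack_name(name, strlen(name), 40, hash))
		printf("first byte: %02x\n", hash[0]);	/* 2e */
	return 0;
}
```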
@@ -980,7 +980,7 @@ sub cat_blob {
return -1;
}

if ($description !~ /^[0-9a-fA-F]{40} \S+ (\d+)$/) {
if ($description !~ /^[0-9a-fA-F]{40}(?:[0-9a-fA-F]{24})? \S+ (\d+)$/) {
carp "Unexpected result returned from git cat-file";
return -1;
}
read-cache.c | 74
@@ -1641,39 +1641,24 @@ struct ondisk_cache_entry {
uint32_t uid;
uint32_t gid;
uint32_t size;
unsigned char sha1[20];
uint16_t flags;
char name[FLEX_ARRAY]; /* more */
};

/*
* This struct is used when CE_EXTENDED bit is 1
* The struct must match ondisk_cache_entry exactly from
* ctime till flags
*/
struct ondisk_cache_entry_extended {
struct cache_time ctime;
struct cache_time mtime;
uint32_t dev;
uint32_t ino;
uint32_t mode;
uint32_t uid;
uint32_t gid;
uint32_t size;
unsigned char sha1[20];
uint16_t flags;
uint16_t flags2;
char name[FLEX_ARRAY]; /* more */
/*
* unsigned char hash[hashsz];
* uint16_t flags;
* if (flags & CE_EXTENDED)
* uint16_t flags2;
*/
unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
char name[FLEX_ARRAY];
};

/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
#define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
ondisk_cache_entry_size(ce_namelen(ce)))
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))

/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
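With this change a cache entry's hash, flag words and name all live in one variable-sized data[] tail, and readers locate the 16-bit flags relative to the active hash size. A minimal sketch of that pointer arithmetic over a fake in-memory buffer (hypothetical constants and host byte order, not the real on-disk index format):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_CE_EXTENDED 0x4000	/* stand-in for git's CE_EXTENDED bit */

/*
 * Layout of the variable tail: hash bytes, then a 16-bit flags word,
 * then (only if the extended bit is set) a second flags word, then the name.
 */
static const char *entry_name(const unsigned char *data, size_t hashsz,
			      uint16_t *flags_out)
{
	uint16_t flags;

	memcpy(&flags, data + hashsz, sizeof(flags));
	*flags_out = flags;
	return (const char *)(data + hashsz +
			      ((flags & TOY_CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t));
}

int main(void)
{
	unsigned char data[64] = { 0 };
	size_t hashsz = 20;		/* pretend SHA-1 */
	uint16_t flags = 0;		/* extended bit not set */
	const char *name;

	memcpy(data + hashsz, &flags, sizeof(flags));
	strcpy((char *)data + hashsz + sizeof(uint16_t), "Makefile");

	name = entry_name(data, hashsz, &flags);
	printf("name = %s\n", name);	/* Makefile */
	return 0;
}
```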
@@ -1747,6 +1732,8 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
struct cache_entry *ce;
size_t len;
const char *name;
const unsigned hashsz = the_hash_algo->rawsz;
const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
unsigned int flags;
size_t copy_len = 0;
/*

@@ -1759,22 +1746,20 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
int expand_name_field = version == 4;

/* On-disk flags are just 16 bits */
flags = get_be16(&ondisk->flags);
flags = get_be16(flagsp);
len = flags & CE_NAMEMASK;

if (flags & CE_EXTENDED) {
struct ondisk_cache_entry_extended *ondisk2;
int extended_flags;
ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
extended_flags = get_be16(&ondisk2->flags2) << 16;
extended_flags = get_be16(flagsp + 1) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
name = ondisk2->name;
name = (const char *)(flagsp + 2);
}
else
name = ondisk->name;
name = (const char *)(flagsp + 1);

if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;

@@ -1813,7 +1798,9 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
hashcpy(ce->oid.hash, ondisk->sha1);
hashcpy(ce->oid.hash, ondisk->data);
memcpy(ce->name, name, len);
ce->name[len] = '\0';

if (expand_name_field) {
if (copy_len)

@@ -2557,6 +2544,8 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
struct cache_entry *ce)
{
short flags;
const unsigned hashsz = the_hash_algo->rawsz;
uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);

ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);

@@ -2568,15 +2557,13 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
hashcpy(ondisk->sha1, ce->oid.hash);
hashcpy(ondisk->data, ce->oid.hash);

flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
ondisk->flags = htons(flags);
flagsp[0] = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
struct ondisk_cache_entry_extended *ondisk2;
ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
}
}

@@ -2595,10 +2582,7 @@ static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
stripped_name = 1;
}

if (ce->ce_flags & CE_EXTENDED)
size = offsetof(struct ondisk_cache_entry_extended, name);
else
size = offsetof(struct ondisk_cache_entry, name);
size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);

if (!previous_name) {
int len = ce_namelen(ce);

@@ -2756,7 +2740,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
struct cache_entry **cache = istate->cache;
int entries = istate->cache_nr;
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct ondisk_cache_entry ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
@@ -72,7 +72,7 @@ static int parse_refspec(struct refspec_item *item, const char *refspec, int fet
/* LHS */
if (!*item->src)
; /* empty is ok; it means "HEAD" */
else if (llen == GIT_SHA1_HEXSZ && !get_oid_hex(item->src, &unused))
else if (llen == the_hash_algo->hexsz && !get_oid_hex(item->src, &unused))
item->exact_sha1 = 1; /* ok */
else if (!check_refname_format(item->src, flags))
; /* valid looking ref is ok */
@@ -250,7 +250,7 @@ static struct ref *parse_info_refs(struct discovery *heads)
if (data[i] == '\t')
mid = &data[i];
if (data[i] == '\n') {
if (mid - start != 40)
if (mid - start != the_hash_algo->hexsz)
die(_("%sinfo/refs not valid: is this a git repository?"),
transport_anonymize_url(url.buf));
data[i] = 0;

@@ -1114,12 +1114,13 @@ static void parse_fetch(struct strbuf *buf)
const char *name;
struct ref *ref;
struct object_id old_oid;
const char *q;

if (get_oid_hex(p, &old_oid))
if (parse_oid_hex(p, &old_oid, &q))
die(_("protocol error: expected sha/ref, got %s'"), p);
if (p[GIT_SHA1_HEXSZ] == ' ')
name = p + GIT_SHA1_HEXSZ + 1;
else if (!p[GIT_SHA1_HEXSZ])
if (*q == ' ')
name = q + 1;
else if (!*q)
name = "";
else
die(_("protocol error: expected sha/ref, got %s'"), p);
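parse_oid_hex() both validates a hex object name of the current algorithm's length and returns a pointer just past it, which is what lets parse_fetch() above drop its GIT_SHA1_HEXSZ indexing. A rough standalone equivalent (hypothetical helper with an explicit width argument, not git's implementation):

```c
#include <stdio.h>
#include <stdlib.h>

/*
 * Parse `hexsz` hex characters starting at `p` into `raw` and set *end to
 * the first character after them.  Returns 0 on success, -1 otherwise.
 */
static int parse_hex_prefix(const char *p, size_t hexsz,
			    unsigned char *raw, const char **end)
{
	size_t i;

	for (i = 0; i < hexsz; i += 2) {
		char byte[3];
		char *err;

		if (!p[i] || !p[i + 1])
			return -1;
		byte[0] = p[i];
		byte[1] = p[i + 1];
		byte[2] = '\0';
		raw[i / 2] = (unsigned char)strtoul(byte, &err, 16);
		if (*err)
			return -1;
	}
	*end = p + hexsz;
	return 0;
}

int main(void)
{
	const char *line = "0123456789abcdef0123456789abcdef01234567 refs/heads/master";
	unsigned char raw[32];
	const char *q;

	if (!parse_hex_prefix(line, 40, raw, &q) && *q == ' ')
		printf("ref name: %s\n", q + 1);	/* refs/heads/master */
	return 0;
}
```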
@@ -189,6 +189,14 @@ int hash_algo_by_id(uint32_t format_id)
return GIT_HASH_UNKNOWN;
}

int hash_algo_by_length(int len)
{
int i;
for (i = 1; i < GIT_HASH_NALGOS; i++)
if (len == hash_algos[i].rawsz)
return i;
return GIT_HASH_UNKNOWN;
}

/*
* This is meant to hold a *small* number of objects that you would
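hash_algo_by_length() lets callers map a raw digest length back to a hash algorithm, for example when the only clue is the size of a trailing checksum. A self-contained sketch of the same lookup over a hypothetical two-entry table:

```c
#include <stdio.h>

struct toy_hash_algo {
	const char *name;
	size_t rawsz;	/* size of a binary digest in bytes */
	size_t hexsz;	/* size of a hex digest in characters */
};

/* Index 0 is reserved for "unknown", mirroring GIT_HASH_UNKNOWN. */
static const struct toy_hash_algo algos[] = {
	{ "unknown", 0, 0 },
	{ "sha1", 20, 40 },
	{ "sha256", 32, 64 },
};

static int algo_by_length(size_t len)
{
	size_t i;

	for (i = 1; i < sizeof(algos) / sizeof(algos[0]); i++)
		if (len == algos[i].rawsz)
			return (int)i;
	return 0;	/* unknown */
}

int main(void)
{
	printf("20 bytes -> %s\n", algos[algo_by_length(20)].name);
	printf("32 bytes -> %s\n", algos[algo_by_length(32)].name);
	printf("16 bytes -> %s\n", algos[algo_by_length(16)].name);
	return 0;
}
```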
@@ -994,7 +994,7 @@ static int submodule_needs_pushing(struct repository *r,
if (start_command(&cp))
die("Could not run 'git rev-list <commits> --not --remotes -n 1' command in submodule %s",
path);
if (strbuf_read(&buf, cp.out, 41))
if (strbuf_read(&buf, cp.out, the_hash_algo->hexsz + 1))
needs_pushing = 1;
finish_command(&cp);
close(cp.out);
@@ -139,7 +139,7 @@ create_lib_submodule_repo () {
git revert HEAD &&

git checkout -b invalid_sub1 add_sub1 &&
git update-index --cacheinfo 160000 0123456789012345678901234567890123456789 sub1 &&
git update-index --cacheinfo 160000 $(test_oid numeric) sub1 &&
git commit -m "Invalid sub1 commit" &&
git checkout -b valid_sub1 &&
git revert HEAD &&

@@ -196,6 +196,7 @@ test_git_directory_exists() {
# the submodule repo if it doesn't exist and configures the most problematic
# settings for diff.ignoreSubmodules.
prolog () {
test_oid_init &&
(test -d submodule_update_repo || create_lib_submodule_repo) &&
test_config_global diff.ignoreSubmodules all &&
test_config diff.ignoreSubmodules all