Mirror of https://github.com/microsoft/git.git
Merge branch 'mt/use-passed-repo-more-in-funcs'
Some codepaths were given a repository instance as a parameter to work
in the repository, but passed the_repository instance to its callees,
which has been cleaned up (somewhat).

* mt/use-passed-repo-more-in-funcs:
  sha1-file: allow check_object_signature() to handle any repo
  sha1-file: pass git_hash_algo to hash_object_file()
  sha1-file: pass git_hash_algo to write_object_file_prepare()
  streaming: allow open_istream() to handle any repo
  pack-check: use given repo's hash_algo at verify_packfile()
  cache-tree: use given repo's hash_algo at verify_one()
  diff: make diff_populate_filespec() honor its repo argument
This commit is contained in:
Commit 78e67cda42
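For orientation, the call shapes after this series look like the minimal sketch below: the hash algorithm and the repository are passed explicitly instead of being taken from the_repository. The locals r, buf, len, oid, type, size and st are hypothetical stand-ins, not code from this commit.

/*
 * Hypothetical caller of the post-series APIs; the variables here are
 * illustrative only.
 */
struct object_id oid;
enum object_type type;
unsigned long size;
struct git_istream *st;

/* hash a buffer with the given repository's hash algorithm */
hash_object_file(r->hash_algo, buf, len, "blob", &oid);

/* verify the buffer against its object name in that repository */
if (check_object_signature(r, &oid, buf, len, "blob") < 0)
	die(_("hash mismatch %s"), oid_to_hex(&oid));

/* stream an object out of that repository */
st = open_istream(r, &oid, &type, &size, NULL);
if (!st)
	die(_("cannot stream blob %s"), oid_to_hex(&oid));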
apply.c | 6

@@ -3157,7 +3157,8 @@ static int apply_binary(struct apply_state *state,
 		 * See if the old one matches what the patch
 		 * applies to.
 		 */
-		hash_object_file(img->buf, img->len, blob_type, &oid);
+		hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+				 &oid);
 		if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix))
 			return error(_("the patch applies to '%s' (%s), "
 				       "which does not match the "
@@ -3202,7 +3203,8 @@ static int apply_binary(struct apply_state *state,
 				     name);
 
 		/* verify that the result matches */
-		hash_object_file(img->buf, img->len, blob_type, &oid);
+		hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+				 &oid);
 		if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix))
 			return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
 				     name, patch->new_oid_prefix, oid_to_hex(&oid));
@@ -112,7 +112,7 @@ static void write_trailer(void)
  * queues up writes, so that all our write(2) calls write exactly one
  * full block; pads writes to RECORDSIZE
  */
-static int stream_blocked(const struct object_id *oid)
+static int stream_blocked(struct repository *r, const struct object_id *oid)
 {
 	struct git_istream *st;
 	enum object_type type;
@@ -120,7 +120,7 @@ static int stream_blocked(const struct object_id *oid)
 	char buf[BLOCKSIZE];
 	ssize_t readlen;
 
-	st = open_istream(oid, &type, &sz, NULL);
+	st = open_istream(r, oid, &type, &sz, NULL);
 	if (!st)
 		return error(_("cannot stream blob %s"), oid_to_hex(oid));
 	for (;;) {
@@ -324,7 +324,7 @@ static int write_tar_entry(struct archiver_args *args,
 		if (buffer)
 			write_blocked(buffer, size);
 		else
-			err = stream_blocked(oid);
+			err = stream_blocked(args->repo, oid);
 	}
 	free(buffer);
 	return err;
@@ -345,7 +345,8 @@ static int write_zip_entry(struct archiver_args *args,
 
 	if (S_ISREG(mode) && type == OBJ_BLOB && !args->convert &&
 	    size > big_file_threshold) {
-		stream = open_istream(oid, &type, &size, NULL);
+		stream = open_istream(args->repo, oid, &type, &size,
+				      NULL);
 		if (!stream)
 			return error(_("cannot stream blob %s"),
 				     oid_to_hex(oid));
@@ -293,7 +293,8 @@ static void export_blob(const struct object_id *oid)
 		buf = read_object_file(oid, &type, &size);
 		if (!buf)
 			die("could not read blob %s", oid_to_hex(oid));
-		if (check_object_signature(oid, buf, size, type_name(type)) < 0)
+		if (check_object_signature(the_repository, oid, buf, size,
+					   type_name(type)) < 0)
 			die("oid mismatch in blob %s", oid_to_hex(oid));
 		object = parse_object_buffer(the_repository, oid, type,
 					     size, buf, &eaten);
@@ -757,7 +757,8 @@ static int check_collison(struct object_entry *entry)
 
 	memset(&data, 0, sizeof(data));
 	data.entry = entry;
-	data.st = open_istream(&entry->idx.oid, &type, &size, NULL);
+	data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
+			       NULL);
 	if (!data.st)
 		return -1;
 	if (size != entry->size || type != entry->type)
@@ -948,7 +949,7 @@ static void resolve_delta(struct object_entry *delta_obj,
 	free(delta_data);
 	if (!result->data)
 		bad_object(delta_obj->idx.offset, _("failed to apply delta"));
-	hash_object_file(result->data, result->size,
+	hash_object_file(the_hash_algo, result->data, result->size,
 			 type_name(delta_obj->real_type), &delta_obj->idx.oid);
 	sha1_object(result->data, NULL, result->size, delta_obj->real_type,
 		    &delta_obj->idx.oid);
@@ -1383,8 +1384,9 @@ static void fix_unresolved_deltas(struct hashfile *f)
 		if (!base_obj->data)
 			continue;
 
-		if (check_object_signature(&d->oid, base_obj->data,
-					   base_obj->size, type_name(type)))
+		if (check_object_signature(the_repository, &d->oid,
+					   base_obj->data, base_obj->size,
+					   type_name(type)))
 			die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
 		base_obj->obj = append_obj_to_pack(f, d->oid.hash,
 						   base_obj->data, base_obj->size, type);
@@ -29,8 +29,11 @@ static int verify_object(const struct object_id *oid, const char *expected_type)
 	const struct object_id *repl = lookup_replace_object(the_repository, oid);
 
 	if (buffer) {
-		if (type == type_from_string(expected_type))
-			ret = check_object_signature(repl, buffer, size, expected_type);
+		if (type == type_from_string(expected_type)) {
+			ret = check_object_signature(the_repository, repl,
+						     buffer, size,
+						     expected_type);
+		}
 		free(buffer);
 	}
 	return ret;
@@ -304,7 +304,8 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
 	if (!usable_delta) {
 		if (oe_type(entry) == OBJ_BLOB &&
 		    oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
-		    (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
+		    (st = open_istream(the_repository, &entry->idx.oid, &type,
+				       &size, NULL)) != NULL)
 			buf = NULL;
 		else {
 			buf = read_object_file(&entry->idx.oid, &type, &size);
@@ -409,7 +409,8 @@ static int check_one_mergetag(struct commit *commit,
 	struct tag *tag;
 	int i;
 
-	hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
+	hash_object_file(the_hash_algo, extra->value, extra->len,
+			 type_name(OBJ_TAG), &tag_oid);
 	tag = lookup_tag(the_repository, &tag_oid);
 	if (!tag)
 		return error(_("bad mergetag in commit '%s'"), ref);
@@ -265,7 +265,8 @@ static void write_object(unsigned nr, enum object_type type,
 	} else {
 		struct object *obj;
 		int eaten;
-		hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
+		hash_object_file(the_hash_algo, buf, size, type_name(type),
+				 &obj_list[nr].oid);
 		added_object(nr, type, buf, size);
 		obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
 					  type, size, buf,
cache-tree.c | 11

@@ -407,13 +407,15 @@ static int update_one(struct cache_tree *it,
 
 	if (repair) {
 		struct object_id oid;
-		hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
+				 tree_type, &oid);
 		if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
 			oidcpy(&it->oid, &oid);
 		else
 			to_invalidate = 1;
 	} else if (dryrun) {
-		hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
+				 tree_type, &it->oid);
 	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
 				     &it->oid)) {
 		strbuf_release(&buffer);
@@ -826,9 +828,10 @@ static void verify_one(struct repository *r,
 			i++;
 		}
 		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
-		strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz);
+		strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
 	}
-	hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid);
+	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
+			 &new_oid);
 	if (!oideq(&new_oid, &it->oid))
 		BUG("cache-tree for path %.*s does not match. "
 		    "Expected %s got %s", len, path->buf,
cache.h | 3

@@ -1363,7 +1363,8 @@ int git_open_cloexec(const char *name, int flags);
 int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
 int parse_loose_header(const char *hdr, unsigned long *sizep);
 
-int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
+int check_object_signature(struct repository *r, const struct object_id *oid,
+			   void *buf, unsigned long size, const char *type);
 
 int finalize_object_file(const char *tmpfile, const char *filename);
 
@@ -1146,7 +1146,7 @@ static int ident_to_worktree(const char *src, size_t len,
 	/* are we "faking" in place editing ? */
 	if (src == buf->buf)
 		to_free = strbuf_detach(buf, NULL);
-	hash_object_file(src, len, "blob", &oid);
+	hash_object_file(the_hash_algo, src, len, "blob", &oid);
 
 	strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
 	for (;;) {
diff.c | 2

@@ -4024,7 +4024,7 @@ int diff_populate_filespec(struct repository *r,
 				return 0;
 			}
 		}
-		s->data = read_object_file(&s->oid, &type, &s->size);
+		s->data = repo_read_object_file(r, &s->oid, &type, &s->size);
 		if (!s->data)
 			die("unable to read %s", oid_to_hex(&s->oid));
 		s->should_free = 1;
@@ -263,8 +263,8 @@ static unsigned int hash_filespec(struct repository *r,
 	if (!filespec->oid_valid) {
 		if (diff_populate_filespec(r, filespec, 0))
 			return 0;
-		hash_object_file(filespec->data, filespec->size, "blob",
-				 &filespec->oid);
+		hash_object_file(r->hash_algo, filespec->data, filespec->size,
+				 "blob", &filespec->oid);
 	}
 	return oidhash(&filespec->oid);
 }
dir.c | 4

@@ -1074,8 +1074,8 @@ static int add_patterns(const char *fname, const char *base, int baselen,
 				oidcpy(&oid_stat->oid,
 				       &istate->cache[pos]->oid);
 			else
-				hash_object_file(buf, size, "blob",
-						 &oid_stat->oid);
+				hash_object_file(the_hash_algo, buf, size,
+						 "blob", &oid_stat->oid);
 			fill_stat_data(&oid_stat->stat, &st);
 			oid_stat->valid = 1;
 		}
@@ -501,7 +501,8 @@ static int show_one_mergetag(struct commit *commit,
 	int status, nth;
 	size_t payload_size, gpg_message_offset;
 
-	hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
+	hash_object_file(the_hash_algo, extra->value, extra->len,
+			 type_name(OBJ_TAG), &oid);
 	tag = lookup_tag(the_repository, &oid);
 	if (!tag)
 		return -1; /* error message already given */
@@ -201,8 +201,9 @@ static inline void *repo_read_object_file(struct repository *r,
 /* Read and unpack an object file into memory, write memory to an object file */
 int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
 
-int hash_object_file(const void *buf, unsigned long len,
-		     const char *type, struct object_id *oid);
+int hash_object_file(const struct git_hash_algo *algo, const void *buf,
+		     unsigned long len, const char *type,
+		     struct object_id *oid);
 
 int write_object_file(const void *buf, unsigned long len,
 		      const char *type, struct object_id *oid);
object.c | 5

@@ -262,7 +262,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
 	if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
 	    (!obj && repo_has_object_file(r, oid) &&
 	     oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
-		if (check_object_signature(repl, NULL, 0, NULL) < 0) {
+		if (check_object_signature(r, repl, NULL, 0, NULL) < 0) {
 			error(_("hash mismatch %s"), oid_to_hex(oid));
 			return NULL;
 		}
@@ -272,7 +272,8 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
 
 	buffer = repo_read_object_file(r, oid, &type, &size);
 	if (buffer) {
-		if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
+		if (check_object_signature(r, repl, buffer, size,
+					   type_name(type)) < 0) {
 			free(buffer);
 			error(_("hash mismatch %s"), oid_to_hex(repl));
 			return NULL;
pack-check.c | 12

@@ -67,23 +67,23 @@ static int verify_packfile(struct repository *r,
 	if (!is_pack_valid(p))
 		return error("packfile %s cannot be accessed", p->pack_name);
 
-	the_hash_algo->init_fn(&ctx);
+	r->hash_algo->init_fn(&ctx);
 	do {
 		unsigned long remaining;
 		unsigned char *in = use_pack(p, w_curs, offset, &remaining);
 		offset += remaining;
 		if (!pack_sig_ofs)
-			pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
+			pack_sig_ofs = p->pack_size - r->hash_algo->rawsz;
 		if (offset > pack_sig_ofs)
 			remaining -= (unsigned int)(offset - pack_sig_ofs);
-		the_hash_algo->update_fn(&ctx, in, remaining);
+		r->hash_algo->update_fn(&ctx, in, remaining);
 	} while (offset < pack_sig_ofs);
-	the_hash_algo->final_fn(hash, &ctx);
+	r->hash_algo->final_fn(hash, &ctx);
 	pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
 	if (!hasheq(hash, pack_sig))
 		err = error("%s pack checksum mismatch",
 			    p->pack_name);
-	if (!hasheq(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+	if (!hasheq(index_base + index_size - r->hash_algo->hexsz, pack_sig))
 		err = error("%s pack checksum does not match its index",
 			    p->pack_name);
 	unuse_pack(w_curs);
@@ -144,7 +144,7 @@ static int verify_packfile(struct repository *r,
 			err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
 				    oid_to_hex(entries[i].oid.oid), p->pack_name,
 				    (uintmax_t)entries[i].offset);
-		else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type)))
+		else if (check_object_signature(r, entries[i].oid.oid, data, size, type_name(type)))
 			err = error("packed %s from %s is corrupt",
 				    oid_to_hex(entries[i].oid.oid), p->pack_name);
 		else if (fn) {
sha1-file.c | 55

@@ -971,8 +971,8 @@ void *xmmap(void *start, size_t length,
  * With "map" == NULL, try reading the object named with "oid" using
  * the streaming interface and rehash it to do the same.
  */
-int check_object_signature(const struct object_id *oid, void *map,
-			   unsigned long size, const char *type)
+int check_object_signature(struct repository *r, const struct object_id *oid,
+			   void *map, unsigned long size, const char *type)
 {
 	struct object_id real_oid;
 	enum object_type obj_type;
@@ -982,11 +982,11 @@ int check_object_signature(const struct object_id *oid, void *map,
 	int hdrlen;
 
 	if (map) {
-		hash_object_file(map, size, type, &real_oid);
+		hash_object_file(r->hash_algo, map, size, type, &real_oid);
 		return !oideq(oid, &real_oid) ? -1 : 0;
 	}
 
-	st = open_istream(oid, &obj_type, &size, NULL);
+	st = open_istream(r, oid, &obj_type, &size, NULL);
 	if (!st)
 		return -1;
 
@@ -994,8 +994,8 @@ int check_object_signature(const struct object_id *oid, void *map,
 	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1;
 
 	/* Sha1.. */
-	the_hash_algo->init_fn(&c);
-	the_hash_algo->update_fn(&c, hdr, hdrlen);
+	r->hash_algo->init_fn(&c);
+	r->hash_algo->update_fn(&c, hdr, hdrlen);
 	for (;;) {
 		char buf[1024 * 16];
 		ssize_t readlen = read_istream(st, buf, sizeof(buf));
@@ -1006,9 +1006,9 @@ int check_object_signature(const struct object_id *oid, void *map,
 		}
 		if (!readlen)
 			break;
-		the_hash_algo->update_fn(&c, buf, readlen);
+		r->hash_algo->update_fn(&c, buf, readlen);
 	}
-	the_hash_algo->final_fn(real_oid.hash, &c);
+	r->hash_algo->final_fn(real_oid.hash, &c);
 	close_istream(st);
 	return !oideq(oid, &real_oid) ? -1 : 0;
 }
@@ -1588,7 +1588,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
 {
 	struct cached_object *co;
 
-	hash_object_file(buf, len, type_name(type), oid);
+	hash_object_file(the_hash_algo, buf, len, type_name(type), oid);
 	if (has_object_file(oid) || find_cached_object(oid))
 		return 0;
 	ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
@@ -1694,7 +1694,8 @@ void *read_object_with_reference(struct repository *r,
 	}
 }
 
-static void write_object_file_prepare(const void *buf, unsigned long len,
+static void write_object_file_prepare(const struct git_hash_algo *algo,
+				      const void *buf, unsigned long len,
 				      const char *type, struct object_id *oid,
 				      char *hdr, int *hdrlen)
 {
@@ -1704,10 +1705,10 @@ static void write_object_file_prepare(const void *buf, unsigned long len,
 	*hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1;
 
 	/* Sha1.. */
-	the_hash_algo->init_fn(&c);
-	the_hash_algo->update_fn(&c, hdr, *hdrlen);
-	the_hash_algo->update_fn(&c, buf, len);
-	the_hash_algo->final_fn(oid->hash, &c);
+	algo->init_fn(&c);
+	algo->update_fn(&c, hdr, *hdrlen);
+	algo->update_fn(&c, buf, len);
+	algo->final_fn(oid->hash, &c);
 }
 
 /*
@@ -1760,12 +1761,13 @@ static int write_buffer(int fd, const void *buf, size_t len)
 	return 0;
 }
 
-int hash_object_file(const void *buf, unsigned long len, const char *type,
+int hash_object_file(const struct git_hash_algo *algo, const void *buf,
+		     unsigned long len, const char *type,
 		     struct object_id *oid)
 {
 	char hdr[MAX_HEADER_LEN];
 	int hdrlen = sizeof(hdr);
-	write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+	write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
 	return 0;
 }
 
@@ -1923,7 +1925,8 @@ int write_object_file(const void *buf, unsigned long len, const char *type,
 	/* Normally if we have it in the pack then we do not bother writing
 	 * it out into .git/objects/??/?{38} file.
 	 */
-	write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+	write_object_file_prepare(the_hash_algo, buf, len, type, oid, hdr,
+				  &hdrlen);
 	if (freshen_packed_object(oid) || freshen_loose_object(oid))
 		return 0;
 	return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
@@ -1939,7 +1942,8 @@ int hash_object_file_literally(const void *buf, unsigned long len,
 	/* type string, SP, %lu of the length plus NUL must fit this */
 	hdrlen = strlen(type) + MAX_HEADER_LEN;
 	header = xmalloc(hdrlen);
-	write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
+	write_object_file_prepare(the_hash_algo, buf, len, type, oid, header,
+				  &hdrlen);
 
 	if (!(flags & HASH_WRITE_OBJECT))
 		goto cleanup;
@@ -2049,7 +2053,8 @@ static int index_mem(struct index_state *istate,
 	if (write_object)
 		ret = write_object_file(buf, size, type_name(type), oid);
 	else
-		ret = hash_object_file(buf, size, type_name(type), oid);
+		ret = hash_object_file(the_hash_algo, buf, size,
+				       type_name(type), oid);
 	if (re_allocated)
 		free(buf);
 	return ret;
@@ -2075,8 +2080,8 @@ static int index_stream_convert_blob(struct index_state *istate,
 		ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
 					oid);
 	else
-		ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
-				       oid);
+		ret = hash_object_file(the_hash_algo, sbuf.buf, sbuf.len,
+				       type_name(OBJ_BLOB), oid);
 	strbuf_release(&sbuf);
 	return ret;
 }
@@ -2194,7 +2199,8 @@ int index_path(struct index_state *istate, struct object_id *oid,
 		if (strbuf_readlink(&sb, path, st->st_size))
 			return error_errno("readlink(\"%s\")", path);
 		if (!(flags & HASH_WRITE_OBJECT))
-			hash_object_file(sb.buf, sb.len, blob_type, oid);
+			hash_object_file(the_hash_algo, sb.buf, sb.len,
+					 blob_type, oid);
 		else if (write_object_file(sb.buf, sb.len, blob_type, oid))
 			rc = error(_("%s: failed to insert into database"), path);
 		strbuf_release(&sb);
@@ -2495,8 +2501,9 @@ int read_loose_object(const char *path,
 		git_inflate_end(&stream);
 		goto out;
 	}
-	if (check_object_signature(expected_oid, *contents,
-				   *size, type_name(*type))) {
+	if (check_object_signature(the_repository, expected_oid,
+				   *contents, *size,
+				   type_name(*type))) {
 		error(_("hash mismatch for %s (expected %s)"), path,
 		      oid_to_hex(expected_oid));
 		free(*contents);
streaming.c | 28

@@ -16,6 +16,7 @@ enum input_source {
 };
 
 typedef int (*open_istream_fn)(struct git_istream *,
+			       struct repository *,
			       struct object_info *,
			       const struct object_id *,
			       enum object_type *);
@@ -29,8 +30,8 @@ struct stream_vtbl {
 
 #define open_method_decl(name) \
 	int open_istream_ ##name \
-	(struct git_istream *st, struct object_info *oi, \
-	 const struct object_id *oid, \
+	(struct git_istream *st, struct repository *r, \
+	 struct object_info *oi, const struct object_id *oid, \
	 enum object_type *type)
 
 #define close_method_decl(name) \
@@ -108,7 +109,8 @@ ssize_t read_istream(struct git_istream *st, void *buf, size_t sz)
 	return st->vtbl->read(st, buf, sz);
 }
 
-static enum input_source istream_source(const struct object_id *oid,
+static enum input_source istream_source(struct repository *r,
+					const struct object_id *oid,
					enum object_type *type,
					struct object_info *oi)
 {
@@ -117,7 +119,7 @@ static enum input_source istream_source(const struct object_id *oid,
 
 	oi->typep = type;
 	oi->sizep = &size;
-	status = oid_object_info_extended(the_repository, oid, oi, 0);
+	status = oid_object_info_extended(r, oid, oi, 0);
 	if (status < 0)
 		return stream_error;
 
@@ -133,22 +135,23 @@ static enum input_source istream_source(const struct object_id *oid,
 	}
 }
 
-struct git_istream *open_istream(const struct object_id *oid,
+struct git_istream *open_istream(struct repository *r,
+				 const struct object_id *oid,
				 enum object_type *type,
				 unsigned long *size,
				 struct stream_filter *filter)
 {
 	struct git_istream *st;
 	struct object_info oi = OBJECT_INFO_INIT;
-	const struct object_id *real = lookup_replace_object(the_repository, oid);
-	enum input_source src = istream_source(real, type, &oi);
+	const struct object_id *real = lookup_replace_object(r, oid);
+	enum input_source src = istream_source(r, real, type, &oi);
 
 	if (src < 0)
 		return NULL;
 
 	st = xmalloc(sizeof(*st));
-	if (open_istream_tbl[src](st, &oi, real, type)) {
-		if (open_istream_incore(st, &oi, real, type)) {
+	if (open_istream_tbl[src](st, r, &oi, real, type)) {
+		if (open_istream_incore(st, r, &oi, real, type)) {
			free(st);
			return NULL;
		}
@@ -338,8 +341,7 @@ static struct stream_vtbl loose_vtbl = {
 
 static open_method_decl(loose)
 {
-	st->u.loose.mapped = map_loose_object(the_repository,
-					      oid, &st->u.loose.mapsize);
+	st->u.loose.mapped = map_loose_object(r, oid, &st->u.loose.mapsize);
 	if (!st->u.loose.mapped)
 		return -1;
 	if ((unpack_loose_header(&st->z,
@@ -499,7 +501,7 @@ static struct stream_vtbl incore_vtbl = {
 
 static open_method_decl(incore)
 {
-	st->u.incore.buf = read_object_file_extended(the_repository, oid, type, &st->size, 0);
+	st->u.incore.buf = read_object_file_extended(r, oid, type, &st->size, 0);
 	st->u.incore.read_ptr = 0;
 	st->vtbl = &incore_vtbl;
 
@@ -520,7 +522,7 @@ int stream_blob_to_fd(int fd, const struct object_id *oid, struct stream_filter
 	ssize_t kept = 0;
 	int result = -1;
 
-	st = open_istream(oid, &type, &sz, filter);
+	st = open_istream(the_repository, oid, &type, &sz, filter);
 	if (!st) {
 		if (filter)
 			free_stream_filter(filter);
@@ -8,7 +8,9 @@
 /* opaque */
 struct git_istream;
 
-struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
+struct git_istream *open_istream(struct repository *, const struct object_id *,
+				 enum object_type *, unsigned long *,
+				 struct stream_filter *);
 int close_istream(struct git_istream *);
 ssize_t read_istream(struct git_istream *, void *, size_t);
 