Mirror of https://github.com/microsoft/git.git
object-file API: have hash_object_file() take "enum object_type"
Change the hash_object_file() function to take an "enum object_type". As of a preceding commit, all of its callers pass either "{commit,tree,blob,tag}_type" or the result of a call to type_name(); the parse_object() caller that would have passed NULL now uses stream_object_signature() instead.

Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Parent: 0ff7b4f976
Commit: 44439c1c58
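As a rough illustration of the API change described in the commit message (a minimal sketch, not taken verbatim from any one caller; buf, len, and oid are placeholder names), a call site moves from passing a type string to passing the enum constant:

/* Before: the type argument was a string such as blob_type or type_name(type). */
hash_object_file(the_hash_algo, buf, len, blob_type, &oid);

/* After: the type argument is an enum object_type constant. */
hash_object_file(the_hash_algo, buf, len, OBJ_BLOB, &oid);

Internally, the string-taking code path is kept as a static hash_object_file_literally() helper in object-file.c, and hash_object_file() becomes a thin wrapper that calls type_name() on the enum (see the object-file.c hunk below).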
apply.c (4 changed lines)
@@ -3157,7 +3157,7 @@ static int apply_binary(struct apply_state *state,
 	 * See if the old one matches what the patch
 	 * applies to.
 	 */
-	hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+	hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB,
 			 &oid);
 	if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix))
 		return error(_("the patch applies to '%s' (%s), "

@@ -3203,7 +3203,7 @@ static int apply_binary(struct apply_state *state,
 				     name);
 
 	/* verify that the result matches */
-	hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+	hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB,
 			 &oid);
 	if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix))
 		return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
@@ -300,7 +300,7 @@ static void export_blob(const struct object_id *oid)
 		if (!buf)
 			die("could not read blob %s", oid_to_hex(oid));
 		if (check_object_signature(the_repository, oid, buf, size,
-					   type_name(type)) < 0)
+					   type) < 0)
 			die("oid mismatch in blob %s", oid_to_hex(oid));
 		object = parse_object_buffer(the_repository, oid, type,
 					     size, buf, &eaten);
@@ -970,7 +970,7 @@ static struct base_data *resolve_delta(struct object_entry *delta_obj,
 	if (!result_data)
 		bad_object(delta_obj->idx.offset, _("failed to apply delta"));
 	hash_object_file(the_hash_algo, result_data, result_size,
-			 type_name(delta_obj->real_type), &delta_obj->idx.oid);
+			 delta_obj->real_type, &delta_obj->idx.oid);
 	sha1_object(result_data, NULL, result_size, delta_obj->real_type,
 		    &delta_obj->idx.oid);

@@ -1413,7 +1413,7 @@ static void fix_unresolved_deltas(struct hashfile *f)
 			continue;
 
 		if (check_object_signature(the_repository, &d->oid, data, size,
-					   type_name(type)) < 0)
+					   type) < 0)
 			die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
 
 		/*
@@ -62,7 +62,7 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
 
 	repl = lookup_replace_object(the_repository, tagged_oid);
 	ret = check_object_signature(the_repository, repl, buffer, size,
-				     type_name(*tagged_type));
+				     *tagged_type);
 	free(buffer);
 
 	return ret;
@@ -409,7 +409,7 @@ static int check_one_mergetag(struct commit *commit,
 	int i;
 
 	hash_object_file(the_hash_algo, extra->value, extra->len,
-			 type_name(OBJ_TAG), &tag_oid);
+			 OBJ_TAG, &tag_oid);
 	tag = lookup_tag(the_repository, &tag_oid);
 	if (!tag)
 		return error(_("bad mergetag in commit '%s'"), ref);
@@ -266,7 +266,7 @@ static void write_object(unsigned nr, enum object_type type,
 	} else {
 		struct object *obj;
 		int eaten;
-		hash_object_file(the_hash_algo, buf, size, type_name(type),
+		hash_object_file(the_hash_algo, buf, size, type,
 				 &obj_list[nr].oid);
 		added_object(nr, type, buf, size);
 		obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
@@ -432,14 +432,14 @@ static int update_one(struct cache_tree *it,
 	if (repair) {
 		struct object_id oid;
 		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
-				 tree_type, &oid);
+				 OBJ_TREE, &oid);
 		if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
 			oidcpy(&it->oid, &oid);
 		else
 			to_invalidate = 1;
 	} else if (dryrun) {
 		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
-				 tree_type, &it->oid);
+				 OBJ_TREE, &it->oid);
 	} else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE,
 					   &it->oid, flags & WRITE_TREE_SILENT
 					   ? HASH_SILENT : 0)) {

@@ -948,7 +948,7 @@ static int verify_one(struct repository *r,
 		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
 		strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
 	}
-	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
+	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, OBJ_TREE,
 			 &new_oid);
 	if (!oideq(&new_oid, &it->oid))
 		BUG("cache-tree for path %.*s does not match. "
cache.h (3 changed lines)
@@ -1327,7 +1327,8 @@ int parse_loose_header(const char *hdr, struct object_info *oi);
 * what we expected, but it might also indicate another error.
 */
int check_object_signature(struct repository *r, const struct object_id *oid,
-			   void *buf, unsigned long size, const char *type);
+			   void *map, unsigned long size,
+			   enum object_type type);

/**
 * A streaming version of check_object_signature().
@@ -1159,7 +1159,7 @@ static int ident_to_worktree(const char *src, size_t len,
 	/* are we "faking" in place editing ? */
 	if (src == buf->buf)
 		to_free = strbuf_detach(buf, NULL);
-	hash_object_file(the_hash_algo, src, len, "blob", &oid);
+	hash_object_file(the_hash_algo, src, len, OBJ_BLOB, &oid);
 
 	strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
 	for (;;) {
@@ -261,7 +261,7 @@ static unsigned int hash_filespec(struct repository *r,
 		if (diff_populate_filespec(r, filespec, NULL))
 			return 0;
 		hash_object_file(r->hash_algo, filespec->data, filespec->size,
-				 "blob", &filespec->oid);
+				 OBJ_BLOB, &filespec->oid);
 	}
 	return oidhash(&filespec->oid);
 }
dir.c (2 changed lines)
@@ -1113,7 +1113,7 @@ static int add_patterns(const char *fname, const char *base, int baselen,
 				       &istate->cache[pos]->oid);
 		else
 			hash_object_file(the_hash_algo, buf, size,
-					 "blob", &oid_stat->oid);
+					 OBJ_BLOB, &oid_stat->oid);
 		fill_stat_data(&oid_stat->stat, &st);
 		oid_stat->valid = 1;
 	}
@@ -561,7 +561,7 @@ static int show_one_mergetag(struct commit *commit,
 	struct strbuf signature = STRBUF_INIT;
 
 	hash_object_file(the_hash_algo, extra->value, extra->len,
-			 type_name(OBJ_TAG), &oid);
+			 OBJ_TAG, &oid);
 	tag = lookup_tag(the_repository, &oid);
 	if (!tag)
 		return -1; /* error message already given */
@@ -1067,7 +1067,8 @@ int format_object_header(char *str, size_t size, enum object_type type,
 }
 
 int check_object_signature(struct repository *r, const struct object_id *oid,
-			   void *buf, unsigned long size, const char *type)
+			   void *buf, unsigned long size,
+			   enum object_type type)
 {
 	struct object_id real_oid;
 
@@ -1676,7 +1677,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
 {
 	struct cached_object *co;
 
-	hash_object_file(the_hash_algo, buf, len, type_name(type), oid);
+	hash_object_file(the_hash_algo, buf, len, type, oid);
 	if (has_object_file_with_flags(oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
 	    find_cached_object(oid))
 		return 0;
@@ -1850,15 +1851,23 @@ static int write_buffer(int fd, const void *buf, size_t len)
 	return 0;
 }
 
-void hash_object_file(const struct git_hash_algo *algo, const void *buf,
-		      unsigned long len, const char *type,
-		      struct object_id *oid)
+static void hash_object_file_literally(const struct git_hash_algo *algo,
+				       const void *buf, unsigned long len,
+				       const char *type, struct object_id *oid)
 {
 	char hdr[MAX_HEADER_LEN];
 	int hdrlen = sizeof(hdr);
 
 	write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
 }
 
+void hash_object_file(const struct git_hash_algo *algo, const void *buf,
+		      unsigned long len, enum object_type type,
+		      struct object_id *oid)
+{
+	hash_object_file_literally(algo, buf, len, type_name(type), oid);
+}
+
 /* Finalize a file on disk, and close it. */
 static void close_loose_object(int fd)
 {
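A short sketch of the resulting split (placeholder arguments; hash_object_file_literally() is static, so it is only reachable from within object-file.c, e.g. from the read_loose_object() hunk further down):

/* Callers with a known object type pass the enum through the public API: */
hash_object_file(the_hash_algo, buf, len, OBJ_BLOB, &oid);

/* Inside object-file.c, code that only has a type string (here a placeholder
 * literal standing in for something like oi->type_name->buf) keeps using the
 * string-taking helper directly: */
hash_object_file_literally(the_repository->hash_algo, buf, len, "blob", &oid);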
@@ -2161,9 +2170,7 @@ static int index_mem(struct index_state *istate,
 	if (write_object)
 		ret = write_object_file(buf, size, type, oid);
 	else
-		hash_object_file(the_hash_algo, buf, size, type_name(type),
-				 oid);
-
+		hash_object_file(the_hash_algo, buf, size, type, oid);
 	if (re_allocated)
 		free(buf);
 	return ret;
@@ -2189,8 +2196,8 @@ static int index_stream_convert_blob(struct index_state *istate,
 		ret = write_object_file(sbuf.buf, sbuf.len, OBJ_BLOB,
 					oid);
 	else
-		hash_object_file(the_hash_algo, sbuf.buf, sbuf.len,
-				 type_name(OBJ_BLOB), oid);
+		hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
+				 oid);
 	strbuf_release(&sbuf);
 	return ret;
 }
@@ -2309,7 +2316,7 @@ int index_path(struct index_state *istate, struct object_id *oid,
 			return error_errno("readlink(\"%s\")", path);
 		if (!(flags & HASH_WRITE_OBJECT))
 			hash_object_file(the_hash_algo, sb.buf, sb.len,
-					 blob_type, oid);
+					 OBJ_BLOB, oid);
 		else if (write_object_file(sb.buf, sb.len, OBJ_BLOB, oid))
 			rc = error(_("%s: failed to insert into database"), path);
 		strbuf_release(&sb);
@@ -2614,9 +2621,9 @@ int read_loose_object(const char *path,
 			git_inflate_end(&stream);
 			goto out;
 		}
-		hash_object_file(the_repository->hash_algo,
-				 *contents, *size, oi->type_name->buf,
-				 real_oid);
+		hash_object_file_literally(the_repository->hash_algo,
+					   *contents, *size,
+					   oi->type_name->buf, real_oid);
 		if (!oideq(expected_oid, real_oid))
 			goto out;
 	}
@@ -246,7 +246,7 @@ static inline void *repo_read_object_file(struct repository *r,
int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);

void hash_object_file(const struct git_hash_algo *algo, const void *buf,
-		      unsigned long len, const char *type,
+		      unsigned long len, enum object_type type,
 		      struct object_id *oid);

int write_object_file_flags(const void *buf, unsigned long len,
object.c (3 changed lines)
@@ -289,8 +289,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
 
 	buffer = repo_read_object_file(r, oid, &type, &size);
 	if (buffer) {
-		if (check_object_signature(r, repl, buffer, size,
-					   type_name(type)) < 0) {
+		if (check_object_signature(r, repl, buffer, size, type) < 0) {
 			free(buffer);
 			error(_("hash mismatch %s"), oid_to_hex(repl));
 			return NULL;
@@ -143,7 +143,7 @@ static int verify_packfile(struct repository *r,
 				    oid_to_hex(&oid), p->pack_name,
 				    (uintmax_t)entries[i].offset);
 		else if (data && check_object_signature(r, &oid, data, size,
-							type_name(type)) < 0)
+							type) < 0)
 			err = error("packed %s from %s is corrupt",
 				    oid_to_hex(&oid), p->pack_name);
 		else if (!data && stream_object_signature(r, &oid) < 0)