Mirror of https://github.com/microsoft/git.git
ref_store: implement `refs_peel_ref()` generically
We're about to stop storing packed refs in a `ref_cache`. That means
that the only way we have left to optimize `peel_ref()` is by checking
whether the reference being peeled is the one currently being iterated
over (in `current_ref_iter`), and if so, using `ref_iterator_peel()`.
But this can be done generically; it doesn't have to be implemented
per-backend.

So implement `refs_peel_ref()` in `refs.c` and remove the `peel_ref()`
method from the refs API.

This removes the last callers of a couple of functions, so delete
them.

More cleanup to come...

Signed-off-by: Michael Haggerty <mhagger@alum.mit.edu>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Parent: f3987ab36d
Commit: ba1c052fa6
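As a reading aid before the diff: the optimization described in the
commit message pays off for callers that peel a reference from inside
a ref iteration callback. The sketch below is hypothetical (the
callback `show_peeled` and its wiring are mine, not part of this
commit), written against git's internal API as of this era;
`for_each_ref()`, `peel_ref()`, and `sha1_to_hex()` are real internal
functions of that time. Because the callback receives the very
`refname` pointer owned by the live iterator, the pointer-equality
test in the new generic `refs_peel_ref()` succeeds and
`ref_iterator_peel()` can answer without a second lookup:

#include "cache.h"
#include "refs.h"

/*
 * Illustrative callback: peel each ref as it is iterated. While it
 * runs, current_ref_iter is the live iterator and refname is the
 * pointer it owns, so refs_peel_ref()'s fast-path check
 * (current_ref_iter->refname == refname) succeeds.
 */
static int show_peeled(const char *refname, const struct object_id *oid,
		       int flags, void *cb_data)
{
	unsigned char peeled[20];

	/* peel_ref() returns 0 only when refname peels, e.g. an annotated tag */
	if (!peel_ref(refname, peeled))
		printf("%s^{} -> %s\n", refname, sha1_to_hex(peeled));
	return 0;
}

/* e.g. somewhere in a builtin: for_each_ref(show_peeled, NULL); */

Note that the fast-path check compares pointers rather than string
contents: it is meant to catch exactly this pattern of passing the
callback's own argument through, which pointer equality detects for
free.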
 refs.c | 18
diff --git a/refs.c b/refs.c
@@ -1735,7 +1735,23 @@ int refs_pack_refs(struct ref_store *refs, unsigned int flags)
 int refs_peel_ref(struct ref_store *refs, const char *refname,
 		  unsigned char *sha1)
 {
-	return refs->be->peel_ref(refs, refname, sha1);
+	int flag;
+	unsigned char base[20];
+
+	if (current_ref_iter && current_ref_iter->refname == refname) {
+		struct object_id peeled;
+
+		if (ref_iterator_peel(current_ref_iter, &peeled))
+			return -1;
+		hashcpy(sha1, peeled.hash);
+		return 0;
+	}
+
+	if (refs_read_ref_full(refs, refname,
+			       RESOLVE_REF_READING, base, &flag))
+		return -1;
+
+	return peel_object(base, sha1);
 }
 
 int peel_ref(const char *refname, unsigned char *sha1)
diff --git a/refs/files-backend.c b/refs/files-backend.c
@@ -655,43 +655,6 @@ out:
 	return ret;
 }
 
-static int files_peel_ref(struct ref_store *ref_store,
-			  const char *refname, unsigned char *sha1)
-{
-	struct files_ref_store *refs =
-		files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
-			       "peel_ref");
-	int flag;
-	unsigned char base[20];
-
-	if (current_ref_iter && current_ref_iter->refname == refname) {
-		struct object_id peeled;
-
-		if (ref_iterator_peel(current_ref_iter, &peeled))
-			return -1;
-		hashcpy(sha1, peeled.hash);
-		return 0;
-	}
-
-	if (refs_read_ref_full(ref_store, refname,
-			       RESOLVE_REF_READING, base, &flag))
-		return -1;
-
-	/*
-	 * If the reference is packed, read its ref_entry from the
-	 * cache in the hope that we already know its peeled value.
-	 * We only try this optimization on packed references because
-	 * (a) forcing the filling of the loose reference cache could
-	 * be expensive and (b) loose references anyway usually do not
-	 * have REF_KNOWS_PEELED.
-	 */
-	if (flag & REF_ISPACKED &&
-	    !refs_peel_ref(refs->packed_ref_store, refname, sha1))
-		return 0;
-
-	return peel_object(base, sha1);
-}
-
 struct files_ref_iterator {
 	struct ref_iterator base;
 
@@ -3012,7 +2975,6 @@ struct ref_storage_be refs_be_files = {
 	files_initial_transaction_commit,
 
 	files_pack_refs,
-	files_peel_ref,
 	files_create_symref,
 	files_delete_refs,
 	files_rename_ref,
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
@@ -850,26 +850,6 @@ static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
 	return refs->cache;
 }
 
-static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
-{
-	return get_ref_dir(packed_ref_cache->cache->root);
-}
-
-static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
-{
-	return get_packed_ref_dir(get_packed_ref_cache(refs));
-}
-
-/*
- * Return the ref_entry for the given refname from the packed
- * references. If it does not exist, return NULL.
- */
-static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
-					const char *refname)
-{
-	return find_ref_entry(get_packed_refs(refs), refname);
-}
-
 static int packed_read_raw_ref(struct ref_store *ref_store,
 			       const char *refname, unsigned char *sha1,
 			       struct strbuf *referent, unsigned int *type)
@@ -896,21 +876,6 @@ static int packed_read_raw_ref(struct ref_store *ref_store,
 	return 0;
 }
 
-static int packed_peel_ref(struct ref_store *ref_store,
-			   const char *refname, unsigned char *sha1)
-{
-	struct packed_ref_store *refs =
-		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
-				"peel_ref");
-	struct ref_entry *r = get_packed_ref(refs, refname);
-
-	if (!r || peel_entry(r, 0))
-		return -1;
-
-	hashcpy(sha1, r->u.value.peeled.hash);
-	return 0;
-}
-
 struct packed_ref_iterator {
 	struct ref_iterator base;
 
@@ -1597,7 +1562,6 @@ struct ref_storage_be refs_be_packed = {
 	packed_initial_transaction_commit,
 
 	packed_pack_refs,
-	packed_peel_ref,
 	packed_create_symref,
 	packed_delete_refs,
 	packed_rename_ref,
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
@@ -562,8 +562,6 @@ typedef int ref_transaction_commit_fn(struct ref_store *refs,
 				      struct strbuf *err);
 
 typedef int pack_refs_fn(struct ref_store *ref_store, unsigned int flags);
-typedef int peel_ref_fn(struct ref_store *ref_store,
-			const char *refname, unsigned char *sha1);
 typedef int create_symref_fn(struct ref_store *ref_store,
 			     const char *ref_target,
 			     const char *refs_heads_master,
@@ -668,7 +666,6 @@ struct ref_storage_be {
 	ref_transaction_commit_fn *initial_transaction_commit;
 
 	pack_refs_fn *pack_refs;
-	peel_ref_fn *peel_ref;
 	create_symref_fn *create_symref;
 	delete_refs_fn *delete_refs;
 	rename_ref_fn *rename_ref;
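Taken together, the two header hunks shrink the backend vtable: every
method table loses its peel slot, and peeling now lives once in
`refs.c`. As a reading aid only (the elisions and comments below are
mine, reconstructed from the hunks above, not a verbatim copy of the
header), the affected corner of `struct ref_storage_be` is now
roughly:

struct ref_storage_be {
	/* ... name, init, transaction, and other methods elided ... */
	pack_refs_fn *pack_refs;
	/* peel_ref_fn *peel_ref;  <- slot removed by this commit */
	create_symref_fn *create_symref;
	delete_refs_fn *delete_refs;
	rename_ref_fn *rename_ref;
	/* ... */
};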