"git pack-refs" that races with new ref creation or deletion have
been susceptible to lossage of refs under right conditions, which
has been tightened up.

* mh/ref-races:
  for_each_ref: load all loose refs before packed refs
  get_packed_ref_cache: reload packed-refs file when it changes
  add a stat_validity struct
  Extract a struct stat_data from cache_entry
  packed_ref_cache: increment refcount when locked
  do_for_each_entry(): increment the packed refs cache refcount
  refs: manage lifetime of packed refs cache via reference counting
  refs: implement simple transactions for the packed-refs file
  refs: wrap the packed refs cache in a level of indirection
  pack_refs(): split creation of packed refs and entry writing
  repack_without_ref(): split list curation and entry writing
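
As context for the series, here is a minimal, hedged sketch of the caller-side pattern the new API enables, modeled on the write_remote_refs() hunk and the refs.h declarations further down; the ref name and object ID below are placeholders, not taken from the commit.

/*
 * Sketch only: stage some refs into the packed-refs file using the new
 * lock/add/commit primitives. The ref name and sha1 are placeholders.
 */
static void pack_refs_sketch(const unsigned char *sha1)
{
	/* Take the packed-refs lock; dies on failure. */
	lock_packed_refs(LOCK_DIE_ON_ERROR);

	/* Stage entries in the in-memory packed-ref cache. */
	add_packed_ref("refs/remotes/origin/example", sha1);

	/* Write the new packed-refs file and drop the lock. */
	if (commit_packed_refs())
		die_errno("unable to overwrite old ref-pack file");
}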
Junio C Hamano 2013-06-30 15:40:01 -07:00
Parents: 08585fd48d 98eeb09e8a
Commit: 079424a2cf
6 changed files with 466 additions and 134 deletions


@@ -493,13 +493,16 @@ static void write_remote_refs(const struct ref *local_refs)
{
const struct ref *r;
lock_packed_refs(LOCK_DIE_ON_ERROR);
for (r = local_refs; r; r = r->next) {
if (!r->peer_ref)
continue;
add_packed_ref(r->peer_ref->name, r->old_sha1);
}
pack_refs(PACK_REFS_ALL);
if (commit_packed_refs())
die_errno("unable to overwrite old ref-pack file");
}
static void write_followtags(const struct ref *refs, const char *msg)


@@ -165,11 +165,13 @@ static void show_ce_entry(const char *tag, struct cache_entry *ce)
}
write_name(ce->name, ce_namelen(ce));
if (debug_mode) {
printf(" ctime: %d:%d\n", ce->ce_ctime.sec, ce->ce_ctime.nsec);
printf(" mtime: %d:%d\n", ce->ce_mtime.sec, ce->ce_mtime.nsec);
printf(" dev: %d\tino: %d\n", ce->ce_dev, ce->ce_ino);
printf(" uid: %d\tgid: %d\n", ce->ce_uid, ce->ce_gid);
printf(" size: %d\tflags: %x\n", ce->ce_size, ce->ce_flags);
struct stat_data *sd = &ce->ce_stat_data;
printf(" ctime: %d:%d\n", sd->sd_ctime.sec, sd->sd_ctime.nsec);
printf(" mtime: %d:%d\n", sd->sd_mtime.sec, sd->sd_mtime.nsec);
printf(" dev: %d\tino: %d\n", sd->sd_dev, sd->sd_ino);
printf(" uid: %d\tgid: %d\n", sd->sd_uid, sd->sd_gid);
printf(" size: %d\tflags: %x\n", sd->sd_size, ce->ce_flags);
}
}

cache.h (60 changed lines)

@@ -119,15 +119,19 @@ struct cache_time {
unsigned int nsec;
};
struct stat_data {
struct cache_time sd_ctime;
struct cache_time sd_mtime;
unsigned int sd_dev;
unsigned int sd_ino;
unsigned int sd_uid;
unsigned int sd_gid;
unsigned int sd_size;
};
struct cache_entry {
struct cache_time ce_ctime;
struct cache_time ce_mtime;
unsigned int ce_dev;
unsigned int ce_ino;
struct stat_data ce_stat_data;
unsigned int ce_mode;
unsigned int ce_uid;
unsigned int ce_gid;
unsigned int ce_size;
unsigned int ce_flags;
unsigned int ce_namelen;
unsigned char sha1[20];
@@ -511,6 +515,21 @@ extern int limit_pathspec_to_literal(void);
#define HASH_FORMAT_CHECK 2
extern int index_fd(unsigned char *sha1, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
extern int index_path(unsigned char *sha1, const char *path, struct stat *st, unsigned flags);
/*
* Record to sd the data from st that we use to check whether a file
* might have changed.
*/
extern void fill_stat_data(struct stat_data *sd, struct stat *st);
/*
* Return 0 if st is consistent with a file not having been changed
* since sd was filled. If there are differences, return a
* combination of MTIME_CHANGED, CTIME_CHANGED, OWNER_CHANGED,
* INODE_CHANGED, and DATA_CHANGED.
*/
extern int match_stat_data(const struct stat_data *sd, struct stat *st);
extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
#define REFRESH_REALLY 0x0001 /* ignore_valid */
@@ -1338,4 +1357,31 @@ int checkout_fast_forward(const unsigned char *from,
int sane_execvp(const char *file, char *const argv[]);
/*
* A struct to encapsulate the concept of whether a file has changed
* since we last checked it. This uses criteria similar to those used
* for the index.
*/
struct stat_validity {
struct stat_data *sd;
};
void stat_validity_clear(struct stat_validity *sv);
/*
* Returns 1 if the path is a regular file (or a symlink to a regular
* file) and matches the saved stat_validity, 0 otherwise. A missing
* or inaccessible file is considered a match if the struct was just
* initialized, or if the previous update found an inaccessible file.
*/
int stat_validity_check(struct stat_validity *sv, const char *path);
/*
* Update the stat_validity from a file opened at descriptor fd. If
* the file is missing, inaccessible, or not a regular file, then
* future calls to stat_validity_check will match iff one of those
* conditions continues to be true.
*/
void stat_validity_update(struct stat_validity *sv, int fd);
#endif /* CACHE_H */
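
To make the new declarations concrete, here is a small hedged sketch of the stat_validity pattern they describe; get_packed_ref_cache() in refs.c below applies the same idea to the packed-refs file. The cache name and the reload step are illustrative only.

/* Sketch: cache a file's contents and notice when it changes on disk. */
static struct stat_validity example_validity;

static void reload_if_changed(const char *path)
{
	int fd;

	if (stat_validity_check(&example_validity, path))
		return;		/* unchanged since the last update */

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		/* missing or inaccessible: forget the old state */
		stat_validity_clear(&example_validity);
		return;
	}
	stat_validity_update(&example_validity, fd);
	/* ... re-read the file's contents from fd here ... */
	close(fd);
}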


@@ -67,6 +67,61 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
{
sd->sd_ctime.sec = (unsigned int)st->st_ctime;
sd->sd_mtime.sec = (unsigned int)st->st_mtime;
sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
sd->sd_dev = st->st_dev;
sd->sd_ino = st->st_ino;
sd->sd_uid = st->st_uid;
sd->sd_gid = st->st_gid;
sd->sd_size = st->st_size;
}
int match_stat_data(const struct stat_data *sd, struct stat *st)
{
int changed = 0;
if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
changed |= MTIME_CHANGED;
if (trust_ctime && check_stat &&
sd->sd_ctime.sec != (unsigned int)st->st_ctime)
changed |= CTIME_CHANGED;
#ifdef USE_NSEC
if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
changed |= MTIME_CHANGED;
if (trust_ctime && check_stat &&
sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
changed |= CTIME_CHANGED;
#endif
if (check_stat) {
if (sd->sd_uid != (unsigned int) st->st_uid ||
sd->sd_gid != (unsigned int) st->st_gid)
changed |= OWNER_CHANGED;
if (sd->sd_ino != (unsigned int) st->st_ino)
changed |= INODE_CHANGED;
}
#ifdef USE_STDEV
/*
* st_dev breaks on network filesystems where different
* clients will have different views of what "device"
* the filesystem is on
*/
if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
changed |= INODE_CHANGED;
#endif
if (sd->sd_size != (unsigned int) st->st_size)
changed |= DATA_CHANGED;
return changed;
}
/*
* This only updates the "non-critical" parts of the directory
* cache, ie the parts that aren't tracked by GIT, and only used
@@ -74,15 +129,7 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
*/
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
ce->ce_ctime.sec = (unsigned int)st->st_ctime;
ce->ce_mtime.sec = (unsigned int)st->st_mtime;
ce->ce_ctime.nsec = ST_CTIME_NSEC(*st);
ce->ce_mtime.nsec = ST_MTIME_NSEC(*st);
ce->ce_dev = st->st_dev;
ce->ce_ino = st->st_ino;
ce->ce_uid = st->st_uid;
ce->ce_gid = st->st_gid;
ce->ce_size = st->st_size;
fill_stat_data(&ce->ce_stat_data, st);
if (assume_unchanged)
ce->ce_flags |= CE_VALID;
@@ -195,43 +242,11 @@ static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
default:
die("internal error: ce_mode is %o", ce->ce_mode);
}
if (ce->ce_mtime.sec != (unsigned int)st->st_mtime)
changed |= MTIME_CHANGED;
if (trust_ctime && check_stat &&
ce->ce_ctime.sec != (unsigned int)st->st_ctime)
changed |= CTIME_CHANGED;
#ifdef USE_NSEC
if (check_stat && ce->ce_mtime.nsec != ST_MTIME_NSEC(*st))
changed |= MTIME_CHANGED;
if (trust_ctime && check_stat &&
ce->ce_ctime.nsec != ST_CTIME_NSEC(*st))
changed |= CTIME_CHANGED;
#endif
if (check_stat) {
if (ce->ce_uid != (unsigned int) st->st_uid ||
ce->ce_gid != (unsigned int) st->st_gid)
changed |= OWNER_CHANGED;
if (ce->ce_ino != (unsigned int) st->st_ino)
changed |= INODE_CHANGED;
}
#ifdef USE_STDEV
/*
* st_dev breaks on network filesystems where different
* clients will have different views of what "device"
* the filesystem is on
*/
if (check_stat && ce->ce_dev != (unsigned int) st->st_dev)
changed |= INODE_CHANGED;
#endif
if (ce->ce_size != (unsigned int) st->st_size)
changed |= DATA_CHANGED;
changed |= match_stat_data(&ce->ce_stat_data, st);
/* Racily smudged entry? */
if (!ce->ce_size) {
if (!ce->ce_stat_data.sd_size) {
if (!is_empty_blob_sha1(ce->sha1))
changed |= DATA_CHANGED;
}
@@ -246,11 +261,11 @@ static int is_racy_timestamp(const struct index_state *istate,
istate->timestamp.sec &&
#ifdef USE_NSEC
/* nanosecond timestamped files can also be racy! */
(istate->timestamp.sec < ce->ce_mtime.sec ||
(istate->timestamp.sec == ce->ce_mtime.sec &&
istate->timestamp.nsec <= ce->ce_mtime.nsec))
(istate->timestamp.sec < ce->ce_stat_data.sd_mtime.sec ||
(istate->timestamp.sec == ce->ce_stat_data.sd_mtime.sec &&
istate->timestamp.nsec <= ce->ce_stat_data.sd_mtime.nsec))
#else
istate->timestamp.sec <= ce->ce_mtime.sec
istate->timestamp.sec <= ce->ce_stat_data.sd_mtime.sec
#endif
);
}
@@ -342,7 +357,7 @@ int ie_modified(const struct index_state *istate,
* then we know it is.
*/
if ((changed & DATA_CHANGED) &&
(S_ISGITLINK(ce->ce_mode) || ce->ce_size != 0))
(S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
return changed;
changed_fs = ce_modified_check_fs(ce, st);
@@ -1324,16 +1339,16 @@ static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *on
{
struct cache_entry *ce = xmalloc(cache_entry_size(len));
ce->ce_ctime.sec = ntoh_l(ondisk->ctime.sec);
ce->ce_mtime.sec = ntoh_l(ondisk->mtime.sec);
ce->ce_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
ce->ce_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
ce->ce_dev = ntoh_l(ondisk->dev);
ce->ce_ino = ntoh_l(ondisk->ino);
ce->ce_stat_data.sd_ctime.sec = ntoh_l(ondisk->ctime.sec);
ce->ce_stat_data.sd_mtime.sec = ntoh_l(ondisk->mtime.sec);
ce->ce_stat_data.sd_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
ce->ce_stat_data.sd_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
ce->ce_stat_data.sd_dev = ntoh_l(ondisk->dev);
ce->ce_stat_data.sd_ino = ntoh_l(ondisk->ino);
ce->ce_mode = ntoh_l(ondisk->mode);
ce->ce_uid = ntoh_l(ondisk->uid);
ce->ce_gid = ntoh_l(ondisk->gid);
ce->ce_size = ntoh_l(ondisk->size);
ce->ce_stat_data.sd_uid = ntoh_l(ondisk->uid);
ce->ce_stat_data.sd_gid = ntoh_l(ondisk->gid);
ce->ce_stat_data.sd_size = ntoh_l(ondisk->size);
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
hashcpy(ce->sha1, ondisk->sha1);
@@ -1611,7 +1626,7 @@ static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
* The only thing we care about in this function is to smudge the
* falsely clean entry due to touch-update-touch race, so we leave
* everything else as they are. We are called for entries whose
* ce_mtime match the index file mtime.
* ce_stat_data.sd_mtime match the index file mtime.
*
* Note that this actually does not do much for gitlinks, for
* which ce_match_stat_basic() always goes to the actual
@@ -1650,7 +1665,7 @@ static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
* file, and never calls us, so the cached size information
* for "frotz" stays 6 which does not match the filesystem.
*/
ce->ce_size = 0;
ce->ce_stat_data.sd_size = 0;
}
}
@@ -1660,16 +1675,16 @@ static char *copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
{
short flags;
ondisk->ctime.sec = htonl(ce->ce_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_mtime.sec);
ondisk->ctime.nsec = htonl(ce->ce_ctime.nsec);
ondisk->mtime.nsec = htonl(ce->ce_mtime.nsec);
ondisk->dev = htonl(ce->ce_dev);
ondisk->ino = htonl(ce->ce_ino);
ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
ondisk->dev = htonl(ce->ce_stat_data.sd_dev);
ondisk->ino = htonl(ce->ce_stat_data.sd_ino);
ondisk->mode = htonl(ce->ce_mode);
ondisk->uid = htonl(ce->ce_uid);
ondisk->gid = htonl(ce->ce_gid);
ondisk->size = htonl(ce->ce_size);
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
hashcpy(ondisk->sha1, ce->sha1);
flags = ce->ce_flags;
@@ -1936,3 +1951,33 @@ void *read_blob_data_from_index(struct index_state *istate, const char *path, un
*size = sz;
return data;
}
void stat_validity_clear(struct stat_validity *sv)
{
free(sv->sd);
sv->sd = NULL;
}
int stat_validity_check(struct stat_validity *sv, const char *path)
{
struct stat st;
if (stat(path, &st) < 0)
return sv->sd == NULL;
if (!sv->sd)
return 0;
return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
}
void stat_validity_update(struct stat_validity *sv, int fd)
{
struct stat st;
if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
stat_validity_clear(sv);
else {
if (!sv->sd)
sv->sd = xcalloc(1, sizeof(struct stat_data));
fill_stat_data(sv->sd, &st);
}
}
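
A brief, hedged illustration of the two helpers just introduced, outside of any index code: record a file's stat data and later ask whether the file may have changed. The error handling here is an illustrative choice, not taken from this commit.

/*
 * Sketch: record a file's stat data, then later ask whether the file
 * may have changed.
 */
static int demo_stat_data(const char *path)
{
	struct stat_data sd;
	struct stat st;

	if (lstat(path, &st))
		return -1;
	fill_stat_data(&sd, &st);	/* remember mtime/ctime/ino/size/... */

	/* ... later, possibly after someone touched the file ... */

	if (lstat(path, &st))
		return -1;
	/* nonzero means some combination of MTIME_CHANGED, DATA_CHANGED, ... */
	return match_stat_data(&sd, &st) != 0;
}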

refs.c (316 changed lines)

@@ -749,6 +749,21 @@ static int do_for_each_entry_in_dirs(struct ref_dir *dir1,
}
}
/*
* Load all of the refs from the dir into our in-memory cache. The hard work
* of loading loose refs is done by get_ref_dir(), so we just need to recurse
* through all of the sub-directories. We do not even need to care about
* sorting, as traversal order does not matter to us.
*/
static void prime_ref_dir(struct ref_dir *dir)
{
int i;
for (i = 0; i < dir->nr; i++) {
struct ref_entry *entry = dir->entries[i];
if (entry->flag & REF_DIR)
prime_ref_dir(get_ref_dir(entry));
}
}
/*
* Return true iff refname1 and refname2 conflict with each other.
* Two reference names conflict if one of them exactly matches the
@@ -806,6 +821,30 @@ static int is_refname_available(const char *refname, const char *oldrefname,
return 1;
}
struct packed_ref_cache {
struct ref_entry *root;
/*
* Count of references to the data structure in this instance,
* including the pointer from ref_cache::packed if any. The
* data will not be freed as long as the reference count is
* nonzero.
*/
unsigned int referrers;
/*
* Iff the packed-refs file associated with this instance is
* currently locked for writing, this points at the associated
* lock (which is owned by somebody else). The referrer count
* is also incremented when the file is locked and decremented
* when it is unlocked.
*/
struct lock_file *lock;
/* The metadata from when this packed-refs cache was read */
struct stat_validity validity;
};
/*
* Future: need to be in "struct repository"
* when doing a full libification.
@@ -813,7 +852,7 @@ static int is_refname_available(const char *refname, const char *oldrefname,
static struct ref_cache {
struct ref_cache *next;
struct ref_entry *loose;
struct ref_entry *packed;
struct packed_ref_cache *packed;
/*
* The submodule name, or "" for the main repo. We allocate
* length 1 rather than FLEX_ARRAY so that the main ref_cache
@@ -822,11 +861,42 @@ static struct ref_cache {
char name[1];
} ref_cache, *submodule_ref_caches;
/* Lock used for the main packed-refs file: */
static struct lock_file packlock;
/*
* Increment the reference count of *packed_refs.
*/
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
packed_refs->referrers++;
}
/*
* Decrease the reference count of *packed_refs. If it goes to zero,
* free *packed_refs and return true; otherwise return false.
*/
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
if (!--packed_refs->referrers) {
free_ref_entry(packed_refs->root);
stat_validity_clear(&packed_refs->validity);
free(packed_refs);
return 1;
} else {
return 0;
}
}
static void clear_packed_ref_cache(struct ref_cache *refs)
{
if (refs->packed) {
free_ref_entry(refs->packed);
struct packed_ref_cache *packed_refs = refs->packed;
if (packed_refs->lock)
die("internal error: packed-ref cache cleared while locked");
refs->packed = NULL;
release_packed_ref_cache(packed_refs);
}
}
@@ -996,29 +1066,57 @@ static void read_packed_refs(FILE *f, struct ref_dir *dir)
}
}
static struct ref_dir *get_packed_refs(struct ref_cache *refs)
/*
* Get the packed_ref_cache for the specified ref_cache, creating it
* if necessary.
*/
static struct packed_ref_cache *get_packed_ref_cache(struct ref_cache *refs)
{
const char *packed_refs_file;
if (*refs->name)
packed_refs_file = git_path_submodule(refs->name, "packed-refs");
else
packed_refs_file = git_path("packed-refs");
if (refs->packed &&
!stat_validity_check(&refs->packed->validity, packed_refs_file))
clear_packed_ref_cache(refs);
if (!refs->packed) {
const char *packed_refs_file;
FILE *f;
refs->packed = create_dir_entry(refs, "", 0, 0);
if (*refs->name)
packed_refs_file = git_path_submodule(refs->name, "packed-refs");
else
packed_refs_file = git_path("packed-refs");
refs->packed = xcalloc(1, sizeof(*refs->packed));
acquire_packed_ref_cache(refs->packed);
refs->packed->root = create_dir_entry(refs, "", 0, 0);
f = fopen(packed_refs_file, "r");
if (f) {
read_packed_refs(f, get_ref_dir(refs->packed));
stat_validity_update(&refs->packed->validity, fileno(f));
read_packed_refs(f, get_ref_dir(refs->packed->root));
fclose(f);
}
}
return get_ref_dir(refs->packed);
return refs->packed;
}
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
return get_ref_dir(packed_ref_cache->root);
}
static struct ref_dir *get_packed_refs(struct ref_cache *refs)
{
return get_packed_ref_dir(get_packed_ref_cache(refs));
}
void add_packed_ref(const char *refname, const unsigned char *sha1)
{
add_ref(get_packed_refs(&ref_cache),
struct packed_ref_cache *packed_ref_cache =
get_packed_ref_cache(&ref_cache);
if (!packed_ref_cache->lock)
die("internal error: packed refs not locked");
add_ref(get_packed_ref_dir(packed_ref_cache),
create_ref_entry(refname, sha1, REF_ISPACKED, 1));
}
@@ -1558,13 +1656,31 @@ void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname)
static int do_for_each_entry(struct ref_cache *refs, const char *base,
each_ref_entry_fn fn, void *cb_data)
{
struct ref_dir *packed_dir = get_packed_refs(refs);
struct ref_dir *loose_dir = get_loose_refs(refs);
struct packed_ref_cache *packed_ref_cache;
struct ref_dir *loose_dir;
struct ref_dir *packed_dir;
int retval = 0;
/*
* We must make sure that all loose refs are read before accessing the
* packed-refs file; this avoids a race condition in which loose refs
* are migrated to the packed-refs file by a simultaneous process, but
* our in-memory view is from before the migration. get_packed_ref_cache()
* takes care of making sure our view is up to date with what is on
* disk.
*/
loose_dir = get_loose_refs(refs);
if (base && *base) {
loose_dir = find_containing_dir(loose_dir, base, 0);
}
if (loose_dir)
prime_ref_dir(loose_dir);
packed_ref_cache = get_packed_ref_cache(refs);
acquire_packed_ref_cache(packed_ref_cache);
packed_dir = get_packed_ref_dir(packed_ref_cache);
if (base && *base) {
packed_dir = find_containing_dir(packed_dir, base, 0);
loose_dir = find_containing_dir(loose_dir, base, 0);
}
if (packed_dir && loose_dir) {
@@ -1582,6 +1698,7 @@ static int do_for_each_entry(struct ref_cache *refs, const char *base,
loose_dir, 0, fn, cb_data);
}
release_packed_ref_cache(packed_ref_cache);
return retval;
}
@@ -2036,6 +2153,73 @@ static void write_packed_entry(int fd, char *refname, unsigned char *sha1,
}
}
/*
* An each_ref_entry_fn that writes the entry to a packed-refs file.
*/
static int write_packed_entry_fn(struct ref_entry *entry, void *cb_data)
{
int *fd = cb_data;
enum peel_status peel_status = peel_entry(entry, 0);
if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
error("internal error: %s is not a valid packed reference!",
entry->name);
write_packed_entry(*fd, entry->name, entry->u.value.sha1,
peel_status == PEEL_PEELED ?
entry->u.value.peeled : NULL);
return 0;
}
int lock_packed_refs(int flags)
{
struct packed_ref_cache *packed_ref_cache;
/* Discard the old cache because it might be invalid: */
clear_packed_ref_cache(&ref_cache);
if (hold_lock_file_for_update(&packlock, git_path("packed-refs"), flags) < 0)
return -1;
/* Read the current packed-refs while holding the lock: */
packed_ref_cache = get_packed_ref_cache(&ref_cache);
packed_ref_cache->lock = &packlock;
/* Increment the reference count to prevent it from being freed: */
acquire_packed_ref_cache(packed_ref_cache);
return 0;
}
int commit_packed_refs(void)
{
struct packed_ref_cache *packed_ref_cache =
get_packed_ref_cache(&ref_cache);
int error = 0;
if (!packed_ref_cache->lock)
die("internal error: packed-refs not locked");
write_or_die(packed_ref_cache->lock->fd,
PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache),
0, write_packed_entry_fn,
&packed_ref_cache->lock->fd);
if (commit_lock_file(packed_ref_cache->lock))
error = -1;
packed_ref_cache->lock = NULL;
release_packed_ref_cache(packed_ref_cache);
return error;
}
void rollback_packed_refs(void)
{
struct packed_ref_cache *packed_ref_cache =
get_packed_ref_cache(&ref_cache);
if (!packed_ref_cache->lock)
die("internal error: packed-refs not locked");
rollback_lock_file(packed_ref_cache->lock);
packed_ref_cache->lock = NULL;
release_packed_ref_cache(packed_ref_cache);
clear_packed_ref_cache(&ref_cache);
}
struct ref_to_prune {
struct ref_to_prune *next;
unsigned char sha1[20];
@@ -2044,35 +2228,50 @@ struct ref_to_prune {
struct pack_refs_cb_data {
unsigned int flags;
struct ref_dir *packed_refs;
struct ref_to_prune *ref_to_prune;
int fd;
};
static int pack_one_ref(struct ref_entry *entry, void *cb_data)
/*
* An each_ref_entry_fn that is run over loose references only. If
* the loose reference can be packed, add an entry in the packed ref
* cache. If the reference should be pruned, also add it to
* ref_to_prune in the pack_refs_cb_data.
*/
static int pack_if_possible_fn(struct ref_entry *entry, void *cb_data)
{
struct pack_refs_cb_data *cb = cb_data;
enum peel_status peel_status;
struct ref_entry *packed_entry;
int is_tag_ref = !prefixcmp(entry->name, "refs/tags/");
/* ALWAYS pack refs that were already packed or are tags */
if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref &&
!(entry->flag & REF_ISPACKED))
/* ALWAYS pack tags */
if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref)
return 0;
/* Do not pack symbolic or broken refs: */
if ((entry->flag & REF_ISSYMREF) || !ref_resolves_to_object(entry))
return 0;
/* Add a packed ref cache entry equivalent to the loose entry. */
peel_status = peel_entry(entry, 1);
if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
die("internal error peeling reference %s (%s)",
entry->name, sha1_to_hex(entry->u.value.sha1));
write_packed_entry(cb->fd, entry->name, entry->u.value.sha1,
peel_status == PEEL_PEELED ?
entry->u.value.peeled : NULL);
packed_entry = find_ref(cb->packed_refs, entry->name);
if (packed_entry) {
/* Overwrite existing packed entry with info from loose entry */
packed_entry->flag = REF_ISPACKED | REF_KNOWS_PEELED;
hashcpy(packed_entry->u.value.sha1, entry->u.value.sha1);
} else {
packed_entry = create_ref_entry(entry->name, entry->u.value.sha1,
REF_ISPACKED | REF_KNOWS_PEELED, 0);
add_ref(cb->packed_refs, packed_entry);
}
hashcpy(packed_entry->u.value.peeled, entry->u.value.peeled);
/* If the ref was already packed, there is no need to prune it. */
if ((cb->flags & PACK_REFS_PRUNE) && !(entry->flag & REF_ISPACKED)) {
/* Schedule the loose reference for pruning if requested. */
if ((cb->flags & PACK_REFS_PRUNE)) {
int namelen = strlen(entry->name) + 1;
struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen);
hashcpy(n->sha1, entry->u.value.sha1);
@@ -2134,8 +2333,6 @@ static void prune_refs(struct ref_to_prune *r)
}
}
static struct lock_file packlock;
int pack_refs(unsigned int flags)
{
struct pack_refs_cb_data cbdata;
@@ -2143,26 +2340,38 @@ int pack_refs(unsigned int flags)
memset(&cbdata, 0, sizeof(cbdata));
cbdata.flags = flags;
cbdata.fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"),
LOCK_DIE_ON_ERROR);
lock_packed_refs(LOCK_DIE_ON_ERROR);
cbdata.packed_refs = get_packed_refs(&ref_cache);
write_or_die(cbdata.fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
do_for_each_entry_in_dir(get_loose_refs(&ref_cache), 0,
pack_if_possible_fn, &cbdata);
do_for_each_entry(&ref_cache, "", pack_one_ref, &cbdata);
if (commit_lock_file(&packlock) < 0)
if (commit_packed_refs())
die_errno("unable to overwrite old ref-pack file");
prune_refs(cbdata.ref_to_prune);
return 0;
}
static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
/*
* If entry is no longer needed in packed-refs, add it to the string
* list pointed to by cb_data. Reasons for deleting entries:
*
* - Entry is broken.
* - Entry is overridden by a loose ref.
* - Entry does not point at a valid object.
*
* In the first and third cases, also emit an error message because these
* are indications of repository corruption.
*/
static int curate_packed_ref_fn(struct ref_entry *entry, void *cb_data)
{
int *fd = cb_data;
enum peel_status peel_status;
struct string_list *refs_to_delete = cb_data;
if (entry->flag & REF_ISBROKEN) {
/* This shouldn't happen to packed refs. */
error("%s is broken!", entry->name);
string_list_append(refs_to_delete, entry->name);
return 0;
}
if (!has_sha1_file(entry->u.value.sha1)) {
@@ -2172,7 +2381,7 @@ static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
if (read_ref_full(entry->name, sha1, 0, &flags))
/* We should at least have found the packed ref. */
die("Internal error");
if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED))
if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED)) {
/*
* This packed reference is overridden by a
* loose reference, so it is OK that its value
@@ -2181,9 +2390,11 @@ static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
* collected. For this purpose we don't even
* care whether the loose reference itself is
* invalid, broken, symbolic, etc. Silently
* omit the packed reference from the output.
* remove the packed reference.
*/
string_list_append(refs_to_delete, entry->name);
return 0;
}
/*
* There is no overriding loose reference, so the fact
* that this reference doesn't refer to a valid object
@@ -2192,44 +2403,47 @@ static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
* the output.
*/
error("%s does not point to a valid object!", entry->name);
string_list_append(refs_to_delete, entry->name);
return 0;
}
peel_status = peel_entry(entry, 0);
write_packed_entry(*fd, entry->name, entry->u.value.sha1,
peel_status == PEEL_PEELED ?
entry->u.value.peeled : NULL);
return 0;
}
static int repack_without_ref(const char *refname)
{
int fd;
struct ref_dir *packed;
struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
struct string_list_item *ref_to_delete;
if (!get_packed_ref(refname))
return 0; /* refname does not exist in packed refs */
fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"), 0);
if (fd < 0) {
if (lock_packed_refs(0)) {
unable_to_lock_error(git_path("packed-refs"), errno);
return error("cannot delete '%s' from packed refs", refname);
}
clear_packed_ref_cache(&ref_cache);
packed = get_packed_refs(&ref_cache);
/* Remove refname from the cache. */
/* Remove refname from the cache: */
if (remove_entry(packed, refname) == -1) {
/*
* The packed entry disappeared while we were
* acquiring the lock.
*/
rollback_lock_file(&packlock);
rollback_packed_refs();
return 0;
}
write_or_die(fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
do_for_each_entry_in_dir(packed, 0, repack_ref_fn, &fd);
return commit_lock_file(&packlock);
/* Remove any other accumulated cruft: */
do_for_each_entry_in_dir(packed, 0, curate_packed_ref_fn, &refs_to_delete);
for_each_string_list_item(ref_to_delete, &refs_to_delete) {
if (remove_entry(packed, ref_to_delete->string) == -1)
die("internal error");
}
/* Write what remains: */
return commit_packed_refs();
}
int delete_ref(const char *refname, const unsigned char *sha1, int delopt)
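
For orientation, here is a hedged, simplified restatement of the transaction shape that repack_without_ref() now follows: lock, edit the in-memory packed-ref cache, then either commit or roll back. The cache-editing step is reduced to a comment because the helpers involved (get_packed_refs(), remove_entry(), curate_packed_ref_fn()) are private to refs.c.

/* Sketch only: delete one ref from the packed-refs file. */
static int delete_from_packed_refs_sketch(const char *refname)
{
	if (lock_packed_refs(0)) {
		unable_to_lock_error(git_path("packed-refs"), errno);
		return error("cannot delete '%s' from packed refs", refname);
	}

	/*
	 * ... remove refname (and any other stale entries) from the
	 * in-memory packed-ref cache; if nothing needed to change,
	 * call rollback_packed_refs() and return 0 ...
	 */

	/* Write what remains and release the lock. */
	return commit_packed_refs();
}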

refs.h (26 changed lines)

@@ -77,11 +77,33 @@ extern int for_each_rawref(each_ref_fn, void *);
extern void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname);
/*
* Add a reference to the in-memory packed reference cache. To actually
* write the reference to the packed-refs file, call pack_refs().
* Lock the packed-refs file for writing. Flags is passed to
* hold_lock_file_for_update(). Return 0 on success.
*/
extern int lock_packed_refs(int flags);
/*
* Add a reference to the in-memory packed reference cache. This may
* only be called while the packed-refs file is locked (see
* lock_packed_refs()). To actually write the packed-refs file, call
* commit_packed_refs().
*/
extern void add_packed_ref(const char *refname, const unsigned char *sha1);
/*
* Write the current version of the packed refs cache from memory to
* disk. The packed-refs file must already be locked for writing (see
* lock_packed_refs()). Return zero on success.
*/
extern int commit_packed_refs(void);
/*
* Rollback the lockfile for the packed-refs file, and discard the
* in-memory packed reference cache. (The packed-refs file will be
* read anew if it is needed again after this function is called.)
*/
extern void rollback_packed_refs(void);
/*
* Flags for controlling behaviour of pack_refs()
* PACK_REFS_PRUNE: Prune loose refs after packing