write_index_as_tree: cleanup tempfile on error

If we fail to write our new index file, we roll back our
lockfile to remove the temporary index. But if we fail
before we even get to the write step (because reading the
old index failed), we leave the lockfile in place, which
makes no sense.

In practice this hasn't been a big deal because failing at
write_index_as_tree() typically results in the whole program
exiting (and thus the tempfile handler kicking in and
cleaning up the files). But this function should
consistently take responsibility for the resources it
allocates.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
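
The fix applies the common single-exit cleanup idiom: record the
error in a local status variable, jump to a single label, and put
the cleanup after that label so it runs on every path. As a rough,
self-contained illustration of that idiom (this is not git code;
the helper name, the FILE handle standing in for the lockfile, and
the example path are all made up):

/*
 * Illustrative sketch only -- not git code. A FILE handle stands in
 * for the lockfile: it is acquired once at the top, every later
 * failure sets ret and jumps to out, and the cleanup behind the
 * label runs on success and on error alike.
 */
#include <stdio.h>

static int first_line(const char *path, char *buf, int len)
{
	int ret = 0;
	FILE *fp = fopen(path, "r");

	if (!fp)
		return -1;	/* nothing acquired yet, plain return is fine */

	if (!fgets(buf, len, fp)) {
		ret = -1;	/* record the error ... */
		goto out;	/* ... but still reach the cleanup */
	}

out:
	fclose(fp);		/* always releases the resource */
	return ret;
}

int main(void)
{
	char buf[128];
	if (first_line("example.txt", buf, sizeof(buf)) < 0)
		return 1;
	printf("%s", buf);
	return 0;
}

The diff below has the same shape: ret replaces the scattered early
returns in write_index_as_tree(), and the rollback_lock_file() call
behind the new out: label now runs whether or not reading the old
index succeeded.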
Author:    Jeff King 2017-09-05 08:14:07 -04:00
Committer: Junio C Hamano
Parent:    3ec7d702a8
Commit:    c82c75b951
1 changed file with 15 additions and 8 deletions

@@ -604,6 +604,7 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 {
 	int entries, was_valid, newfd;
 	struct lock_file *lock_file;
+	int ret = 0;
 
 	/*
 	 * We can't free this memory, it becomes part of a linked list
@@ -614,8 +615,10 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 	newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);
 
 	entries = read_index_from(index_state, index_path);
-	if (entries < 0)
-		return WRITE_TREE_UNREADABLE_INDEX;
+	if (entries < 0) {
+		ret = WRITE_TREE_UNREADABLE_INDEX;
+		goto out;
+	}
 
 	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
 		cache_tree_free(&index_state->cache_tree);
@@ -624,8 +627,10 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 
 	was_valid = cache_tree_fully_valid(index_state->cache_tree);
 	if (!was_valid) {
-		if (cache_tree_update(index_state, flags) < 0)
-			return WRITE_TREE_UNMERGED_INDEX;
+		if (cache_tree_update(index_state, flags) < 0) {
+			ret = WRITE_TREE_UNMERGED_INDEX;
+			goto out;
+		}
 		if (0 <= newfd) {
 			if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
 				newfd = -1;
@@ -641,17 +646,19 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
 	if (prefix) {
 		struct cache_tree *subtree;
 		subtree = cache_tree_find(index_state->cache_tree, prefix);
-		if (!subtree)
-			return WRITE_TREE_PREFIX_ERROR;
+		if (!subtree) {
+			ret = WRITE_TREE_PREFIX_ERROR;
+			goto out;
+		}
 		hashcpy(sha1, subtree->oid.hash);
 	}
 	else
 		hashcpy(sha1, index_state->cache_tree->oid.hash);
 
+out:
 	if (0 <= newfd)
 		rollback_lock_file(lock_file);
-
-	return 0;
+	return ret;
 }
 
 int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)