git/builtin/gc.c

702 строки
18 KiB
C
Исходник Обычный вид История

/*
 * git gc builtin command
 *
 * Cleanup unreachable files and optimize the repository.
 *
 * Copyright (c) 2007 James Bowes
 *
 * Based on git-gc.sh, which is
 *
 * Copyright (c) 2006 Shawn O. Pearce
 */
#include "builtin.h"
#include "repository.h"
#include "config.h"
#include "tempfile.h"
#include "lockfile.h"
#include "parse-options.h"
#include "run-command.h"
#include "sigchain.h"
#include "argv-array.h"
#include "commit.h"
#include "commit-graph.h"
#include "packfile.h"
#include "object-store.h"
#include "pack.h"
#include "pack-objects.h"
#include "blob.h"
#include "tree.h"
#include "promisor-remote.h"

#define FAILED_RUN "failed to run %s"

static const char * const builtin_gc_usage[] = {
	N_("git gc [<options>]"),
	NULL
};

static int pack_refs = 1;
static int prune_reflogs = 1;
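
/*
 * Note: the aggressive depth defaults to 50 rather than the historical
 * 250. Measurements on linux.git and git.git showed that going beyond a
 * depth of 50 shrinks the pack by only ~1% while making operations that
 * walk delta chains (rev-list, log -S) noticeably slower.
 */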
static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
static int gc_auto_pack_limit = 50;
static int detach_auto = 1;
static timestamp_t gc_log_expire_time;
static const char *gc_log_expire = "1.day.ago";
static const char *prune_expire = "2.weeks.ago";
static const char *prune_worktrees_expire = "3.months.ago";
static unsigned long big_pack_threshold;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;

static struct argv_array pack_refs_cmd = ARGV_ARRAY_INIT;
static struct argv_array reflog = ARGV_ARRAY_INIT;
static struct argv_array repack = ARGV_ARRAY_INIT;
static struct argv_array prune = ARGV_ARRAY_INIT;
static struct argv_array prune_worktrees = ARGV_ARRAY_INIT;
static struct argv_array rerere = ARGV_ARRAY_INIT;

static struct tempfile *pidfile;
static struct lock_file log_lock;

static struct string_list pack_garbage = STRING_LIST_INIT_DUP;

static void clean_pack_garbage(void)
{
	int i;
	for (i = 0; i < pack_garbage.nr; i++)
		unlink_or_warn(pack_garbage.items[i].string);
	string_list_clear(&pack_garbage, 0);
}

static void report_pack_garbage(unsigned seen_bits, const char *path)
{
	if (seen_bits == PACKDIR_FILE_IDX)
		string_list_append(&pack_garbage, path);
}
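
/*
 * When gc runs daemonized, its stderr is captured via the log_lock
 * tempfile (set up later in cmd_gc). The helpers below decide at exit
 * or on a signal what to do with it: commit the lockfile as gc.log if
 * anything was written, otherwise drop the lock and any stale gc.log.
 */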
static void process_log_file(void)
{
	struct stat st;

	if (fstat(get_lock_file_fd(&log_lock), &st)) {
		/*
		 * Perhaps there was an i/o error or another
		 * unlikely situation. Try to make a note of
		 * this in gc.log along with any existing
		 * messages.
		 */
		int saved_errno = errno;
		fprintf(stderr, _("Failed to fstat %s: %s"),
			get_tempfile_path(log_lock.tempfile),
			strerror(saved_errno));
		fflush(stderr);
		commit_lock_file(&log_lock);
		errno = saved_errno;
	} else if (st.st_size) {
		/* There was some error recorded in the lock file */
		commit_lock_file(&log_lock);
	} else {
		/* No error, clean up any old gc.log */
		unlink(git_path("gc.log"));
		rollback_lock_file(&log_lock);
	}
}

static void process_log_file_at_exit(void)
{
	fflush(stderr);
	process_log_file();
}

static void process_log_file_on_signal(int signo)
{
	process_log_file();
	sigchain_pop(signo);
	raise(signo);
}
static int gc_config_is_timestamp_never(const char *var)
{
	const char *value;
	timestamp_t expire;

	if (!git_config_get_value(var, &value) && value) {
		if (parse_expiry_date(value, &expire))
			die(_("failed to parse '%s' value '%s'"), var, value);
		return expire == 0;
	}
	return 0;
}
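
/*
 * Reads the gc.* configuration into the statics above. An illustrative
 * (not necessarily default) .git/config might contain:
 *
 *	[gc]
 *		auto = 6700
 *		autoPackLimit = 50
 *		autoDetach = true
 *		aggressiveWindow = 250
 *		aggressiveDepth = 50
 *		pruneExpire = 2.weeks.ago
 *		worktreePruneExpire = 3.months.ago
 *		logExpiry = 1.day.ago
 *		bigPackThreshold = 2g
 *	[pack]
 *		deltaCacheSize = 128m
 *
 * The "2g"/"128m" values are made-up examples; the others mirror the
 * built-in defaults.
 */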
static void gc_config(void)
{
	const char *value;

	if (!git_config_get_value("gc.packrefs", &value)) {
		if (value && !strcmp(value, "notbare"))
			pack_refs = -1;
		else
			pack_refs = git_config_bool("gc.packrefs", value);
	}
	if (gc_config_is_timestamp_never("gc.reflogexpire") &&
	    gc_config_is_timestamp_never("gc.reflogexpireunreachable"))
		prune_reflogs = 0;

	git_config_get_int("gc.aggressivewindow", &aggressive_window);
	git_config_get_int("gc.aggressivedepth", &aggressive_depth);
	git_config_get_int("gc.auto", &gc_auto_threshold);
	git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
	git_config_get_bool("gc.autodetach", &detach_auto);
	git_config_get_expiry("gc.pruneexpire", &prune_expire);
	git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
	git_config_get_expiry("gc.logexpiry", &gc_log_expire);

	git_config_get_ulong("gc.bigpackthreshold", &big_pack_threshold);
	git_config_get_ulong("pack.deltacachesize", &max_delta_cache_size);

	git_config(git_default_config, NULL);
}

static int too_many_loose_objects(void)
{
	/*
	 * Quickly check if a "gc" is needed, by estimating how
	 * many loose objects there are. Because SHA-1 is evenly
	 * distributed, we can check only one and get a reasonable
	 * estimate.
	 */
	DIR *dir;
	struct dirent *ent;
	int auto_threshold;
	int num_loose = 0;
	int needed = 0;
	const unsigned hexsz_loose = the_hash_algo->hexsz - 2;

	dir = opendir(git_path("objects/17"));
	if (!dir)
		return 0;
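
	/*
	 * Illustrative arithmetic: with the default gc.auto of 6700, the
	 * per-directory threshold below is DIV_ROUND_UP(6700, 256) = 27,
	 * so finding a 28th loose object in objects/17 suggests roughly
	 * 6700 or more loose objects repository-wide.
	 */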
	auto_threshold = DIV_ROUND_UP(gc_auto_threshold, 256);
	while ((ent = readdir(dir)) != NULL) {
		if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
		    ent->d_name[hexsz_loose] != '\0')
			continue;
		if (++num_loose > auto_threshold) {
			needed = 1;
			break;
		}
	}
	closedir(dir);
	return needed;
}

static struct packed_git *find_base_packs(struct string_list *packs,
					  unsigned long limit)
{
	struct packed_git *p, *base = NULL;

	for (p = get_all_packs(the_repository); p; p = p->next) {
		if (!p->pack_local)
			continue;
		if (limit) {
			if (p->pack_size >= limit)
				string_list_append(packs, p->pack_name);
		} else if (!base || base->pack_size < p->pack_size) {
			base = p;
		}
	}

	if (base)
		string_list_append(packs, base->pack_name);

	return base;
}

static int too_many_packs(void)
{
	struct packed_git *p;
	int cnt;

	if (gc_auto_pack_limit <= 0)
		return 0;

	for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
		if (!p->pack_local)
			continue;
		if (p->pack_keep)
			continue;
		/*
		 * Perhaps check the size of the pack and count only
		 * very small ones here?
		 */
		cnt++;
	}
	return gc_auto_pack_limit < cnt;
}

static uint64_t total_ram(void)
{
#if defined(HAVE_SYSINFO)
	struct sysinfo si;

	if (!sysinfo(&si))
		return si.totalram;
#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM))
	int64_t physical_memory;
	int mib[2];
	size_t length;

	mib[0] = CTL_HW;
# if defined(HW_MEMSIZE)
	mib[1] = HW_MEMSIZE;
# else
	mib[1] = HW_PHYSMEM;
# endif
	length = sizeof(int64_t);
	if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
		return physical_memory;
#elif defined(GIT_WINDOWS_NATIVE)
	MEMORYSTATUSEX memInfo;

	memInfo.dwLength = sizeof(MEMORYSTATUSEX);
	if (GlobalMemoryStatusEx(&memInfo))
		return memInfo.ullTotalPhys;
#endif
	return 0;
}

static uint64_t estimate_repack_memory(struct packed_git *pack)
{
	unsigned long nr_objects = approximate_object_count();
	size_t os_cache, heap;

	if (!pack || !nr_objects)
		return 0;

	/*
	 * First we have to scan through at least one pack.
	 * Assume enough room in OS file cache to keep the entire pack
	 * or we may accidentally evict data of other processes from
	 * the cache.
	 */
	os_cache = pack->pack_size + pack->index_size;
	/* then pack-objects needs lots more for bookkeeping */
	heap = sizeof(struct object_entry) * nr_objects;
	/*
	 * internal rev-list --all --objects takes up some memory too,
	 * let's say half of it is for blobs
	 */
	heap += sizeof(struct blob) * nr_objects / 2;
	/*
	 * and the other half is for trees (commits and tags are
	 * usually insignificant)
	 */
	heap += sizeof(struct tree) * nr_objects / 2;
	/* and then obj_hash[], underestimated in fact */
	heap += sizeof(struct object *) * nr_objects;
	/* revindex is used also */
	heap += sizeof(struct revindex_entry) * nr_objects;
	/*
	 * read_sha1_file() (either at delta calculation phase, or
	 * writing phase) also fills up the delta base cache
	 */
	heap += delta_base_cache_limit;
	/* and of course pack-objects has its own delta cache */
	heap += max_delta_cache_size;

	return os_cache + heap;
}
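
/*
 * Very rough illustration (struct sizes vary by platform and version):
 * at ~150 bytes of bookkeeping per object, a repository with one
 * million objects needs on the order of 150 MB of heap here, plus both
 * delta caches, plus the whole pack assumed resident in the OS cache.
 */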

static int keep_one_pack(struct string_list_item *item, void *data)
{
	argv_array_pushf(&repack, "--keep-pack=%s", basename(item->string));
	return 0;
}

static void add_repack_all_option(struct string_list *keep_pack)
{
	if (prune_expire && !strcmp(prune_expire, "now"))
		argv_array_push(&repack, "-a");
	else {
		argv_array_push(&repack, "-A");
		if (prune_expire)
			argv_array_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
	}

	if (keep_pack)
		for_each_string_list(keep_pack, keep_one_pack, NULL);
}
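
/*
 * For example, with the default prune_expire of "2.weeks.ago" the
 * function above adds "-A --unpack-unreachable=2.weeks.ago" to the
 * repack command line, whereas "git gc --prune=now" yields a plain "-a".
 */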

static void add_repack_incremental_option(void)
{
	argv_array_push(&repack, "--no-write-bitmap-index");
}

static int need_to_gc(void)
{
	/*
	 * Setting gc.auto to 0 or negative can disable the
	 * automatic gc.
	 */
	if (gc_auto_threshold <= 0)
		return 0;

	/*
	 * If there are too many loose objects, but not too many
	 * packs, we run "repack -d -l". If there are too many packs,
	 * we run "repack -A -d -l". Otherwise we tell the caller
	 * there is no need.
	 */
	if (too_many_packs()) {
		struct string_list keep_pack = STRING_LIST_INIT_NODUP;

		if (big_pack_threshold) {
			find_base_packs(&keep_pack, big_pack_threshold);
			if (keep_pack.nr >= gc_auto_pack_limit) {
				big_pack_threshold = 0;
				string_list_clear(&keep_pack, 0);
				find_base_packs(&keep_pack, 0);
			}
		} else {
			struct packed_git *p = find_base_packs(&keep_pack, 0);
			uint64_t mem_have, mem_want;

			mem_have = total_ram();
			mem_want = estimate_repack_memory(p);

			/*
			 * Only allow 1/2 of memory for pack-objects, leave
			 * the rest for the OS and other processes in the
			 * system.
			 */
			if (!mem_have || mem_want < mem_have / 2)
				string_list_clear(&keep_pack, 0);
		}

		add_repack_all_option(&keep_pack);
		string_list_clear(&keep_pack, 0);
	} else if (too_many_loose_objects())
		add_repack_incremental_option();
	else
		return 0;

	if (run_hook_le(NULL, "pre-auto-gc", NULL))
		return 0;
	return 1;
}
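
/*
 * The pre-auto-gc hook checked above lets users veto an automatic gc; a
 * common example is a hook that exits non-zero while the machine is on
 * battery power, postponing housekeeping until it is back on AC.
 */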

/* return NULL on success, else hostname running the gc */
static const char *lock_repo_for_gc(int force, pid_t *ret_pid)
{
	struct lock_file lock = LOCK_INIT;
	char my_host[HOST_NAME_MAX + 1];
	struct strbuf sb = STRBUF_INIT;
	struct stat st;
	uintmax_t pid;
	FILE *fp;
	int fd;
	char *pidfile_path;

	if (is_tempfile_active(pidfile))
		/* already locked */
		return NULL;

	if (xgethostname(my_host, sizeof(my_host)))
		xsnprintf(my_host, sizeof(my_host), "unknown");

	pidfile_path = git_pathdup("gc.pid");
	fd = hold_lock_file_for_update(&lock, pidfile_path,
				       LOCK_DIE_ON_ERROR);
	if (!force) {
		static char locking_host[HOST_NAME_MAX + 1];
		static char *scan_fmt;
		int should_exit;

		if (!scan_fmt)
			scan_fmt = xstrfmt("%s %%%ds", "%"SCNuMAX, HOST_NAME_MAX);
		fp = fopen(pidfile_path, "r");
		memset(locking_host, 0, sizeof(locking_host));
		should_exit =
			fp != NULL &&
			!fstat(fileno(fp), &st) &&
			/*
			 * 12 hour limit is very generous as gc should
			 * never take that long. On the other hand we
			 * don't really need a strict limit here,
			 * running gc --auto one day late is not a big
			 * problem. --force can be used in manual gc
			 * after the user verifies that no gc is
			 * running.
			 */
			time(NULL) - st.st_mtime <= 12 * 3600 &&
			fscanf(fp, scan_fmt, &pid, locking_host) == 2 &&
			/* be gentle to concurrent "gc" on remote hosts */
			(strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
		if (fp != NULL)
			fclose(fp);
		if (should_exit) {
			if (fd >= 0)
				rollback_lock_file(&lock);
			*ret_pid = pid;
			free(pidfile_path);
			return locking_host;
		}
	}

	strbuf_addf(&sb, "%"PRIuMAX" %s",
		    (uintmax_t) getpid(), my_host);
	write_in_full(fd, sb.buf, sb.len);
	strbuf_release(&sb);
	commit_lock_file(&lock);
	pidfile = register_tempfile(pidfile_path);
	free(pidfile_path);
	return NULL;
}
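
/*
 * The gc.pid file written above holds one line of the form
 * "<pid> <hostname>", e.g. "12345 buildhost" (illustrative values),
 * matching the scan_fmt that lock_repo_for_gc() parses on the next run.
 */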

/*
 * Returns 0 if there was no previous error and gc can proceed, 1 if
 * gc should not proceed due to an error in the last run. Prints a
 * message and returns -1 if an error occurred while reading gc.log
 */
static int report_last_gc_error(void)
{
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;
	ssize_t len;
	struct stat st;
	char *gc_log_path = git_pathdup("gc.log");

	if (stat(gc_log_path, &st)) {
		if (errno == ENOENT)
			goto done;
ret = error_errno(_("cannot stat '%s'"), gc_log_path);
goto done;
}
if (st.st_mtime < gc_log_expire_time)
goto done;
len = strbuf_read_file(&sb, gc_log_path, 0);
if (len < 0)
ret = error_errno(_("cannot read '%s'"), gc_log_path);
else if (len > 0) {
/*
* A previous gc failed. Report the error, and don't
* bother with an automatic gc run since it is likely
* to fail in the same way.
*/
warning(_("The last gc run reported the following. "
"Please correct the root cause\n"
"and remove %s.\n"
"Automatic cleanup will not be performed "
"until the file is removed.\n\n"
"%s"),
gc_log_path, sb.buf);
		ret = 1;
	}
	strbuf_release(&sb);
done:
	free(gc_log_path);
	return ret;
}

static void gc_before_repack(void)
{
	/*
	 * We may be called twice, as both the pre- and
	 * post-daemonized phases will call us, but running these
	 * commands more than once is pointless and wasteful.
	 */
	static int done = 0;
	if (done++)
		return;

	if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
		die(FAILED_RUN, pack_refs_cmd.argv[0]);

	if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
		die(FAILED_RUN, reflog.argv[0]);
}

int cmd_gc(int argc, const char **argv, const char *prefix)
{
	int aggressive = 0;
	int auto_gc = 0;
	int quiet = 0;
	int force = 0;
	const char *name;
	pid_t pid;
	int daemonized = 0;
	int keep_base_pack = -1;
	timestamp_t dummy;

	struct option builtin_gc_options[] = {
		OPT__QUIET(&quiet, N_("suppress progress reporting")),
		{ OPTION_STRING, 0, "prune", &prune_expire, N_("date"),
			N_("prune unreferenced objects"),
			PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
		OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
		OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
			   PARSE_OPT_NOCOMPLETE),
		OPT_BOOL_F(0, "force", &force,
			   N_("force running gc even if there may be another gc running"),
			   PARSE_OPT_NOCOMPLETE),
		OPT_BOOL(0, "keep-largest-pack", &keep_base_pack,
			 N_("repack all other packs except the largest pack")),
		OPT_END()
	};

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage_with_options(builtin_gc_usage, builtin_gc_options);

	argv_array_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL);
	argv_array_pushl(&reflog, "reflog", "expire", "--all", NULL);
	argv_array_pushl(&repack, "repack", "-d", "-l", NULL);
	argv_array_pushl(&prune, "prune", "--expire", NULL);
	argv_array_pushl(&prune_worktrees, "worktree", "prune", "--expire", NULL);
	argv_array_pushl(&rerere, "rerere", "gc", NULL);
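
	/*
	 * The argv_arrays filled above are command templates for the
	 * helpers gc drives: pack-refs, reflog expire, repack, prune,
	 * worktree prune and rerere gc. Option parsing below appends
	 * further arguments (e.g. -f/-q for repack) before they run.
	 */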

	/* default expiry time, overwritten in gc_config */
	gc_config();
	if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
		die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);

	if (pack_refs < 0)
		pack_refs = !is_bare_repository();

	argc = parse_options(argc, argv, prefix, builtin_gc_options,
			     builtin_gc_usage, 0);
	if (argc > 0)
		usage_with_options(builtin_gc_usage, builtin_gc_options);

	if (prune_expire && parse_expiry_date(prune_expire, &dummy))
		die(_("failed to parse prune expiry value %s"), prune_expire);

	if (aggressive) {
		argv_array_push(&repack, "-f");
if (aggressive_depth > 0)
argv_array_pushf(&repack, "--depth=%d", aggressive_depth);
if (aggressive_window > 0)
argv_array_pushf(&repack, "--window=%d", aggressive_window);
}
if (quiet)
argv_array_push(&repack, "-q");
if (auto_gc) {
/*
 * Auto-gc should be as non-intrusive as possible.
*/
if (!need_to_gc())
return 0;
if (!quiet) {
if (detach_auto)
fprintf(stderr, _("Auto packing the repository in background for optimum performance.\n"));
else
fprintf(stderr, _("Auto packing the repository for optimum performance.\n"));
fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n"));
}
if (detach_auto) {
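/*
 * If a previous daemonized gc left diagnostics in gc.log, report
 * them and skip this run with status 0: callers such as "git fetch"
 * should not see an error for a gc we chose not to start.
 */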
int ret = report_last_gc_error();
if (ret < 0)
/* an I/O error occurred, already reported */
exit(128);
if (ret == 1)
/* Last gc --auto failed. Skip this one. */
return 0;
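/*
 * Hold the gc.pid lock only for the pre-detach work: daemonize()
 * forks, so the pid recorded here would go stale. The lock is
 * re-taken after detaching, below.
 */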
if (lock_repo_for_gc(force, &pid))
return 0;
gc_before_repack(); /* dies on failure */
delete_tempfile(&pidfile);
/*
* failure to daemonize is ok, we'll continue
* in foreground
*/
daemonized = !daemonize();
}
} else {
struct string_list keep_pack = STRING_LIST_INIT_NODUP;
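/*
 * For a non-auto gc, optionally keep the largest "base" pack(s)
 * out of the repack: always with --keep-largest-pack, or when the
 * gc.bigPackThreshold configuration is set.
 */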
if (keep_base_pack != -1) {
if (keep_base_pack)
find_base_packs(&keep_pack, 0);
} else if (big_pack_threshold) {
find_base_packs(&keep_pack, big_pack_threshold);
}
add_repack_all_option(&keep_pack);
string_list_clear(&keep_pack, 0);
}
name = lock_repo_for_gc(force, &pid);
if (name) {
if (auto_gc)
return 0; /* be quiet on --auto */
die(_("gc is already running on machine '%s' pid %"PRIuMAX" (use --force if not)"),
name, (uintmax_t)pid);
}
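/*
 * When running in the background, redirect stderr to gc.log so that
 * a later "gc --auto" can relay any diagnostics to the user.
 */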
if (daemonized) {
hold_lock_file_for_update(&log_lock,
git_path("gc.log"),
LOCK_DIE_ON_ERROR);
dup2(get_lock_file_fd(&log_lock), 2);
sigchain_push_common(process_log_file_on_signal);
atexit(process_log_file_at_exit);
}
gc_before_repack();
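/*
 * Repacking and pruning rewrite and delete object files, so both
 * are skipped entirely when the repository has precious objects.
 */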
if (!repository_format_precious_objects) {
close_object_store(the_repository->objects);
if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
die(FAILED_RUN, repack.argv[0]);
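/*
 * Prune unreachable loose objects older than gc.pruneExpire; in a
 * partial clone, missing promisor objects must not be treated as
 * prunable garbage.
 */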
if (prune_expire) {
argv_array_push(&prune, prune_expire);
if (quiet)
argv_array_push(&prune, "--no-progress");
if (has_promisor_remote())
argv_array_push(&prune,
"--exclude-promisor-objects");
if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
die(FAILED_RUN, prune.argv[0]);
}
}
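/* Remove stale administrative files of worktrees that no longer exist. */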
if (prune_worktrees_expire) {
argv_array_push(&prune_worktrees, prune_worktrees_expire);
if (run_command_v_opt(prune_worktrees.argv, RUN_GIT_CMD))
die(FAILED_RUN, prune_worktrees.argv[0]);
}
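/* Let rerere expire old recorded conflict resolutions. */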
if (run_command_v_opt(rerere.argv, RUN_GIT_CMD))
die(FAILED_RUN, rerere.argv[0]);
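/*
 * Re-scan the pack directory and report leftover garbage files
 * (e.g. an .idx without a matching .pack), then delete them.
 */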
report_garbage = report_pack_garbage;
reprepare_packed_git(the_repository);
if (pack_garbage.nr > 0) {
close_object_store(the_repository->objects);
clean_pack_garbage();
}
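/*
 * Update the commit-graph file if gc.writeCommitGraph is enabled,
 * showing progress only when running interactively in the
 * foreground.
 */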
prepare_repo_settings(the_repository);
if (the_repository->settings.gc_write_commit_graph == 1)
write_commit_graph_reachable(get_object_directory(),
!quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
NULL);
if (auto_gc && too_many_loose_objects())
warning(_("There are too many unreachable loose objects; "
"run 'git prune' to remove them."));
if (!daemonized)
unlink(git_path("gc.log"));
return 0;
}