#include "cache.h"
#include "tmp-objdir.h"
#include "chdir-notify.h"
#include "dir.h"
#include "sigchain.h"
#include "string-list.h"
#include "strbuf.h"
#include "strvec.h"
#include "quote.h"
#include "object-store.h"

struct tmp_objdir {
	struct strbuf path;
	struct strvec env;
	struct object_directory *prev_odb;
	int will_destroy;
};

/*
 * Allow only one tmp_objdir at a time in a running process, which simplifies
 * our signal/atexit cleanup routines. It's doubtful callers will ever need
 * more than one, and we can expand later if so. You can have many such
 * tmp_objdirs simultaneously in many processes, of course.
 */
static struct tmp_objdir *the_tmp_objdir;

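/*
 * Release the in-memory resources held by a tmp_objdir. This does not
 * touch the filesystem at all; use tmp_objdir_destroy() for that.
 */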
static void tmp_objdir_free(struct tmp_objdir *t)
{
	strbuf_release(&t->path);
	strvec_clear(&t->env);
	free(t);
}

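/*
 * Remove the temporary object directory from disk and, unless we are
 * running from a signal handler, free the tmp_objdir itself. With
 * "on_signal" set we skip anything that is unsafe in a signal handler,
 * such as restoring the primary object database or calling free().
 */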
static int tmp_objdir_destroy_1(struct tmp_objdir *t, int on_signal)
{
	int err;

	if (!t)
		return 0;

	if (t == the_tmp_objdir)
		the_tmp_objdir = NULL;

	if (!on_signal && t->prev_odb)
		restore_primary_odb(t->prev_odb, t->path.buf);

	/*
	 * This may use malloc via strbuf_grow(), but we should
	 * have pre-grown t->path sufficiently so that this
	 * doesn't happen in practice.
	 */
	err = remove_dir_recursively(&t->path, 0);

	/*
	 * When we are cleaning up due to a signal, we won't bother
	 * freeing memory; it may cause a deadlock if the signal
	 * arrived while libc's allocator lock is held.
	 */
	if (!on_signal)
		tmp_objdir_free(t);

	return err;
}

int tmp_objdir_destroy(struct tmp_objdir *t)
{
	return tmp_objdir_destroy_1(t, 0);
}

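/*
 * atexit and signal handlers that clean up the singleton the_tmp_objdir;
 * the signal variant re-raises the signal after cleaning up.
 */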
static void remove_tmp_objdir(void)
{
	tmp_objdir_destroy(the_tmp_objdir);
}

static void remove_tmp_objdir_on_signal(int signo)
{
	tmp_objdir_destroy_1(the_tmp_objdir, 1);
	sigchain_pop(signo);
	raise(signo);
}

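/*
 * Remove everything inside the temporary object directory but keep the
 * top-level directory itself, so the tmp_objdir remains usable.
 */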
void tmp_objdir_discard_objects(struct tmp_objdir *t)
{
	remove_dir_recursively(&t->path, REMOVE_DIR_KEEP_TOPLEVEL);
}

/*
 * These env_* functions are for setting up the child environment; the
 * "replace" variant overrides the value of any existing variable with that
 * "key". The "append" variant puts our new value at the end of a list,
 * separated by PATH_SEP (which is what separates values in
 * GIT_ALTERNATE_OBJECT_DIRECTORIES).
 */
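/*
 * For illustration only (hypothetical paths): if GIT_ALTERNATE_OBJECT_DIRECTORIES
 * is already "/srv/alt", appending "/tmp/quarantine" yields
 * "GIT_ALTERNATE_OBJECT_DIRECTORIES=/srv/alt:/tmp/quarantine" (';' instead of
 * ':' on Windows). A value that contains PATH_SEP or begins with '"' is
 * C-quoted first so the list can still be split unambiguously.
 */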
static void env_append(struct strvec *env, const char *key, const char *val)
{
	struct strbuf quoted = STRBUF_INIT;
	const char *old;

	/*
	 * Avoid quoting if it's not necessary, for maximum compatibility
	 * with older parsers which don't understand the quoting.
	 */
	if (*val == '"' || strchr(val, PATH_SEP)) {
		strbuf_addch(&quoted, '"');
		quote_c_style(val, &quoted, NULL, 1);
		strbuf_addch(&quoted, '"');
		val = quoted.buf;
	}

	old = getenv(key);
	if (!old)
		strvec_pushf(env, "%s=%s", key, val);
	else
		strvec_pushf(env, "%s=%s%c%s", key, old, PATH_SEP, val);

	strbuf_release(&quoted);
}

static void env_replace(struct strvec *env, const char *key, const char *val)
{
	strvec_pushf(env, "%s=%s", key, val);
}

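/*
 * Create the "pack" subdirectory inside a freshly created temporary
 * object directory, mirroring the layout of a normal object directory.
 */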
static int setup_tmp_objdir(const char *root)
{
	char *path;
	int ret = 0;

	path = xstrfmt("%s/pack", root);
	ret = mkdir(path, 0777);
	free(path);

	return ret;
}

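/*
 * Create a temporary object directory ("tmp_objdir-<prefix>-XXXXXX") inside
 * the current object directory and prepare the environment that redirects a
 * child's object writes into it. A rough, hypothetical sketch of a caller
 * (not code from this file):
 *
 *	struct tmp_objdir *t = tmp_objdir_create("incoming");
 *	if (!t)
 *		die("unable to create temporary object directory");
 *	... run a child with tmp_objdir_env(t) so its objects land in t ...
 *	if (objects_are_acceptable)
 *		tmp_objdir_migrate(t);
 *	else
 *		tmp_objdir_destroy(t);
 *
 * "objects_are_acceptable" above stands in for whatever check the caller
 * performs before deciding to keep the objects.
 */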
struct tmp_objdir *tmp_objdir_create(const char *prefix)
{
	static int installed_handlers;
	struct tmp_objdir *t;

	if (the_tmp_objdir)
		BUG("only one tmp_objdir can be used at a time");

	t = xcalloc(1, sizeof(*t));
	strbuf_init(&t->path, 0);
	strvec_init(&t->env);

	/*
	 * Use a string starting with tmp_ so that the builtin/prune.c code
	 * can recognize any stale objdirs left behind by a crash and delete
	 * them.
	 */
	strbuf_addf(&t->path, "%s/tmp_objdir-%s-XXXXXX", get_object_directory(), prefix);

	/*
	 * Grow the strbuf beyond any filename we expect to be placed in it.
	 * If tmp_objdir_destroy() is called by a signal handler, then
	 * we should be able to use the strbuf to remove files without
	 * having to call malloc.
	 */
	strbuf_grow(&t->path, 1024);

	if (!mkdtemp(t->path.buf)) {
		/* free, not destroy, as we never touched the filesystem */
		tmp_objdir_free(t);
		return NULL;
	}

	the_tmp_objdir = t;
	if (!installed_handlers) {
		atexit(remove_tmp_objdir);
		sigchain_push_common(remove_tmp_objdir_on_signal);
		installed_handlers++;
	}

	if (setup_tmp_objdir(t->path.buf)) {
		tmp_objdir_destroy(t);
		return NULL;
	}

	env_append(&t->env, ALTERNATE_DB_ENVIRONMENT,
		   absolute_path(get_object_directory()));
	env_replace(&t->env, DB_ENVIRONMENT, absolute_path(t->path.buf));
	env_replace(&t->env, GIT_QUARANTINE_ENVIRONMENT,
		    absolute_path(t->path.buf));

	return t;
}

/*
 * Make sure we copy packfiles and their associated metafiles in the correct
 * order. All of these ends_with checks are slightly expensive to do in
 * the midst of a sorting routine, but in practice it shouldn't matter.
 * We will have a relatively small number of packfiles to order, and loose
 * objects exit early in the first line.
 */
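/*
 * The resulting copy order for a pack's files is: *.keep first, then *.pack,
 * then *.rev, then *.idx, then any other pack-* file; names that do not
 * start with "pack" (e.g. loose object fan-out directories) sort before all
 * of them.
 */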
static int pack_copy_priority(const char *name)
{
	if (!starts_with(name, "pack"))
		return 0;
	if (ends_with(name, ".keep"))
		return 1;
	if (ends_with(name, ".pack"))
		return 2;
	if (ends_with(name, ".rev"))
		return 3;
	if (ends_with(name, ".idx"))
		return 4;
	return 5;
}

static int pack_copy_cmp(const char *a, const char *b)
{
	return pack_copy_priority(a) - pack_copy_priority(b);
}

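/*
 * Read the entries of a directory into "out", skipping anything that
 * starts with a dot (including "." and ".."). Returns -1 if the directory
 * cannot be opened.
 */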
static int read_dir_paths(struct string_list *out, const char *path)
{
	DIR *dh;
	struct dirent *de;

	dh = opendir(path);
	if (!dh)
		return -1;

	while ((de = readdir(dh)))
		if (de->d_name[0] != '.')
			string_list_append(out, de->d_name);

	closedir(dh);
	return 0;
}

static int migrate_paths(struct strbuf *src, struct strbuf *dst);

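/*
 * Migrate a single entry from src to dst: a directory is created in dst
 * (recursing into its contents), while a regular file is moved into place
 * via finalize_object_file().
 */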
static int migrate_one(struct strbuf *src, struct strbuf *dst)
{
	struct stat st;

	if (stat(src->buf, &st) < 0)
		return -1;
	if (S_ISDIR(st.st_mode)) {
		if (!mkdir(dst->buf, 0777)) {
			if (adjust_shared_perm(dst->buf))
				return -1;
		} else if (errno != EEXIST)
			return -1;
		return migrate_paths(src, dst);
	}
	return finalize_object_file(src->buf, dst->buf);
}

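/*
 * Migrate the contents of the directory src into dst, ordering the entries
 * with pack_copy_cmp() so that a packfile's .idx, which makes the pack
 * visible to readers, is moved only after its other files.
 */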
static int migrate_paths(struct strbuf *src, struct strbuf *dst)
{
	size_t src_len = src->len, dst_len = dst->len;
	struct string_list paths = STRING_LIST_INIT_DUP;
	int i;
	int ret = 0;

	if (read_dir_paths(&paths, src->buf) < 0)
		return -1;
	paths.cmp = pack_copy_cmp;
	string_list_sort(&paths);

	for (i = 0; i < paths.nr; i++) {
		const char *name = paths.items[i].string;

		strbuf_addf(src, "/%s", name);
		strbuf_addf(dst, "/%s", name);

		ret |= migrate_one(src, dst);

		strbuf_setlen(src, src_len);
		strbuf_setlen(dst, dst_len);
	}

	string_list_clear(&paths, 0);
	return ret;
}

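/*
 * Move the contents of the temporary object directory into the main object
 * directory, restoring the primary object database first if it had been
 * replaced, and then destroy the tmp_objdir.
 */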
int tmp_objdir_migrate(struct tmp_objdir *t)
{
	struct strbuf src = STRBUF_INIT, dst = STRBUF_INIT;
	int ret;

	if (!t)
		return 0;

	if (t->prev_odb) {
		if (the_repository->objects->odb->will_destroy)
			BUG("migrating an ODB that was marked for destruction");
		restore_primary_odb(t->prev_odb, t->path.buf);
		t->prev_odb = NULL;
	}

	strbuf_addbuf(&src, &t->path);
	strbuf_addstr(&dst, get_object_directory());

	ret = migrate_paths(&src, &dst);

	strbuf_release(&src);
	strbuf_release(&dst);

	tmp_objdir_destroy(t);
	return ret;
}

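/*
 * Return the NULL-terminated environment for a child process whose object
 * writes should be redirected into the tmp_objdir, or NULL if "t" is NULL.
 */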
const char **tmp_objdir_env(const struct tmp_objdir *t)
{
	if (!t)
		return NULL;
	return t->env.v;
}

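/*
 * Make the objects in the tmp_objdir visible to the current process by
 * registering it as an in-memory alternate object database.
 */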
void tmp_objdir_add_as_alternate(const struct tmp_objdir *t)
{
	add_to_alternates_memory(t->path.buf);
}

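/*
 * Install the tmp_objdir as the primary object database of the current
 * process, remembering the previous primary so that it can be restored
 * later by tmp_objdir_migrate(), tmp_objdir_destroy() or
 * tmp_objdir_unapply_primary_odb().
 */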
void tmp_objdir_replace_primary_odb(struct tmp_objdir *t, int will_destroy)
{
	if (t->prev_odb)
		BUG("the primary object database is already replaced");
	t->prev_odb = set_temporary_primary_odb(t->path.buf, will_destroy);
	t->will_destroy = will_destroy;
}

struct tmp_objdir *tmp_objdir_unapply_primary_odb(void)
{
	if (!the_tmp_objdir || !the_tmp_objdir->prev_odb)
		return NULL;

	restore_primary_odb(the_tmp_objdir->prev_odb, the_tmp_objdir->path.buf);
	the_tmp_objdir->prev_odb = NULL;
	return the_tmp_objdir;
}

void tmp_objdir_reapply_primary_odb(struct tmp_objdir *t, const char *old_cwd,
				    const char *new_cwd)
{
	char *path;

	path = reparent_relative_path(old_cwd, new_cwd, t->path.buf);
	strbuf_reset(&t->path);
	strbuf_addstr(&t->path, path);
	free(path);
	tmp_objdir_replace_primary_odb(t, t->will_destroy);
}