Mirror of https://github.com/microsoft/git.git
mem-pool: use more standard initialization and finalization
A typical memory type, such as strbuf, hashmap, or string_list can be stored on the stack or embedded within another structure. mem_pool cannot be, because of how mem_pool_init() and mem_pool_discard() are written. mem_pool_init() does essentially the following (simplified for purposes of explanation here):

    void mem_pool_init(struct mem_pool **pool...)
    {
        *pool = xcalloc(1, sizeof(*pool));

It seems weird to require that mem_pools can only be accessed through a pointer. It also seems slightly dangerous: unlike strbuf_release() or strbuf_reset() or string_list_clear(), all of which put the data structure into a state where it can be re-used after the call, mem_pool_discard(pool) will leave pool pointing at free'd memory. read-cache (and split-index) are the only current users of mem_pools, and they haven't fallen into a use-after-free mistake here, but it seems likely to be problematic for future users, especially since several of the current callers of mem_pool_init() will only call it when the mem_pool* is not already allocated (i.e. is NULL).

This type of mechanism also prevents finding synchronization points where one can free existing memory and then resume more operations. It would be natural at such points to run something like

    mem_pool_discard(pool...);

and, if necessary,

    mem_pool_init(&pool...);

and then carry on continuing to use the pool. However, this fails badly if several objects had a copy of the value of pool from before these commands; in such a case, those objects won't get the updated value of pool that mem_pool_init() overwrites pool with, and they'll all instead be reading and writing from free'd memory.

Modify mem_pool_init()/mem_pool_discard() to behave more like strbuf_init()/strbuf_release() or string_list_init()/string_list_clear(). In particular: (1) make mem_pool_init() just take a mem_pool* and have it only worry about allocating struct mp_blocks, not the struct mem_pool itself, (2) make mem_pool_discard() free the memory that the pool was responsible for, but leave it in a state where it can be used to allocate more memory afterward (without the need to call mem_pool_init() again).

Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
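To illustrate the new calling convention described above, here is a minimal caller sketch. It is hypothetical and not part of the patch: the demo() function, its sizes, and its string are invented, and it assumes git's usual cache.h/mem-pool.h headers.

    /*
     * Hypothetical sketch: with the new API a mem_pool can live on the
     * stack (or be embedded in another struct), and mem_pool_discard()
     * leaves it in a re-usable state.
     */
    #include "cache.h"
    #include "mem-pool.h"

    static void demo(void)
    {
            struct mem_pool pool;   /* no longer forced behind a pointer */
            char *buf;

            mem_pool_init(&pool, 0);
            buf = mem_pool_alloc(&pool, 64);
            xsnprintf(buf, 64, "scratch data");

            /* Free everything the pool handed out... */
            mem_pool_discard(&pool, 0);

            /* ...the pool itself stays valid, so allocation can continue. */
            buf = mem_pool_alloc(&pool, 128);
            mem_pool_discard(&pool, 0);
    }

Before this change, the second mem_pool_alloc() would have required another mem_pool_init() call, and any stale copies of the old pool pointer would have been left pointing at freed memory.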
This commit is contained in:
Parent: ca1186ae01
Commit: 758f41d5e9
mem-pool.c (15 changed lines)
@@ -33,21 +33,13 @@ static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t b
 	return p;
 }
 
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+void mem_pool_init(struct mem_pool *pool, size_t initial_size)
 {
-	struct mem_pool *pool;
-
-	if (*mem_pool)
-		return;
-
-	pool = xcalloc(1, sizeof(*pool));
-
+	memset(pool, 0, sizeof(*pool));
 	pool->block_alloc = BLOCK_GROWTH_SIZE;
 
 	if (initial_size > 0)
 		mem_pool_alloc_block(pool, initial_size, NULL);
-
-	*mem_pool = pool;
 }
 
 void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
@@ -66,7 +58,8 @@ void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
 		free(block_to_free);
 	}
 
-	free(mem_pool);
+	mem_pool->mp_block = NULL;
+	mem_pool->pool_alloc = 0;
 }
 
 void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
mem-pool.h

@@ -24,10 +24,10 @@ struct mem_pool {
 /*
  * Initialize mem_pool with specified initial size.
  */
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+void mem_pool_init(struct mem_pool *pool, size_t initial_size);
 
 /*
- * Discard a memory pool and free all the memory it is responsible for.
+ * Discard all the memory the memory pool is responsible for.
  */
 void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
 
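Since mem_pool_init() now takes a plain struct mem_pool *, a pool can be embedded by value inside an enclosing structure, much like a strbuf or string_list. A hedged sketch of that pattern (the scratch_area type and its helpers are hypothetical, not from this patch):

    #include "cache.h"
    #include "mem-pool.h"

    /* Hypothetical container that embeds a pool instead of pointing at one. */
    struct scratch_area {
            struct mem_pool pool;   /* embedded by value */
            size_t nr_allocations;
    };

    static void scratch_area_init(struct scratch_area *s)
    {
            mem_pool_init(&s->pool, 4 * 1024);
            s->nr_allocations = 0;
    }

    static void *scratch_area_alloc(struct scratch_area *s, size_t len)
    {
            s->nr_allocations++;
            return mem_pool_alloc(&s->pool, len);
    }

    static void scratch_area_clear(struct scratch_area *s)
    {
            /* Frees the blocks; the embedded pool remains valid for re-use. */
            mem_pool_discard(&s->pool, 0);
            s->nr_allocations = 0;
    }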
read-cache.c (21 changed lines)
@@ -89,8 +89,10 @@ static struct mem_pool *find_mem_pool(struct index_state *istate)
 	else
 		pool_ptr = &istate->ce_mem_pool;
 
-	if (!*pool_ptr)
-		mem_pool_init(pool_ptr, 0);
+	if (!*pool_ptr) {
+		*pool_ptr = xmalloc(sizeof(**pool_ptr));
+		mem_pool_init(*pool_ptr, 0);
+	}
 
 	return *pool_ptr;
 }
@@ -2020,11 +2022,12 @@ static unsigned long load_all_cache_entries(struct index_state *istate,
 {
 	unsigned long consumed;
 
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
 	if (istate->version == 4) {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 			      estimate_cache_size_from_compressed(istate->cache_nr));
 	} else {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 			      estimate_cache_size(mmap_size, istate->cache_nr));
 	}
 
@@ -2084,7 +2087,8 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 	if (istate->name_hash_initialized)
 		BUG("the name hash isn't thread safe");
 
-	mem_pool_init(&istate->ce_mem_pool, 0);
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+	mem_pool_init(istate->ce_mem_pool, 0);
 
 	/* ensure we have no more threads than we have blocks to process */
 	if (nr_threads > ieot->nr)
@@ -2111,11 +2115,12 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 		nr = 0;
 		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
 			nr += p->ieot->entries[j].nr;
+		p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
 		if (istate->version == 4) {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				estimate_cache_size_from_compressed(nr));
 		} else {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				estimate_cache_size(mmap_size, nr));
 		}
 
@@ -2372,7 +2377,7 @@ int discard_index(struct index_state *istate)
 
 	if (istate->ce_mem_pool) {
 		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
-		istate->ce_mem_pool = NULL;
+		FREE_AND_NULL(istate->ce_mem_pool);
 	}
 
 	return 0;
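For pointer-held pools such as istate->ce_mem_pool, the caller-side lifecycle after this patch can be summarized with a small hedged sketch (the pool_create()/pool_destroy() helpers are hypothetical; they only mirror the find_mem_pool() and discard_index() hunks above):

    #include "cache.h"
    #include "mem-pool.h"

    static struct mem_pool *pool_create(size_t size_hint)
    {
            struct mem_pool *pool = xmalloc(sizeof(*pool)); /* caller owns the struct */
            mem_pool_init(pool, size_hint);                 /* only sets up the pool's blocks */
            return pool;
    }

    static void pool_destroy(struct mem_pool **pool_p)
    {
            mem_pool_discard(*pool_p, 0);   /* frees the memory the pool handed out */
            FREE_AND_NULL(*pool_p);         /* freeing the struct is now the caller's job */
    }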
split-index.c

@@ -79,8 +79,10 @@ void move_cache_to_base_index(struct index_state *istate)
 	if (si->base &&
 		si->base->ce_mem_pool) {
 
-		if (!istate->ce_mem_pool)
-			mem_pool_init(&istate->ce_mem_pool, 0);
+		if (!istate->ce_mem_pool) {
+			istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
+			mem_pool_init(istate->ce_mem_pool, 0);
+		}
 
 		mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
 	}