ebpf_epoch: _ebpf_flush_timer_set should be per-CPU (#481)

Signed-off-by: Alan Jowett <alanjo@microsoft.com>
Alan Jowett 2021-09-02 13:55:45 -06:00 committed by GitHub
Parent ac9b6282b8
Commit ffdc71166e
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
1 changed file with 18 additions and 11 deletions
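
For readers skimming the diff below: the commit replaces the single global interlocked flag _ebpf_flush_timer_set with a timer_armed bit stored in each per-CPU entry, so the flag is only touched while that CPU's lock is held. The stand-alone sketch that follows models the arm/clear lifecycle; the stub helpers, the fixed CPU_COUNT, and the free_list_has_items field are illustrative assumptions and not part of the ebpf-for-windows platform API.

// Minimal model of the per-CPU "timer armed" flag introduced by this commit.
// Everything here is a sketch; only the flags.timer_armed shape mirrors the diff.
#include <stdbool.h>
#include <stdio.h>

#define CPU_COUNT 4

typedef struct _cpu_entry
{
    bool free_list_has_items; // Stand-in for !ebpf_list_is_empty(&free_list).
    struct
    {
        int timer_armed : 1; // Per-CPU flag; protected by the per-CPU lock in the real code.
    } flags;
} cpu_entry_t;

static cpu_entry_t _cpu_table[CPU_COUNT];

// Stand-in for ebpf_schedule_timer_work_item(); arming an already-armed timer is idempotent.
static void
schedule_flush_timer_stub(void)
{
    printf("flush timer (re)armed\n");
}

// Mirrors the tail of _ebpf_epoch_release_free_list(): arm the timer at most once per CPU.
static void
release_free_list(cpu_entry_t* cpu_entry)
{
    if (cpu_entry->free_list_has_items && !cpu_entry->flags.timer_armed) {
        cpu_entry->flags.timer_armed = true;
        schedule_flush_timer_stub();
    }
}

// Mirrors _ebpf_epoch_get_release_epoch(): the flush path clears every CPU's flag so a
// later release_free_list() can re-arm the timer if entries remain.
static void
compute_release_epoch(void)
{
    for (int cpu = 0; cpu < CPU_COUNT; cpu++)
        _cpu_table[cpu].flags.timer_armed = false;
}

int
main(void)
{
    _cpu_table[1].free_list_has_items = true;
    release_free_list(&_cpu_table[1]); // Arms the timer.
    release_free_list(&_cpu_table[1]); // No-op: already armed on this CPU.
    compute_release_epoch();           // Flush clears the per-CPU flags.
    release_free_list(&_cpu_table[1]); // Re-arms because entries remain.
    return 0;
}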


@@ -52,7 +52,11 @@ typedef struct _ebpf_epoch_cpu_entry
     _Requires_lock_held_(lock) ebpf_epoch_state_t cpu_epoch_state;
     _Requires_lock_held_(lock) ebpf_list_entry_t free_list;
     _Requires_lock_held_(lock) ebpf_hash_table_t* thread_table;
-    uintptr_t padding[2];
+    struct
+    {
+        int timer_armed : 1;
+    } flags;
+    uintptr_t padding;
 } ebpf_epoch_cpu_entry_t;
 
 C_ASSERT(sizeof(ebpf_epoch_cpu_entry_t) % EBPF_CACHE_LINE_SIZE == 0);
@@ -80,7 +84,6 @@ static bool _ebpf_epoch_rundown = false;
  * @brief Timer used to update _ebpf_release_epoch.
  */
 static ebpf_timer_work_item_t* _ebpf_flush_timer = NULL;
-static volatile int32_t _ebpf_flush_timer_set = 0;
 
 // There are two possible actions that can be taken at the end of an epoch.
 // 1. Return a block of memory to the memory pool.
@@ -135,7 +138,6 @@ ebpf_epoch_initiate()
     _ebpf_current_epoch = 1;
     _ebpf_release_epoch = 0;
     _ebpf_epoch_cpu_count = cpu_count;
-    _ebpf_flush_timer_set = 0;
 
     _ebpf_epoch_cpu_table = ebpf_allocate_cache_aligned(sizeof(ebpf_epoch_cpu_entry_t) * cpu_count);
     if (!_ebpf_epoch_cpu_table) {
@@ -235,16 +237,10 @@ ebpf_epoch_exit()
         ebpf_lock_unlock(&_ebpf_epoch_cpu_table[current_cpu].lock, state);
     }
 
-    // First reap the free list.
+    // Reap the free list.
     if (!ebpf_list_is_empty(&_ebpf_epoch_cpu_table[current_cpu].free_list)) {
         _ebpf_epoch_release_free_list(&_ebpf_epoch_cpu_table[current_cpu], _ebpf_release_epoch);
     }
-
-    // If there are still items in the free list, schedule a timer to reap them in the future.
-    if (!ebpf_list_is_empty(&_ebpf_epoch_cpu_table[current_cpu].free_list) &&
-        (ebpf_interlocked_compare_exchange_int32(&_ebpf_flush_timer_set, 1, 0) == 0)) {
-        ebpf_schedule_timer_work_item(_ebpf_flush_timer, EBPF_EPOCH_FLUSH_DELAY_IN_MICROSECONDS);
-    }
 }
 
 void
@@ -384,6 +380,14 @@ _ebpf_epoch_release_free_list(_In_ ebpf_epoch_cpu_entry_t* cpu_entry, int64_t re
             break;
         }
     }
+
+    // If there are still items in the free list, schedule a timer to reap them in the future.
+    if (!ebpf_list_is_empty(&cpu_entry->free_list) && !cpu_entry->flags.timer_armed) {
+        // We will arm the timer once per CPU that sees entries it can't release.
+        // That's acceptable as arming the timer is idempotent.
+        cpu_entry->flags.timer_armed = true;
+        ebpf_schedule_timer_work_item(_ebpf_flush_timer, EBPF_EPOCH_FLUSH_DELAY_IN_MICROSECONDS);
+    }
     ebpf_lock_unlock(&cpu_entry->lock, lock_state);
 
     // Free all the expired items outside of the lock.
@@ -429,6 +433,10 @@ _ebpf_epoch_get_release_epoch(_Out_ int64_t* release_epoch)
         // Grab the CPU epoch.
         lock_state = ebpf_lock_lock(&_ebpf_epoch_cpu_table[cpu_id].lock);
+
+        // Clear the flush timer flag.
+        _ebpf_epoch_cpu_table[cpu_id].flags.timer_armed = false;
+
         if (_ebpf_epoch_cpu_table[cpu_id].cpu_epoch_state.active) {
             lowest_epoch = min(lowest_epoch, _ebpf_epoch_cpu_table[cpu_id].cpu_epoch_state.epoch);
         }
@@ -476,7 +484,6 @@ _ebpf_flush_worker(_In_ void* context)
     UNREFERENCED_PARAMETER(context);
 
     ebpf_epoch_flush();
-    ebpf_interlocked_compare_exchange_int32(&_ebpf_flush_timer_set, 0, 1);
 }
 
 ebpf_result_t
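
A side note on the first hunk: the new flags struct occupies one slot in the cache-aligned per-CPU entry, which is why uintptr_t padding[2] shrinks to a single uintptr_t while the existing C_ASSERT on EBPF_CACHE_LINE_SIZE continues to hold. The sketch below illustrates that size arithmetic under the assumption of a 64-bit build and a 64-byte cache line; the placeholder fields and names are not the real ebpf_epoch_cpu_entry_t layout.

// Illustration of the padding change in the first hunk (assumes 64-bit, 64-byte cache line).
#include <stdint.h>

#define CACHE_LINE_SIZE 64 // Stand-in for EBPF_CACHE_LINE_SIZE.

typedef struct _per_cpu_entry_sketch
{
    uintptr_t other_fields[6]; // Placeholder for the lock, epoch state, free list and thread table (48 bytes).
    struct
    {
        int timer_armed : 1;
    } flags;                   // 4 bytes, plus 4 bytes of implicit padding before the next uintptr_t.
    uintptr_t padding;         // One slot instead of two keeps the total at exactly 64 bytes.
} per_cpu_entry_sketch_t;

// Same check the real code performs with C_ASSERT().
_Static_assert(sizeof(per_cpu_entry_sketch_t) % CACHE_LINE_SIZE == 0, "entry must fill whole cache lines");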