Signed-off-by: Alan Jowett <alanjo@microsoft.com>
This commit is contained in:
Alan Jowett 2021-08-23 14:49:26 -06:00 коммит произвёл GitHub
Родитель 5519c26f5b
Коммит 024abd4534
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
6 изменённых файлов: 18 добавлений и 27 удалений

Просмотреть файл

@ -590,7 +590,7 @@ ebpf_map_create(
ebpf_map_type_t type = ebpf_map_definition->type;
ebpf_result_t result = EBPF_SUCCESS;
uint32_t cpu_count;
ebpf_get_cpu_count(&cpu_count);
cpu_count = ebpf_get_cpu_count();
ebpf_map_definition_t local_map_definition = *ebpf_map_definition;
switch (local_map_definition.type) {
case BPF_MAP_TYPE_PERCPU_HASH:

Просмотреть файл

@ -10,15 +10,15 @@
// Each block of code that accesses epoch freed memory wraps access in calls to
// ebpf_epoch_enter/ebpf_epoch_exit.
//
// Epoch tracking is handled differently for pre-emptable vs non-pre-emptable
// Epoch tracking is handled differently for pre-emptible vs non-pre-emptible
// invocations.
//
// Non-pre-emptable invocations are:
// Non-pre-emptible invocations are:
// 1) Tracked by the CPU they are running on as they don't switch CPUs.
// 2) Accessed without synchronization.
// 3) Set to the current epoch on entry.
//
// Pre-emptable invocations are:
// Pre-emptible invocations are:
// 1) Tracked by thread ID.
// 2) Accessed under a lock.
// 3) Set to the current epoch on entry.
@ -49,7 +49,7 @@ static _Requires_lock_held_(&_ebpf_epoch_thread_table_lock) ebpf_hash_table_t* _
// Per-CPU epoch-tracking state. One entry exists for each logical CPU;
// non-preemptible invocations record their epoch here (see file header).
typedef struct _ebpf_epoch_cpu_entry
{
    // Epoch observed by the most recent non-preemptible invocation on this CPU.
    int64_t epoch;
    // Work item used to push epoch updates onto this CPU.
    // Note: the stale misspelled duplicate field "non_preemtable_work_item"
    // (diff residue from the rename) has been removed; this is the sole member.
    ebpf_non_preemptible_work_item_t* non_preemptible_work_item;
    // Protects free_list below.
    ebpf_lock_t free_list_lock;
    // List of memory blocks awaiting release once their epoch retires.
    ebpf_list_entry_t free_list;
} ebpf_epoch_cpu_entry_t;
@ -99,7 +99,7 @@ _ebpf_epoch_release_free_list(uint32_t cpu_id, int64_t released_epoch);
// Get the highest epoch that is no longer in use.
static ebpf_result_t
_ebpf_epoch_get_release_epoch(int64_t* released_epoch);
_ebpf_epoch_get_release_epoch(_Out_ int64_t* released_epoch);
static void
_ebpf_epoch_update_cpu_entry(void* context, void* parameter_1);
@ -114,13 +114,12 @@ ebpf_epoch_initiate()
uint32_t cpu_id;
uint32_t cpu_count;
ebpf_get_cpu_count(&cpu_count);
cpu_count = ebpf_get_cpu_count();
_ebpf_epoch_initiated = true;
_ebpf_epoch_rundown = false;
_ebpf_current_epoch = 1;
_ebpf_epoch_cpu_count = cpu_count;
_Analysis_assume_(_ebpf_epoch_cpu_count >= 1);
ebpf_lock_create(&_ebpf_epoch_thread_table_lock);
@ -144,7 +143,7 @@ ebpf_epoch_initiate()
if (return_value != EBPF_SUCCESS) {
goto Error;
}
_ebpf_epoch_cpu_table[cpu_id].non_preemtable_work_item = work_item_context;
_ebpf_epoch_cpu_table[cpu_id].non_preemptible_work_item = work_item_context;
}
}
@ -176,8 +175,8 @@ ebpf_epoch_terminate()
if (ebpf_is_non_preemptible_work_item_supported()) {
for (cpu_id = 0; cpu_id < _ebpf_epoch_cpu_count; cpu_id++) {
ebpf_free_non_preemptible_work_item(_ebpf_epoch_cpu_table[cpu_id].non_preemtable_work_item);
_ebpf_epoch_cpu_table[cpu_id].non_preemtable_work_item = NULL;
ebpf_free_non_preemptible_work_item(_ebpf_epoch_cpu_table[cpu_id].non_preemptible_work_item);
_ebpf_epoch_cpu_table[cpu_id].non_preemptible_work_item = NULL;
}
}
_ebpf_epoch_cpu_count = 0;
@ -262,7 +261,7 @@ ebpf_epoch_flush()
// Note: Either the per-cpu epoch or the global epoch could be out of date.
// That is acceptable as it may schedule an extra work item.
if (_ebpf_epoch_cpu_table[cpu_id].epoch != _ebpf_current_epoch)
ebpf_queue_non_preemptible_work_item(_ebpf_epoch_cpu_table[cpu_id].non_preemtable_work_item, NULL);
ebpf_queue_non_preemptible_work_item(_ebpf_epoch_cpu_table[cpu_id].non_preemptible_work_item, NULL);
}
}
@ -361,7 +360,7 @@ _ebpf_epoch_release_free_list(uint32_t cpu_id, int64_t released_epoch)
}
static ebpf_result_t
_ebpf_epoch_get_release_epoch(int64_t* release_epoch)
_ebpf_epoch_get_release_epoch(_Out_ int64_t* release_epoch)
{
int64_t lowest_epoch = INT64_MAX;
int64_t* thread_epoch;

Просмотреть файл

@ -258,11 +258,9 @@ extern "C"
/**
 * @brief Query the platform for the total number of CPUs.
 * @return The count of logical cores in the system.
 */
void
ebpf_get_cpu_count(_Out_ uint32_t* cpu_count);
_Ret_range_(>, 0) uint32_t ebpf_get_cpu_count();
/**
* @brief Query the platform to determine if the current execution can

Просмотреть файл

@ -236,11 +236,7 @@ ebpf_interlocked_compare_exchange_pointer(
return InterlockedCompareExchangePointer((void* volatile*)destination, (void*)exchange, (void*)comperand);
}
/**
 * @brief Query the platform for the total number of CPUs.
 *
 * Kernel-mode implementation backed by KeQueryMaximumProcessorCount.
 *
 * @return The count of logical cores in the system (annotated as always > 0).
 */
_Ret_range_(>, 0) uint32_t
ebpf_get_cpu_count()
{
    // Only one definition may exist: the obsolete out-parameter variant
    // (void ebpf_get_cpu_count(_Out_ uint32_t*)) left over in the diff
    // residue has been removed in favor of this value-returning form.
    return KeQueryMaximumProcessorCount();
}
bool
ebpf_is_preemptible()

Просмотреть файл

@ -130,8 +130,7 @@ TEST_CASE("hash_table_stress_test", "[platform]")
ebpf_hash_table_t* table = nullptr;
const size_t iterations = 1000;
uint32_t worker_threads;
ebpf_get_cpu_count(&worker_threads);
uint32_t worker_threads = ebpf_get_cpu_count();
REQUIRE(
ebpf_hash_table_create(
&table, ebpf_epoch_allocate, ebpf_epoch_free, sizeof(uint32_t), sizeof(uint64_t), worker_threads, NULL) ==

Просмотреть файл

@ -213,12 +213,11 @@ ebpf_random_uint32()
return mt();
}
void
ebpf_get_cpu_count(_Out_ uint32_t* cpu_count)
_Ret_range_(>, 0) uint32_t ebpf_get_cpu_count()
{
SYSTEM_INFO system_info;
GetNativeSystemInfo(&system_info);
*cpu_count = system_info.dwNumberOfProcessors;
return system_info.dwNumberOfProcessors;
}
bool