Add support for allocating pages for code that can be marked read/execute (#228)
* Add LLVM for code generation proposal
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
* Revert "Add LLVM for code generation proposal"
This reverts commit cd896afd94.
* Work in progress
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
* Add doxygen
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
* PR feedback
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
* PR feedback
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
* PR feedback
Signed-off-by: Alan Jowett <alanjo@microsoft.com>
This commit is contained in:
Родитель
2bd9033b95
Коммит
4b53d101aa
|
@ -65,7 +65,7 @@
|
|||
<ConfigurationType>Driver</ConfigurationType>
|
||||
<DriverType>KMDF</DriverType>
|
||||
<DriverTargetPlatform>Universal</DriverTargetPlatform>
|
||||
<Driver_SpectreMitigation>false</Driver_SpectreMitigation>
|
||||
<Driver_SpectreMitigation>Spectre</Driver_SpectreMitigation>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
|
|
@ -124,6 +124,59 @@ extern "C"
|
|||
void
|
||||
ebpf_free(void* memory);
|
||||
|
||||
typedef enum _ebpf_page_protection
|
||||
{
|
||||
EBPF_PAGE_PROTECT_READ_ONLY,
|
||||
EBPF_PAGE_PROTECT_READ_WRITE,
|
||||
EBPF_PAGE_PROTECT_READ_EXECUTE,
|
||||
} ebpf_page_protection_t;
|
||||
|
||||
typedef struct _ebpf_memory_descriptor ebpf_memory_descriptor_t;
|
||||
|
||||
/**
|
||||
* @brief Allocate pages from physical memory and create a mapping into the
|
||||
* system address space.
|
||||
*
|
||||
* @param[in] length Size of memory to allocate (internally this gets rounded
|
||||
* up to a page boundary).
|
||||
* @return Pointer to an ebpf_memory_descriptor_t on success, NULL on failure.
|
||||
*/
|
||||
ebpf_memory_descriptor_t*
|
||||
ebpf_map_memory(size_t length);
|
||||
|
||||
/**
|
||||
* @brief Release physical memory previously allocated via ebpf_map_memory.
|
||||
*
|
||||
* @param[in] memory_descriptor Pointer to ebpf_memory_descriptor_t describing
|
||||
* allocated pages.
|
||||
*/
|
||||
void
|
||||
ebpf_unmap_memory(ebpf_memory_descriptor_t* memory_descriptor);
|
||||
|
||||
/**
|
||||
* @brief Change the page protection on memory allocated via
|
||||
* ebpf_map_memory.
|
||||
*
|
||||
* @param[in] memory_descriptor Pointer to an ebpf_memory_descriptor_t
|
||||
* describing allocated pages.
|
||||
* @param[in] protection The new page protection to apply.
|
||||
* @retval EBPF_SUCCESS The operation was successful.
|
||||
* @retval EBPF_INVALID_ARGUMENT An invalid argument was supplied.
|
||||
*/
|
||||
ebpf_result_t
|
||||
ebpf_protect_memory(const ebpf_memory_descriptor_t* memory_descriptor, ebpf_page_protection_t protection);
|
||||
|
||||
/**
|
||||
* @brief Given an ebpf_memory_descriptor_t allocated via ebpf_map_memory
|
||||
* obtain the base virtual address.
|
||||
*
|
||||
* @param[in] memory_descriptor Pointer to an ebpf_memory_descriptor_t
|
||||
* describing allocated pages.
|
||||
* @return Base virtual address of pages that have been allocated.
|
||||
*/
|
||||
void*
|
||||
ebpf_memory_descriptor_get_base_address(ebpf_memory_descriptor_t* memory_descriptor);
|
||||
|
||||
/**
|
||||
* @brief Allocate and copy a UTF-8 string.
|
||||
*
|
||||
|
|
|
@ -10,6 +10,11 @@
|
|||
|
||||
#include <ntstrsafe.h>
|
||||
|
||||
typedef struct _ebpf_memory_descriptor
|
||||
{
|
||||
MDL memory_descriptor_list;
|
||||
} ebpf_memory_descriptor_t;
|
||||
|
||||
typedef enum _ebpf_pool_tag
|
||||
{
|
||||
EBPF_POOL_TAG = 'fpbe'
|
||||
|
@ -39,6 +44,65 @@ ebpf_free(void* memory)
|
|||
ExFreePool(memory);
|
||||
}
|
||||
|
||||
ebpf_memory_descriptor_t*
|
||||
ebpf_map_memory(size_t length)
|
||||
{
|
||||
MDL* memory_descriptor_list = NULL;
|
||||
PHYSICAL_ADDRESS start_address;
|
||||
PHYSICAL_ADDRESS end_address;
|
||||
PHYSICAL_ADDRESS page_size;
|
||||
start_address.QuadPart = 0;
|
||||
end_address.QuadPart = -1;
|
||||
page_size.QuadPart = PAGE_SIZE;
|
||||
memory_descriptor_list =
|
||||
MmAllocatePagesForMdlEx(start_address, end_address, page_size, length, MmCached, MM_ALLOCATE_FULLY_REQUIRED);
|
||||
|
||||
if (memory_descriptor_list) {
|
||||
MmProbeAndLockPages(memory_descriptor_list, KernelMode, IoWriteAccess);
|
||||
}
|
||||
return (ebpf_memory_descriptor_t*)memory_descriptor_list;
|
||||
}
|
||||
|
||||
void
|
||||
ebpf_unmap_memory(ebpf_memory_descriptor_t* memory_descriptor)
|
||||
{
|
||||
MmUnlockPages(&memory_descriptor->memory_descriptor_list);
|
||||
MmFreePagesFromMdl(&memory_descriptor->memory_descriptor_list);
|
||||
ExFreePool(memory_descriptor);
|
||||
}
|
||||
|
||||
ebpf_result_t
|
||||
ebpf_protect_memory(const ebpf_memory_descriptor_t* memory_descriptor, ebpf_page_protection_t protection)
|
||||
{
|
||||
NTSTATUS status;
|
||||
ULONG mm_protection_state = 0;
|
||||
switch (protection) {
|
||||
case EBPF_PAGE_PROTECT_READ_ONLY:
|
||||
mm_protection_state = PAGE_READONLY;
|
||||
break;
|
||||
case EBPF_PAGE_PROTECT_READ_WRITE:
|
||||
mm_protection_state = PAGE_READWRITE;
|
||||
break;
|
||||
case EBPF_PAGE_PROTECT_READ_EXECUTE:
|
||||
mm_protection_state = PAGE_EXECUTE_READ;
|
||||
break;
|
||||
default:
|
||||
return EBPF_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
status = MmProtectMdlSystemAddress((MDL*)&memory_descriptor->memory_descriptor_list, mm_protection_state);
|
||||
if (!NT_SUCCESS(status))
|
||||
return EBPF_INVALID_ARGUMENT;
|
||||
|
||||
return EBPF_SUCCESS;
|
||||
}
|
||||
|
||||
void*
|
||||
ebpf_memory_descriptor_get_base_address(ebpf_memory_descriptor_t* memory_descriptor)
|
||||
{
|
||||
return MmGetSystemAddressForMdlSafe(&memory_descriptor->memory_descriptor_list, NormalPagePriority);
|
||||
}
|
||||
|
||||
// There isn't an official API to query this information from the kernel.
// Use NtQuerySystemInformation with the struct and header from winternl.h.
|
||||
|
||||
|
|
|
@ -59,14 +59,14 @@ TEST_CASE("pinning_test", "[pinning_test]")
|
|||
|
||||
some_object_t an_object;
|
||||
some_object_t another_object;
|
||||
some_object_t* some_object;
|
||||
some_object_t* some_object = nullptr;
|
||||
ebpf_utf8_string_t foo = EBPF_UTF8_STRING_FROM_CONST_STRING("foo");
|
||||
ebpf_utf8_string_t bar = EBPF_UTF8_STRING_FROM_CONST_STRING("bar");
|
||||
|
||||
ebpf_object_initialize(&an_object.object, EBPF_OBJECT_MAP, [](ebpf_object_t*) {});
|
||||
ebpf_object_initialize(&another_object.object, EBPF_OBJECT_MAP, [](ebpf_object_t*) {});
|
||||
|
||||
ebpf_pinning_table_t* pinning_table;
|
||||
ebpf_pinning_table_t* pinning_table = nullptr;
|
||||
REQUIRE(ebpf_pinning_table_allocate(&pinning_table) == EBPF_SUCCESS);
|
||||
|
||||
REQUIRE(ebpf_pinning_table_insert(pinning_table, &foo, &an_object.object) == EBPF_SUCCESS);
|
||||
|
@ -153,9 +153,9 @@ TEST_CASE("extension_test", "[extension_test]")
|
|||
const ebpf_extension_dispatch_table_t* returned_provider_dispatch_table;
|
||||
const ebpf_extension_data_t* returned_provider_data;
|
||||
|
||||
ebpf_extension_provider_t* provider_context;
|
||||
ebpf_extension_client_t* client_context;
|
||||
void* provider_binding_context;
|
||||
ebpf_extension_provider_t* provider_context = nullptr;
|
||||
ebpf_extension_client_t* client_context = nullptr;
|
||||
void* provider_binding_context = nullptr;
|
||||
|
||||
ebpf_guid_create(&interface_id);
|
||||
|
||||
|
@ -237,14 +237,15 @@ TEST_CASE("program_type_info", "[program_type_info]")
|
|||
EBPF_RETURN_TYPE_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
{EBPF_ARGUMENT_TYPE_PTR_TO_MAP, EBPF_ARGUMENT_TYPE_PTR_TO_MAP_KEY}},
|
||||
};
|
||||
ebpf_context_descriptor_t context_descriptor{sizeof(xdp_md_t),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data_end),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data_meta)};
|
||||
ebpf_context_descriptor_t context_descriptor{
|
||||
sizeof(xdp_md_t),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data_end),
|
||||
EBPF_OFFSET_OF(xdp_md_t, data_meta)};
|
||||
ebpf_program_type_descriptor_t program_type{"xdp", &context_descriptor};
|
||||
ebpf_program_information_t program_information{program_type, _countof(helper_functions), helper_functions};
|
||||
ebpf_program_information_t* new_program_information = nullptr;
|
||||
uint8_t* buffer;
|
||||
uint8_t* buffer = nullptr;
|
||||
unsigned long buffer_size;
|
||||
REQUIRE(ebpf_program_information_encode(&program_information, &buffer, &buffer_size) == EBPF_SUCCESS);
|
||||
REQUIRE(ebpf_program_information_decode(&new_program_information, buffer, buffer_size) == EBPF_SUCCESS);
|
||||
|
@ -298,3 +299,17 @@ TEST_CASE("access_check", "[access_check]")
|
|||
|
||||
REQUIRE((result = ebpf_access_check(sd, 1, &generic_mapping), LocalFree(sd), result == EBPF_ERROR_ACCESS_DENIED));
|
||||
}
|
||||
|
||||
TEST_CASE("memory_map_test", "[memory_map_test]")
|
||||
{
|
||||
ebpf_result_t result;
|
||||
ebpf_memory_descriptor_t* memory_descriptor = nullptr;
|
||||
REQUIRE((memory_descriptor = ebpf_map_memory(100)) != nullptr);
|
||||
REQUIRE(
|
||||
(result = ebpf_protect_memory(memory_descriptor, EBPF_PAGE_PROTECT_READ_WRITE),
|
||||
result != EBPF_SUCCESS ? ebpf_unmap_memory(memory_descriptor) : (void)0,
|
||||
result == EBPF_SUCCESS));
|
||||
memset(ebpf_memory_descriptor_get_base_address(memory_descriptor), 0xCC, 100);
|
||||
REQUIRE(ebpf_protect_memory(memory_descriptor, EBPF_PAGE_PROTECT_READ_ONLY) == EBPF_SUCCESS);
|
||||
ebpf_unmap_memory(memory_descriptor);
|
||||
}
|
|
@ -131,6 +131,72 @@ ebpf_free(void* memory)
|
|||
free(memory);
|
||||
}
|
||||
}
|
||||
struct _ebpf_memory_descriptor
|
||||
{
|
||||
void* base;
|
||||
size_t length;
|
||||
};
|
||||
typedef struct _ebpf_memory_descriptor ebpf_memory_descriptor_t;
|
||||
|
||||
ebpf_memory_descriptor_t*
|
||||
ebpf_map_memory(size_t length)
|
||||
{
|
||||
ebpf_memory_descriptor_t* descriptor = (ebpf_memory_descriptor_t*)malloc(sizeof(ebpf_memory_descriptor_t));
|
||||
if (!descriptor) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
descriptor->base = VirtualAlloc(0, length, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
|
||||
descriptor->length = length;
|
||||
|
||||
if (!descriptor->base) {
|
||||
free(descriptor);
|
||||
descriptor = nullptr;
|
||||
}
|
||||
return descriptor;
|
||||
}
|
||||
|
||||
void
|
||||
ebpf_unmap_memory(ebpf_memory_descriptor_t* memory_descriptor)
|
||||
{
|
||||
if (memory_descriptor) {
|
||||
VirtualFree(memory_descriptor->base, 0, MEM_RELEASE);
|
||||
free(memory_descriptor);
|
||||
}
|
||||
}
|
||||
|
||||
ebpf_result_t
|
||||
ebpf_protect_memory(const ebpf_memory_descriptor_t* memory_descriptor, ebpf_page_protection_t protection)
|
||||
{
|
||||
ULONG mm_protection_state = 0;
|
||||
ULONG old_mm_protection_state = 0;
|
||||
switch (protection) {
|
||||
case EBPF_PAGE_PROTECT_READ_ONLY:
|
||||
mm_protection_state = PAGE_READONLY;
|
||||
break;
|
||||
case EBPF_PAGE_PROTECT_READ_WRITE:
|
||||
mm_protection_state = PAGE_READWRITE;
|
||||
break;
|
||||
case EBPF_PAGE_PROTECT_READ_EXECUTE:
|
||||
mm_protection_state = PAGE_EXECUTE_READ;
|
||||
break;
|
||||
default:
|
||||
return EBPF_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
if (!VirtualProtect(
|
||||
memory_descriptor->base, memory_descriptor->length, mm_protection_state, &old_mm_protection_state)) {
|
||||
return EBPF_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
return EBPF_SUCCESS;
|
||||
}
|
||||
|
||||
void*
|
||||
ebpf_memory_descriptor_get_base_address(ebpf_memory_descriptor_t* memory_descriptor)
|
||||
{
|
||||
return memory_descriptor->base;
|
||||
}
|
||||
|
||||
ebpf_result_t
|
||||
ebpf_safe_size_t_multiply(size_t multiplicand, size_t multiplier, size_t* result)
|
||||
|
|
|
@ -65,7 +65,7 @@
|
|||
<ConfigurationType>Driver</ConfigurationType>
|
||||
<DriverType>KMDF</DriverType>
|
||||
<DriverTargetPlatform>Universal</DriverTargetPlatform>
|
||||
<Driver_SpectreMitigation>false</Driver_SpectreMitigation>
|
||||
<Driver_SpectreMitigation>Spectre</Driver_SpectreMitigation>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
|
Загрузка…
Ссылка в новой задаче