Mirror of https://github.com/microsoft/CCF.git
Mitigations for potential stale reads from Legacy xAPIC (#4127)
Parent: 14e546b798
Commit: dafcd67024
@@ -71,6 +71,11 @@ option(BUILD_TESTS "Build tests" ON)
 option(BUILD_UNIT_TESTS "Build unit tests" ON)
 option(TLS_TEST "TLS Test using https://github.com/drwetter/testssl.sh" OFF)
 option(BUILD_TPCC "Build TPCC sample app and clients" OFF)
+option(
+  FORCE_ENABLE_XAPIC_MITIGATION
+  "Always enable aligned reads from host-memory to mitigate xAPIC stale data read vulnerability. When this setting is off, the mitigation is enabled at run-time when vulnerable hardware is detected"
+  OFF
+)
 
 # Allow framework code to use LOG_*_FMT macros. These will be removed from
 # public headers in future
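For orientation, a minimal standalone sketch of how this option is consumed. The add_compile_definitions plumbing that turns the CMake option into a FORCE_ENABLE_XAPIC_MITIGATION macro is not part of this excerpt, but the PAL hunks further down test exactly that macro; detect_vulnerable_hardware() is a placeholder for the CPUID probe shown later in the diff.

// Illustrative sketch only, not part of the commit.
#include <optional>

static bool detect_vulnerable_hardware()
{
  return false; // placeholder; the real check appears in the OEPal hunk below
}

static bool require_alignment_for_untrusted_reads()
{
#ifdef FORCE_ENABLE_XAPIC_MITIGATION
  // Build-time override: mitigate unconditionally, regardless of the host CPU.
  return true;
#else
  // Run-time path: probe the hardware once and cache the answer.
  static std::optional<bool> required = std::nullopt;
  if (!required.has_value())
  {
    required = detect_vulnerable_hardware();
  }
  return required.value();
#endif
}

int main()
{
  return require_alignment_for_untrusted_reads() ? 0 : 1;
}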
@@ -2,6 +2,8 @@
 // Licensed under the Apache 2.0 License.
 #pragma once
 
+#include "ccf/ds/pal.h"
+
 #include <cassert>
 #include <cstdint>
 #include <cstring>
@@ -53,58 +55,20 @@ namespace crypto
   class IntelDRNG : public Entropy
   {
   private:
-    typedef struct cpuid_struct
-    {
-      unsigned int eax;
-      unsigned int ebx;
-      unsigned int ecx;
-      unsigned int edx;
-    } cpuid_t;
-
-    static void cpuid(cpuid_t* info, unsigned int leaf, unsigned int subleaf)
-    {
-      asm volatile(
-        "cpuid"
-        : "=a"(info->eax), "=b"(info->ebx), "=c"(info->ecx), "=d"(info->edx)
-        : "a"(leaf), "c"(subleaf));
-    }
-
-    static int _is_intel_cpu()
-    {
-      static int intel_cpu = -1;
-      cpuid_t info;
-
-      if (intel_cpu == -1)
-      {
-        cpuid(&info, 0, 0);
-
-        if (
-          memcmp((char*)&info.ebx, "Genu", 4) ||
-          memcmp((char*)&info.edx, "ineI", 4) ||
-          memcmp((char*)&info.ecx, "ntel", 4))
-          intel_cpu = 0;
-        else
-          intel_cpu = 1;
-      }
-
-      return intel_cpu;
-    }
-
     static int get_drng_support()
     {
       static int drng_features = -1;
 
      /* So we don't call cpuid multiple times for the same information */
 
      if (drng_features == -1)
      {
        drng_features = DRNG_NO_SUPPORT;
 
-        if (_is_intel_cpu())
+        if (ccf::is_intel_cpu())
        {
-          cpuid_t info;
+          ccf::CpuidInfo info;
 
-          cpuid(&info, 1, 0);
+          ccf::cpuid(&info, 1, 0);
 
          if ((info.ecx & 0x40000000) == 0x40000000)
            drng_features |= DRNG_HAS_RDRAND;
@@ -7,6 +7,7 @@
 
 #include <cstdint>
 #include <cstdlib>
+#include <set>
 
 #if !defined(INSIDE_ENCLAVE) || defined(VIRTUAL_ENCLAVE)
 # include <cstring>
@@ -48,6 +49,47 @@ namespace ccf
     size_t peak_allocated_heap_size = 0;
   };
 
+  struct CpuidInfo
+  {
+    uint64_t eax;
+    uint64_t ebx;
+    uint64_t ecx;
+    uint64_t edx;
+  };
+
+  static void cpuid(CpuidInfo* info, uint64_t leaf, uint64_t subleaf)
+  {
+    asm volatile(
+      "cpuid"
+      : "=a"(info->eax), "=b"(info->ebx), "=c"(info->ecx), "=d"(info->edx)
+      : "a"(leaf), "c"(subleaf));
+  }
+
+  static bool is_intel_cpu()
+  {
+    static int intel_cpu = -1;
+
+    if (intel_cpu == -1)
+    {
+      CpuidInfo info;
+      cpuid(&info, 0, 0);
+
+      if (
+        memcmp((char*)&info.ebx, "Genu", 4) ||
+        memcmp((char*)&info.edx, "ineI", 4) ||
+        memcmp((char*)&info.ecx, "ntel", 4))
+      {
+        intel_cpu = 1;
+      }
+      else
+      {
+        intel_cpu = 0;
+      }
+    }
+
+    return intel_cpu == 1;
+  }
+
 #if !defined(INSIDE_ENCLAVE) || defined(VIRTUAL_ENCLAVE)
   /**
    * Virtual enclaves and the host code share the same PAL.
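For readers who want to exercise the new CpuidInfo/cpuid helpers outside CCF, here is a standalone sketch (not part of the commit, x86-64 with GCC/Clang assumed). It calls CPUID leaf 1 and extracts the same family/model bits that the mitigation later masks with 0x000F'0FF0; the EAX bit layout comes from Intel's documented leaf-1 format.

// Standalone illustration only.
#include <cstdint>
#include <cstdio>

struct CpuidInfo
{
  uint64_t eax;
  uint64_t ebx;
  uint64_t ecx;
  uint64_t edx;
};

static void cpuid(CpuidInfo* info, uint64_t leaf, uint64_t subleaf)
{
  asm volatile(
    "cpuid"
    : "=a"(info->eax), "=b"(info->ebx), "=c"(info->ecx), "=d"(info->edx)
    : "a"(leaf), "c"(subleaf));
}

int main()
{
  CpuidInfo info{};
  cpuid(&info, 1, 0);

  // EAX layout for leaf 1: stepping [3:0], model [7:4], family [11:8],
  // extended model [19:16], extended family [27:20].
  const uint64_t proc_id = info.eax & 0x000F'0FF0; // family + model, stepping ignored
  printf(
    "CPUID.1:EAX = 0x%08llx, masked proc id = 0x%05llx\n",
    (unsigned long long)info.eax,
    (unsigned long long)proc_id);
  return 0;
}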
@@ -106,6 +148,15 @@ namespace ccf
       unique_id = {};
       report_data = {};
     }
+
+    static bool require_alignment_for_untrusted_reads()
+    {
+# ifdef FORCE_ENABLE_XAPIC_MITIGATION
+      return true;
+# else
+      return false;
+# endif
+    }
   };
 
   using Pal = HostPal;
@@ -346,6 +397,20 @@ namespace ccf
       }
     }
 
+    static bool require_alignment_for_untrusted_reads()
+    {
+# ifdef FORCE_ENABLE_XAPIC_MITIGATION
+      return true;
+# else
+      static std::optional<bool> required = std::nullopt;
+      if (!required.has_value())
+      {
+        required = is_intel_cpu() && is_vulnerable_to_stale_xapic_read();
+      }
+      return required.value();
+# endif
+    }
+
   private:
     static void open_enclave_logging_callback(
       void* context,
@@ -376,6 +441,43 @@ namespace ccf
         break;
       }
     }
+
+    static bool is_vulnerable_to_stale_xapic_read()
+    {
+      ccf::CpuidInfo info;
+
+      ccf::cpuid(&info, 1, 0);
+
+      // Ignores stepping, looks only at model and family: potentially
+      // includes safe instances which differ only by stepping from a vulnerable
+      // instance.
+      constexpr uint64_t proc_id_mask = 0x000F'0FF0;
+      const uint64_t proc_id = info.eax & proc_id_mask;
+
+      // https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
+      // 2022 tab, column "Stale Data Read from Legacy xAPIC, CVE-2022-21233,
+      // INTEL-SA-00657"
+      const std::set<uint64_t> vulnerable_proc_ids{
+        0x506C0, // Apollo Lake
+        0x506F0, // Denverton (Goldmont)
+        0x606A0, // Ice Lake Xeon-SP
+        0x606C0, // Ice Lake D
+        0x706A0, // Gemini Lake
+        0x706E0, // Ice Lake U, Y
+        0x80660, // Snow Ridge BTS (Tremont)
+        0x806A0, // Lakefield B-step (Tremont)
+        0x806C0, // Tiger Lake U
+        0x806D0, // Tiger Lake H
+        0x90660, // Elkhart Lake (Tremont)
+        0x90670, // Alder Lake S (Golden Cove, Gracemont)
+        0x906A0, // Alder Lake H (Golden Cove, Gracemont)
+        0x906C0, // Jasper Lake (Tremont)
+        0xA0670 // Rocket Lake
+      };
+
+      const auto it = vulnerable_proc_ids.find(proc_id);
+      return it != vulnerable_proc_ids.end();
+    }
   };
 
   using Pal = OEPal;
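A worked example of the mask used above, with illustrative register values that are not taken from the commit: an Ice Lake Xeon-SP part might report CPUID.1:EAX = 0x000606A6. Masking with 0x000F'0FF0 drops the stepping nibble and keeps family plus model, which is what the vulnerable-processor lookup compares against.

// Illustrative values only; compiles as a standalone check.
#include <cstdint>

constexpr uint64_t proc_id_mask = 0x000F'0FF0;

constexpr uint64_t example_eax = 0x000606A6; // hypothetical Ice Lake Xeon-SP, stepping 6
static_assert((example_eax & proc_id_mask) == 0x606A0, "stepping is ignored");

// A part that differs only by stepping maps to the same id, so it is treated
// as vulnerable even if that particular stepping happens to be fixed.
constexpr uint64_t other_stepping = 0x000606A4;
static_assert((other_stepping & proc_id_mask) == 0x606A0, "same family/model");

int main() {}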
@@ -38,7 +38,10 @@ enum CreateNodeStatus
   ReconfigurationMethodNotSupported = 10,
 
   /** Host and enclave versions must match */
-  VersionMismatch = 11
+  VersionMismatch = 11,
+
+  /** When reading from host memory, the source must be 8-byte aligned **/
+  UnalignedArguments = 12,
 };
 
 constexpr char const* create_node_result_to_str(CreateNodeStatus result)
@@ -93,6 +96,10 @@ constexpr char const* create_node_result_to_str(CreateNodeStatus result)
     {
       return "VersionMismatch";
     }
+    case CreateNodeStatus::UnalignedArguments:
+    {
+      return "UnalignedArguments";
+    }
     default:
     {
       return "Unknown CreateNodeStatus";
@@ -166,6 +166,8 @@ namespace ringbuffer
 
     BufferDef bd;
 
+    std::vector<uint8_t> local_copy;
+
     virtual uint64_t read64(size_t index)
     {
       bd.check_access(index, sizeof(uint64_t));
@@ -234,7 +236,26 @@
 
       // Call the handler function for this message.
       bd.check_access(hd_index, advance);
-      f(m, bd.data + msg_index + Const::header_size(), (size_t)size);
+
+      if (ccf::Pal::require_alignment_for_untrusted_reads() && size > 0)
+      {
+        // To prevent unaligned reads during message processing, copy aligned
+        // chunk into enclave memory
+        const auto copy_size = Const::align_size(size);
+        if (local_copy.size() < copy_size)
+        {
+          local_copy.resize(copy_size);
+        }
+        ccf::Pal::safe_memcpy(
+          local_copy.data(),
+          bd.data + msg_index + Const::header_size(),
+          copy_size);
+        f(m, local_copy.data(), (size_t)size);
+      }
+      else
+      {
+        f(m, bd.data + msg_index + Const::header_size(), (size_t)size);
+      }
     }
 
     if (advance > 0)
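A hedged sketch of the read path introduced above, runnable outside the enclave. Const::align_size and ccf::Pal::safe_memcpy are CCF internals not defined in this diff; they are modelled here as an 8-byte round-up and std::memcpy, and the source buffer is assumed to be padded to an 8-byte multiple, as the ringbuffer is.

// Standalone model of the mitigation, under the assumptions stated above.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

static size_t align_size(size_t size)
{
  return (size + 7) & ~size_t(7); // assumed 8-byte granularity
}

template <typename Handler>
void handle_message(
  const uint8_t* untrusted_payload, // assumed padded to a multiple of 8 bytes
  size_t size,
  std::vector<uint8_t>& local_copy,
  Handler&& f)
{
  if (size > 0)
  {
    const auto copy_size = align_size(size);
    if (local_copy.size() < copy_size)
    {
      local_copy.resize(copy_size);
    }
    // One aligned bulk copy into enclave-owned memory instead of many
    // possibly-unaligned reads from host memory during handling.
    std::memcpy(local_copy.data(), untrusted_payload, copy_size);
    f(local_copy.data(), size);
  }
  else
  {
    f(untrusted_payload, size);
  }
}

A caller would hand in the ringbuffer payload pointer and reuse one local_copy vector across messages, mirroring the new local_copy member added in the hunk above.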
@@ -674,7 +674,12 @@ public:
 
 // This test checks that the ringbuffer functions correctly when the offsets
 // overflow and wrap around from their maximum representable size to 0
-TEST_CASE("Offset overflow" * doctest::test_suite("ringbuffer"))
+TEST_CASE(
+  "Offset overflow" *
+  doctest::test_suite("ringbuffer")
+  // Skip when xAPIC mitigations are enabled, which are not correctly handled by
+  // SparseReader
+  * doctest::skip(ccf::Pal::require_alignment_for_untrusted_reads()))
 {
   srand(time(NULL));
 
@@ -767,7 +772,12 @@ TEST_CASE("Offset overflow" * doctest::test_suite("ringbuffer"))
   }
 }
 
-TEST_CASE("Malicious writer" * doctest::test_suite("ringbuffer"))
+TEST_CASE(
+  "Malicious writer" *
+  doctest::test_suite("ringbuffer")
+  // Skip when xAPIC mitigations are enabled, since the core assertion that
+  // reads are within the original buffer is deliberately broken by the Reader
+  * doctest::skip(ccf::Pal::require_alignment_for_untrusted_reads()))
 {
   constexpr auto buffer_size = 256ull;
 
@@ -4,6 +4,6 @@
 
 namespace ccf
 {
-  std::atomic<std::chrono::microseconds>* host_time = nullptr;
+  std::atomic<long long>* host_time_us = nullptr;
   std::chrono::microseconds last_value(0);
 }
@@ -7,18 +7,18 @@
 
 namespace ccf
 {
-  extern std::atomic<std::chrono::microseconds>* host_time;
+  extern std::atomic<long long>* host_time_us;
   extern std::chrono::microseconds last_value;
 
   static std::chrono::microseconds get_enclave_time()
   {
     // Update cached value if possible, but never move backwards
-    if (host_time != nullptr)
+    if (host_time_us != nullptr)
     {
-      const auto current_time = host_time->load();
-      if (current_time > last_value)
+      const auto current_time = host_time_us->load();
+      if (current_time > last_value.count())
       {
-        last_value = current_time;
+        last_value = std::chrono::microseconds(current_time);
       }
     }
 
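A quick standalone check of the type swapped in by this diff: the host and enclave now share a std::atomic<long long>, a plain 8-byte integer, rather than a std::atomic<std::chrono::microseconds>. The asserts below only illustrate properties of the replacement type on a mainstream x86-64 ABI; the rationale is inferred from the alignment theme of this commit, not stated in it.

// Illustrative check, not part of the commit.
#include <atomic>
#include <chrono>

static_assert(
  sizeof(std::atomic<long long>) == 8,
  "8-byte shared counter on the targeted x86-64 ABI");
static_assert(
  std::atomic<long long>::is_always_lock_free,
  "loads/stores compile to plain aligned 8-byte accesses");

int main()
{
  std::atomic<long long> host_time_us{0};
  // Enclave-side pattern from the diff: load the raw count, then re-wrap it.
  const auto us = std::chrono::microseconds(host_time_us.load());
  return us.count() == 0 ? 0 : 1;
}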
@@ -11,6 +11,7 @@
 #include "ringbuffer_logger.h"
 
 #include <chrono>
+#include <cstdint>
 #include <thread>
 
 // the central enclave object
@@ -26,6 +27,13 @@ std::atomic<uint16_t> threading::ThreadMessaging::thread_count = 0;
 std::chrono::microseconds ccf::Channel::min_gap_between_initiation_attempts(
   2'000'000);
 
+static bool is_aligned(void* p, size_t align, size_t count = 0)
+{
+  const auto start = reinterpret_cast<std::uintptr_t>(p);
+  const auto end = start + count;
+  return (start % align == 0) && (end % align == 0);
+}
+
 extern "C"
 {
   // Confirming in-enclave behaviour in separate unit tests is tricky, so we do
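A standalone illustration of the helper above: both the start address and the end of the range must fall on the requested boundary, so a correctly placed but oddly sized buffer is still rejected. The buffer and sizes below are made up for the example.

// Illustrative usage only.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool is_aligned(void* p, size_t align, size_t count = 0)
{
  const auto start = reinterpret_cast<std::uintptr_t>(p);
  const auto end = start + count;
  return (start % align == 0) && (end % align == 0);
}

int main()
{
  alignas(8) unsigned char buffer[24];

  printf("%d\n", is_aligned(buffer, 8, 24));     // 1: aligned start, size is a multiple of 8
  printf("%d\n", is_aligned(buffer, 8, 20));     // 0: end of range is not 8-byte aligned
  printf("%d\n", is_aligned(buffer + 4, 8, 16)); // 0: start is not 8-byte aligned
  return 0;
}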
@@ -76,6 +84,12 @@ extern "C"
       return CreateNodeStatus::MemoryNotOutsideEnclave;
     }
 
+    if (!is_aligned(enclave_config, 8, sizeof(EnclaveConfig)))
+    {
+      LOG_FAIL_FMT("Read source memory not aligned: enclave_config");
+      return CreateNodeStatus::UnalignedArguments;
+    }
+
     EnclaveConfig ec = *static_cast<EnclaveConfig*>(enclave_config);
 
     // Setup logger to allow enclave logs to reach the host before node is
@@ -131,13 +145,21 @@ extern "C"
 
     // Check that where we expect arguments to be in host-memory, they really
    // are. lfence after these checks to prevent speculative execution
-    if (!ccf::Pal::is_outside_enclave(time_location, sizeof(ccf::host_time)))
+    if (!ccf::Pal::is_outside_enclave(
+          time_location, sizeof(*ccf::host_time_us)))
     {
       LOG_FAIL_FMT("Memory outside enclave: time_location");
       return CreateNodeStatus::MemoryNotOutsideEnclave;
     }
 
-    ccf::host_time = static_cast<decltype(ccf::host_time)>(time_location);
+    if (!is_aligned(time_location, 8, sizeof(*ccf::host_time_us)))
+    {
+      LOG_FAIL_FMT("Read source memory not aligned: time_location");
+      return CreateNodeStatus::UnalignedArguments;
+    }
+
+    ccf::host_time_us =
+      static_cast<decltype(ccf::host_time_us)>(time_location);
 
     // Check that ringbuffer memory ranges are entirely outside of the enclave
     if (!ccf::Pal::is_outside_enclave(
@@ -177,6 +199,12 @@ extern "C"
       return CreateNodeStatus::MemoryNotOutsideEnclave;
     }
 
+    if (!is_aligned(ccf_config, 8, ccf_config_size))
+    {
+      LOG_FAIL_FMT("Read source memory not aligned: ccf_config");
+      return CreateNodeStatus::UnalignedArguments;
+    }
+
     ccf::Pal::speculation_barrier();
 
     StartupConfig cc =
@@ -179,6 +179,16 @@ namespace host
 
       auto config = nlohmann::json(ccf_config).dump();
 
+      // Pad config with NULLs to a multiple of 8
+      const auto padded_size = (config.size() + 7) & ~(7ull);
+      if (config.size() != padded_size)
+      {
+        LOG_INFO_FMT(
+          "Padding config with {} additional nulls",
+          padded_size - config.size());
+        config.resize(padded_size);
+      }
+
 #define CREATE_NODE_ARGS \
   &status, (void*)&enclave_config, config.data(), config.size(), \
     node_cert.data(), node_cert.size(), &node_cert_len, service_cert.data(), \
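A worked example of the padding arithmetic above, with illustrative sizes only: (size + 7) & ~7ull rounds a size up to the next multiple of 8, so the config blob handed to the enclave can always be read with aligned 8-byte accesses.

// Compiles standalone; the sizes are arbitrary examples.
#include <cstdint>

constexpr uint64_t pad8(uint64_t size)
{
  return (size + 7) & ~(7ull);
}

static_assert(pad8(0) == 0, "zero stays zero");
static_assert(pad8(1) == 8, "1 byte rounds up to 8");
static_assert(pad8(41) == 48, "41 bytes of JSON round up to 48");
static_assert(pad8(48) == 48, "multiples of 8 are unchanged");

int main() {}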
@@ -11,9 +11,7 @@ namespace asynchost
 {
   class TimeUpdaterImpl
   {
-    using TClock = std::chrono::system_clock;
-
-    std::atomic<std::chrono::microseconds> time_now;
+    std::atomic<long long> time_now_us;
 
   public:
     TimeUpdaterImpl()
@@ -21,15 +19,17 @@ namespace asynchost
       on_timer();
     }
 
-    std::atomic<std::chrono::microseconds>* get_value()
+    std::atomic<long long>* get_value()
     {
-      return &time_now;
+      return &time_now_us;
     }
 
     void on_timer()
     {
-      time_now = std::chrono::duration_cast<std::chrono::microseconds>(
-        TClock::now().time_since_epoch());
+      using TClock = std::chrono::system_clock;
+      time_now_us = std::chrono::duration_cast<std::chrono::microseconds>(
+                      TClock::now().time_since_epoch())
+                      .count();
     }
   };
 
@@ -20,7 +20,7 @@
 
 namespace ccf
 {
-  std::atomic<std::chrono::microseconds>* host_time = nullptr;
+  std::atomic<long long>* host_time_us = nullptr;
   std::chrono::microseconds last_value(0);
 }
 