Enable compilation of the runtime on Linux

This change enables compilation of the runtime, excluding the PAL
layer, on Linux.
Most of the changes just make it build with clang, which is
stricter with respect to the C++11 standard.
In addition, I have removed our implementation of the
new / delete operators and replaced all calls to new in the
runtime with new (nothrow).
Jan Vorlicek 2015-10-17 10:18:37 +02:00
Parent 86b0cc9a82
Commit b8ee185564
48 changed files with 5646 additions and 504 deletions
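Note (not part of the diff): a minimal sketch of the allocation pattern this commit adopts throughout the runtime; the Widget type and CreateWidget helper are made up for illustration.

    #include <new>      // std::nothrow

    struct Widget { int value = 0; };                      // stand-in type for this sketch

    bool CreateWidget(Widget ** ppOut)
    {
        Widget * pWidget = new (std::nothrow) Widget();    // returns nullptr instead of throwing
        if (pWidget == nullptr)
            return false;                                  // caller handles allocation failure
        *ppOut = pWidget;
        return true;
    }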

View file

@ -110,5 +110,7 @@ endif (WIN32)
include(configure.cmake)
add_subdirectory(gc)
if(WIN32)
add_subdirectory(gc)
endif()
add_subdirectory(Runtime)

View file

@ -11,7 +11,6 @@ set(SOURCES
# AsmOffsetsVerify.cpp
assert.cpp
Crst.cpp
DebugEventSource.cpp
dllmain.cpp
eetype.cpp
EHHelpers.cpp
@ -24,14 +23,11 @@ set(SOURCES
GcStressControl.cpp
GenericInstance.cpp
HandleTableHelpers.cpp
InstanceStore.cpp
MathHelpers.cpp
MiscHelpers.cpp
module.cpp
ObjectLayout.cpp
OptionalFieldsRuntime.cpp
PalRedhawkCommon.cpp
PalRedhawkMinWin.cpp
portable.cpp
profheapwalkhelper.cpp
Profiling.cpp
@ -48,7 +44,6 @@ set(SOURCES
thread.cpp
threadstore.cpp
../gc/env/gcenv.windows.cpp
../gc/gccommon.cpp
../gc/gceewks.cpp
../gc/gcwks.cpp
@ -60,6 +55,20 @@ set(SOURCES
../gc/env/common.cpp
)
if(WIN32)
list(APPEND SOURCES
DebugEventSource.cpp
../gc/env/gcenv.windows.cpp
PalRedhawkCommon.cpp
PalRedhawkMinWin.cpp
)
else()
include_directories(unix)
list(APPEND SOURCES
../gc/env/gcenv.unix.cpp
)
endif()
if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-D_TARGET_AMD64_=1)
add_definitions(-D_AMD64_)
@ -84,30 +93,30 @@ else()
clr_unknown_arch()
endif()
add_definitions(-DAPP_LOCAL_RUNTIME)
add_definitions(-DFEATURE_BACKGROUND_GC)
add_definitions(-DFEATURE_BASICFREEZE)
add_definitions(-DFEATURE_CLR_EH)
add_definitions(-DFEATURE_COFF_OUTPUT)
add_definitions(-DFEATURE_CONSERVATIVE_GC)
add_definitions(-DFEATURE_CUSTOM_IMPORTS)
add_definitions(-DFEATURE_DECLSPEC_THREAD)
add_definitions(-DFEATURE_DYNAMIC_CODE)
add_compile_options($<$<CONFIG:Debug>:-DFEATURE_GC_STRESS>)
add_definitions(-DFEATURE_NUTC)
add_definitions(-DFEATURE_PROFILING)
add_definitions(-DFEATURE_REDHAWK)
add_definitions(-DFEATURE_SHARED_GENERICS)
add_definitions(-DMODERN_OS)
add_definitions(-DUSE_PORTABLE_HELPERS)
add_definitions(-DVERIFY_HEAP)
add_definitions(-DRTU_PORTABLE)
add_definitions(-D_LIB)
if(WIN32)
add_definitions(-DWIN32)
add_definitions(-DAPP_LOCAL_RUNTIME)
add_definitions(-DFEATURE_BACKGROUND_GC)
add_definitions(-DFEATURE_BASICFREEZE)
add_definitions(-DFEATURE_CLR_EH)
add_definitions(-DFEATURE_COFF_OUTPUT)
add_definitions(-DFEATURE_CONSERVATIVE_GC)
add_definitions(-DFEATURE_CUSTOM_IMPORTS)
add_definitions(-DFEATURE_DECLSPEC_THREAD)
add_definitions(-DFEATURE_DYNAMIC_CODE)
add_compile_options($<$<CONFIG:Debug>:-DFEATURE_GC_STRESS>)
add_definitions(-DFEATURE_NUTC)
add_definitions(-DFEATURE_PROFILING)
add_definitions(-DFEATURE_REDHAWK)
add_definitions(-DFEATURE_SHARED_GENERICS)
add_definitions(-DMODERN_OS)
add_definitions(-DUSE_PORTABLE_HELPERS)
# There is a problem with an undefined symbol, g_pConfig; the Windows build doesn't care since it is only referenced from a template method, but clang does
add_definitions(-DSTRESS_HEAP)
add_definitions(-DVERIFY_HEAP)
add_definitions(-DRTU_PORTABLE)
add_definitions(-D_LIB)
add_compile_options(/GS)
add_compile_options(/W1)
add_compile_options(/Zc:wchar_t)
@ -122,6 +131,13 @@ else()
add_compile_options(-Wno-unused-variable)
add_compile_options(-Wno-unused-private-field)
add_compile_options(-Wno-tautological-undefined-compare)
add_compile_options(-Wno-unknown-pragmas)
add_compile_options(-Wno-ignored-attributes)
add_compile_options(-Wno-unused-value)
add_compile_options(-Wno-undefined-inline)
add_compile_options(-Wno-unused-function)
add_compile_options(-Wno-self-assign)
endif()
add_library(Runtime STATIC ${SOURCES})

View file

@ -189,3 +189,10 @@ bool inline FitsInI4(__int64 val)
#ifndef GCENV_INCLUDED
#define C_ASSERT(e) typedef char __C_ASSERT__[(e)?1:-1]
#endif // GCENV_INCLUDED
#ifdef __llvm__
#define DECLSPEC_THREAD __thread
#else // __llvm__
#define DECLSPEC_THREAD __declspec(thread)
#endif // !__llvm__
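Note (illustrative sketch, not from the diff): the macro above lets a single declaration pick the right thread-local keyword for each toolchain; the variable and function below are hypothetical.

    #ifdef __llvm__
    #define DECLSPEC_THREAD __thread                  // GCC/clang thread-local keyword
    #else
    #define DECLSPEC_THREAD __declspec(thread)        // MSVC thread-local keyword
    #endif

    DECLSPEC_THREAD int t_lastErrorCode = 0;          // one copy of the variable per thread

    void RecordError(int code) { t_lastErrorCode = code; }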

View file

@ -6,6 +6,15 @@
#ifndef __COMMON_TYPES_H__
#define __COMMON_TYPES_H__
#include <cstdint>
#include <cstdlib>
#include <new>
using std::nothrow;
using std::size_t;
using std::uintptr_t;
using std::intptr_t;
//
// These type names are chosen to match the C# types
//
@ -17,13 +26,8 @@ typedef unsigned char UInt8;
typedef unsigned short UInt16;
typedef unsigned int UInt32;
typedef unsigned __int64 UInt64;
#if defined(TARGET_X64)
typedef signed __int64 IntNative; // intentional deviation from C# IntPtr
typedef unsigned __int64 UIntNative; // intentional deviation from C# UIntPtr
#else
typedef __w64 signed int IntNative; // intentional deviation from C# IntPtr
typedef __w64 unsigned int UIntNative; // intentional deviation from C# UIntPtr
#endif
typedef intptr_t IntNative; // intentional deviation from C# IntPtr
typedef uintptr_t UIntNative; // intentional deviation from C# UIntPtr
typedef wchar_t WCHAR;
typedef void * HANDLE;
@ -35,10 +39,11 @@ typedef UInt32 UInt32_BOOL; // windows 4-byte BOOL, 0 -> false,
#define UInt32_FALSE 0
#define UInt32_TRUE 1
#ifndef GCENV_INCLUDED
#define UNREFERENCED_PARAMETER(P) (P)
#endif // GCENV_INCLUDED
#ifndef UNREFERENCED_PARAMETER
#define UNREFERENCED_PARAMETER(P) (void)(P)
#endif // UNREFERENCED_PARAMETER
#undef NULL
#define NULL 0
#define UInt16_MAX ((UInt16)0xffffU)
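Note (sketch, not part of the diff): expanding the macro to a void cast is what keeps clang quiet; the old expansion '(P)' leaves an expression whose result is unused, which clang flags under -Wunused-value, while the void cast is silent on every compiler. The callback below is hypothetical.

    #define UNREFERENCED_PARAMETER(P) (void)(P)

    static int EnumCallback(void * context, int flags)
    {
        UNREFERENCED_PARAMETER(context);   // no warning under clang or MSVC
        return flags & 1;
    }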

View file

@ -29,7 +29,6 @@ private:
enum CrstType
{
CrstHandleTable,
CrstInstanceStore,
CrstThreadStore,
CrstDispatchCache,
CrstAllocHeap,

View file

@ -18,7 +18,6 @@
#include "slist.h"
#include "holder.h"
#include "Crst.h"
#include "InstanceStore.h"
#include "RWLock.h"
#include "RuntimeInstance.h"
#include "gcrhinterface.h"

View file

@ -24,7 +24,7 @@ struct GCEnumContext
GCEnumCallback pCallback;
};
enum GCRefKind
enum GCRefKind : unsigned char
{
GCRK_Scalar = 0x00,
GCRK_Object = 0x01,
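Note (sketch, not from the diff): C++11 only allows an enum to be forward-declared when its underlying type is fixed, which is why GCRefKind gains ': unsigned char' here and at each opaque declaration elsewhere in this commit. The Classify function below is hypothetical.

    enum GCRefKind : unsigned char;        // opaque declaration, legal because the type is fixed
    GCRefKind Classify(void * pObject);    // usable in headers before the full definition

    enum GCRefKind : unsigned char         // the definition repeats the underlying type
    {
        GCRK_Scalar = 0x00,
        GCRK_Object = 0x01,
    };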

View file

@ -1,66 +0,0 @@
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#include "rhcommon.h"
#include "CommonTypes.h"
#include "daccess.h"
#include "CommonMacros.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#include "assert.h"
#include "static_check.h"
#include "type_traits.hpp"
#include "slist.h"
#include "holder.h"
#include "Crst.h"
#include "InstanceStore.h"
#include "RWLock.h"
#include "RuntimeInstance.h"
#include "slist.inl"
InstanceStore::InstanceStore()
{
}
InstanceStore::~InstanceStore()
{
}
// static
InstanceStore * InstanceStore::Create()
{
NewHolder<InstanceStore> pInstanceStore = new InstanceStore();
pInstanceStore->m_Crst.Init(CrstInstanceStore);
pInstanceStore.SuppressRelease();
return pInstanceStore;
}
void InstanceStore::Destroy()
{
delete this;
}
void InstanceStore::Insert(RuntimeInstance * pRuntimeInstance)
{
CrstHolder ch(&m_Crst);
m_InstanceList.PushHead(pRuntimeInstance);
}
RuntimeInstance * InstanceStore::GetRuntimeInstance(HANDLE hPalInstance)
{
CrstHolder ch(&m_Crst);
for (SList<RuntimeInstance>::Iterator it = m_InstanceList.Begin(); it != m_InstanceList.End(); ++it)
{
if (it->GetPalInstance() == hPalInstance)
{
return *it;
}
}
return NULL;
}

View file

@ -1,24 +0,0 @@
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
class RuntimeInstance;
class InstanceStore
{
SList<RuntimeInstance> m_InstanceList;
CrstStatic m_Crst;
private:
InstanceStore();
public:
~InstanceStore();
static InstanceStore * Create();
void Destroy();
RuntimeInstance * GetRuntimeInstance(HANDLE hPalInstance);
void Insert(RuntimeInstance * pRuntimeInstance);
};

View file

@ -111,8 +111,8 @@ inline UInt32 PalGetLastError()
return GetLastError();
}
extern "C" void * __stdcall GetProcAddress(HANDLE, char *);
inline void * PalGetProcAddress(HANDLE arg1, char * arg2)
extern "C" void * __stdcall GetProcAddress(HANDLE, const char *);
inline void * PalGetProcAddress(HANDLE arg1, const char * arg2)
{
return GetProcAddress(arg1, arg2);
}
@ -160,8 +160,8 @@ inline void PalLeaveCriticalSection(CRITICAL_SECTION * arg1)
LeaveCriticalSection(arg1);
}
extern "C" HANDLE __stdcall LoadLibraryExW(WCHAR *, HANDLE, UInt32);
inline HANDLE PalLoadLibraryExW(WCHAR * arg1, HANDLE arg2, UInt32 arg3)
extern "C" HANDLE __stdcall LoadLibraryExW(const WCHAR *, HANDLE, UInt32);
inline HANDLE PalLoadLibraryExW(const WCHAR * arg1, HANDLE arg2, UInt32 arg3)
{
return LoadLibraryExW(arg1, arg2, arg3);
}

View file

@ -792,6 +792,7 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
#endif // _ARM_
void ** ppvResult;
UInt8 * RSP;
UInt32 epilogOffset = 0;
UInt32 epilogSize = 0;
@ -841,7 +842,7 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
// We do not have a frame pointer, but we are also not in the prolog or epilog
UInt8 * RSP = (UInt8 *)pContext->GetSP();
RSP = (UInt8 *)pContext->GetSP();
RSP += pHeader->GetFrameSize();
RSP += pHeader->GetPreservedRegsSaveSize();
@ -858,9 +859,9 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
GCRefKind EECodeManager::GetReturnValueKind(EEMethodInfo * pMethodInfo)
{
STATIC_ASSERT(GCInfoHeader::MRK_ReturnsScalar == GCRK_Scalar);
STATIC_ASSERT(GCInfoHeader::MRK_ReturnsObject == GCRK_Object);
STATIC_ASSERT(GCInfoHeader::MRK_ReturnsByref == GCRK_Byref);
STATIC_ASSERT((GCRefKind)GCInfoHeader::MRK_ReturnsScalar == GCRK_Scalar);
STATIC_ASSERT((GCRefKind)GCInfoHeader::MRK_ReturnsObject == GCRK_Object);
STATIC_ASSERT((GCRefKind)GCInfoHeader::MRK_ReturnsByref == GCRK_Byref);
GCInfoHeader::MethodReturnKind retKind = pMethodInfo->GetGCInfoHeader()->GetReturnKind();
switch (retKind)
@ -872,6 +873,8 @@ GCRefKind EECodeManager::GetReturnValueKind(EEMethodInfo * pMethodInfo)
return GCRK_Object;
case GCInfoHeader::MRK_ReturnsByref:
return GCRK_Byref;
default:
break;
}
UNREACHABLE_MSG("unexpected return kind");
}

View file

@ -6,7 +6,7 @@ struct REGDISPLAY;
struct GCInfoHeader;
struct GCEnumContext;
class MethodInfo;
enum GCRefKind;
enum GCRefKind : unsigned char;
class EEMethodInfo
{

View file

@ -63,7 +63,7 @@ bool RestrictedCallouts::RegisterGcCallout(GcRestrictedCalloutKind eKind, void *
RhFailFast();
}
GcRestrictedCallout * pCallout = new GcRestrictedCallout();
GcRestrictedCallout * pCallout = new (nothrow) GcRestrictedCallout();
if (pCallout == NULL)
return false;
@ -123,7 +123,7 @@ void RestrictedCallouts::UnregisterGcCallout(GcRestrictedCalloutKind eKind, void
// success, false if insufficient memory was available for the registration.
bool RestrictedCallouts::RegisterRefCountedHandleCallback(void * pCalloutMethod, EEType * pTypeFilter)
{
HandleTableRestrictedCallout * pCallout = new HandleTableRestrictedCallout();
HandleTableRestrictedCallout * pCallout = new (nothrow) HandleTableRestrictedCallout();
if (pCallout == NULL)
return false;

View file

@ -26,7 +26,7 @@
#include "RhConfig.h"
UInt32 RhConfig::ReadConfigValue(_In_z_ WCHAR *wszName)
UInt32 RhConfig::ReadConfigValue(_In_z_ const WCHAR *wszName)
{
WCHAR wszBuffer[CONFIG_VAL_MAXLEN + 1]; // 8 hex digits plus a nul terminator.
const UInt32 cchBuffer = sizeof(wszBuffer) / sizeof(wszBuffer[0]);
@ -69,7 +69,7 @@ UInt32 RhConfig::ReadConfigValue(_In_z_ WCHAR *wszName)
//if the file is not available, or unreadable, zero will always be returned
//cchOuputBuffer is the maximum number of characters to write to outputBuffer
//cchOutputBuffer must be a size >= CONFIG_VAL_MAXLEN + 1
UInt32 RhConfig::GetIniVariable(_In_z_ WCHAR* configName, _Out_writes_all_(cchBuff) WCHAR* outputBuffer, _In_ UInt32 cchOuputBuffer)
UInt32 RhConfig::GetIniVariable(_In_z_ const WCHAR* configName, _Out_writes_all_(cchBuff) WCHAR* outputBuffer, _In_ UInt32 cchOuputBuffer)
{
//the buffer needs to be big enough to read the value buffer + null terminator
if (cchOuputBuffer < CONFIG_VAL_MAXLEN + 1)
@ -158,7 +158,14 @@ void RhConfig::ReadConfigIni()
return;
}
ConfigPair* iniBuff = new ConfigPair[RCV_Count];
ConfigPair* iniBuff = new (nothrow) ConfigPair[RCV_Count];
if (iniBuff == NULL)
{
//only set if another thread hasn't initialized the buffer yet, otherwise ignore and let the first setter win
PalInterlockedCompareExchangePointer(&g_iniSettings, CONFIG_INI_NOT_AVAIL, NULL);
return;
}
UInt32 iBuff = 0;
UInt32 iIniBuff = 0;
@ -241,17 +248,19 @@ _Ret_maybenull_z_ WCHAR* RhConfig::GetConfigPath()
return NULL;
}
WCHAR* configPath = new WCHAR[iLastBackslash + 1 + wcslen(CONFIG_INI_FILENAME) + 1];
//copy the path base and file name
for (UInt32 i = 0; i <= iLastBackslash; i++)
WCHAR* configPath = new (nothrow) WCHAR[iLastBackslash + 1 + wcslen(CONFIG_INI_FILENAME) + 1];
if (configPath != NULL)
{
configPath[i] = exePathBuff[i];
}
//copy the path base and file name
for (UInt32 i = 0; i <= iLastBackslash; i++)
{
configPath[i] = exePathBuff[i];
}
for (UInt32 i = 0; i <= wcslen(CONFIG_INI_FILENAME); i++)
{
configPath[i + iLastBackslash + 1] = CONFIG_INI_FILENAME[i];
for (UInt32 i = 0; i <= wcslen(CONFIG_INI_FILENAME); i++)
{
configPath[i + iLastBackslash + 1] = CONFIG_INI_FILENAME[i];
}
}
return configPath;

View file

@ -80,7 +80,7 @@ public:
private:
UInt32 ReadConfigValue(_In_z_ WCHAR *wszName);
UInt32 ReadConfigValue(_In_z_ const WCHAR *wszName);
enum RhConfigValue
{
@ -96,7 +96,7 @@ private:
#define CONFIG_FILE_MAXLEN RCV_Count * sizeof(ConfigPair) + 2000
private:
_Ret_maybenull_z_ WCHAR* RhConfig::GetConfigPath();
_Ret_maybenull_z_ WCHAR* GetConfigPath();
//Parses one line of rhconfig.ini and populates values in the passed in configPair
//returns: true if the parsing was successful, false if the parsing failed.
@ -113,7 +113,7 @@ private:
//lazily reads the file so if the file is not yet read, it will read it on first called
//if the file is not available, or unreadable, zero will always be returned
//cchOuputBuffer is the maximum number of characters to write to outputBuffer
UInt32 GetIniVariable(_In_z_ WCHAR* configName, _Out_writes_all_(cchBuff) WCHAR* outputBuffer, _In_ UInt32 cchOuputBuffer);
UInt32 GetIniVariable(_In_z_ const WCHAR* configName, _Out_writes_all_(cchBuff) WCHAR* outputBuffer, _In_ UInt32 cchOuputBuffer);
static bool priv_isspace(char c)
{

View file

@ -329,7 +329,7 @@ bool RuntimeInstance::BuildGenericTypeHashTable()
}
END_FOREACH_MODULE;
GenericTypeHashTable * pTable = new GenericTypeHashTable();
GenericTypeHashTable * pTable = new (nothrow) GenericTypeHashTable();
if (pTable == NULL)
return false;
@ -497,7 +497,7 @@ void RuntimeInstance::UnregisterModule(Module *pModule)
#ifdef FEATURE_DYNAMIC_CODE
bool RuntimeInstance::RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, UInt32 cbRange)
{
CodeManagerEntry * pEntry = new CodeManagerEntry();
CodeManagerEntry * pEntry = new (nothrow) CodeManagerEntry();
if (NULL == pEntry)
return false;
@ -551,7 +551,7 @@ extern "C" void __stdcall UnregisterCodeManager(ICodeManager * pCodeManager)
// static
RuntimeInstance * RuntimeInstance::Create(HANDLE hPalInstance)
{
NewHolder<RuntimeInstance> pRuntimeInstance = new RuntimeInstance();
NewHolder<RuntimeInstance> pRuntimeInstance = new (nothrow) RuntimeInstance();
if (NULL == pRuntimeInstance)
return NULL;
@ -686,7 +686,7 @@ static bool FlattenGenericInstance(UnifiedGenericInstance * pInst)
// Luckily we only have to create a fairly simplistic type. Since this is only used to
// establish identity between generic instances (i.e. type checks) we only need the base
// EEType, no GC desc, interface map or interface dispatch map.
EEType *pArrayType = new EEType();
EEType *pArrayType = new (nothrow) EEType();
if (pArrayType == NULL)
return false;
@ -768,7 +768,7 @@ bool RuntimeInstance::StartGenericUnification(UInt32 cInstances)
// This avoids having to search those new entries during subsequent additions within the same update
// (since a module will never publish two identical generic instantiations these extra equality checks
// are unnecessary as well as expensive).
m_genericInstHashtab = new UnifiedGenericInstance*[cHashBuckets * 2];
m_genericInstHashtab = new (nothrow) UnifiedGenericInstance*[cHashBuckets * 2];
m_genericInstHashtabUpdates = m_genericInstHashtab + cHashBuckets;
if (m_genericInstHashtab == NULL)
{
@ -810,143 +810,144 @@ UnifiedGenericInstance *RuntimeInstance::UnifyGenericInstance(GenericInstanceDes
}
}
// No module has previously registered this generic instantiation. We need to allocate and create a new
// unified canonical representation for this type.
// Allocate enough memory for the UnifiedGenericInstance, canonical GenericInstanceDesc, canonical generic
// instantiation EEType and static fields (GC and non-GC). Note that we don't have to allocate space for a
// GC descriptor, vtable or interface dispatch map for the EEType since this type will never appear in an
// object header on the GC heap (we always use module-local EETypes for this so that virtual dispatch is
// bound back to the local module).
UInt32 cbGid = pLocalGid->GetSize();
UInt32 cbPaddedGid = (UInt32)(ALIGN_UP(cbGid, sizeof(void*)));
UInt32 cbEEType = EEType::GetSizeofEEType(0, // # of virtuals (no vtable)
pLocalEEType->GetNumInterfaces(),
false, // HasFinalizer (we don't care)
false, // RequiresOptionalFields (we don't care)
false, // IsNullable (we don't care)
false); // fHasSealedVirtuals (we don't care)
UInt32 cbNonGcStaticFields = pLocalGid->GetSizeOfNonGcStaticFieldData();
UInt32 cbGcStaticFields = pLocalGid->GetSizeOfGcStaticFieldData();
PTR_StaticGcDesc pLocalGcStaticDesc = cbGcStaticFields ? pLocalGid->GetGcStaticFieldDesc() : NULL;
UInt32 cbGcDesc = pLocalGcStaticDesc ? pLocalGcStaticDesc->GetSize() : 0;
// for performance and correctness reasons (at least on ARM), we wish to align the static areas on a
// multiple of STATIC_FIELD_ALIGNMENT
const UInt32 STATIC_FIELD_ALIGNMENT = 8;
UInt32 cbMemory = (UInt32)ALIGN_UP(sizeof(UnifiedGenericInstance) + cbPaddedGid + cbEEType, STATIC_FIELD_ALIGNMENT) +
(UInt32)ALIGN_UP(cbNonGcStaticFields, STATIC_FIELD_ALIGNMENT) +
cbGcStaticFields +
cbGcDesc;
// Note: Generic instance unification is not a product feature that we ship in ProjectN, so there is no need to
// use safe integers when computing the value of cbMemory.
UInt8 * pMemory = new UInt8[cbMemory];
if (pMemory == NULL)
return NULL;
// Determine the start of the various individual data structures in the monolithic chunk of memory we
// allocated.
pCanonicalInst = (UnifiedGenericInstance*)pMemory;
pMemory += sizeof(UnifiedGenericInstance);
pCanonicalGid = (GenericInstanceDesc*)pMemory;
pMemory += cbPaddedGid;
EEType * pCanonicalType = (EEType*)pMemory;
pMemory = ALIGN_UP(pMemory + cbEEType, STATIC_FIELD_ALIGNMENT);
UInt8 * pStaticData = pMemory;
pMemory += ALIGN_UP(cbNonGcStaticFields, STATIC_FIELD_ALIGNMENT);
UInt8 * pGcStaticData = pMemory;
pMemory += cbGcStaticFields;
StaticGcDesc * pStaticGcDesc = (StaticGcDesc*)pMemory;
pMemory += cbGcDesc;
// Copy local GenericInstanceDesc.
memcpy(pCanonicalGid, pLocalGid, cbGid);
// Copy local definition of the generic instantiation EEType (no vtable).
memcpy(pCanonicalType, pLocalEEType, sizeof(EEType));
// Set the type as runtime allocated (just for debugging purposes at the moment).
pCanonicalType->SetRuntimeAllocated();
// Copy the interface map directly after the EEType (if there are any interfaces).
if (pLocalEEType->HasInterfaces())
memcpy(pCanonicalType + 1,
pLocalEEType->GetInterfaceMap().GetRawPtr(),
pLocalEEType->GetNumInterfaces() * sizeof(EEInterfaceInfo));
// Copy initial static data from the module.
if (cbNonGcStaticFields)
memcpy(pStaticData, pLocalGid->GetNonGcStaticFieldData(), cbNonGcStaticFields);
if (cbGcStaticFields)
memcpy(pGcStaticData, pLocalGid->GetGcStaticFieldData(), cbGcStaticFields);
// If we have any GC static data then we need to copy over GC descriptors for it.
if (cbGcDesc)
memcpy(pStaticGcDesc, pLocalGcStaticDesc, cbGcDesc);
// Because we don't store the vtable with our canonical EEType it throws the calculation of the interface
// map (which is still required for cast operations) off. We need to clear the count of virtual methods in
// the EEType to correct this (this field should not be required for the canonical type).
pCanonicalType->SetNumVtableSlots(0);
// Initialize the UnifiedGenericInstance.
pCanonicalInst->m_pNext = m_genericInstHashtabUpdates[hashCode];
pCanonicalInst->m_cRefs = 1;
// Update canonical GenericInstanceDesc with any values that are no longer local to the module.
pCanonicalGid->SetEEType(pCanonicalType);
if (cbNonGcStaticFields)
pCanonicalGid->SetNonGcStaticFieldData(pStaticData);
if (cbGcStaticFields)
pCanonicalGid->SetGcStaticFieldData(pGcStaticData);
if (cbGcDesc)
pCanonicalGid->SetGcStaticFieldDesc(pStaticGcDesc);
// Any generic types with thread static fields need to know the TLS index assigned to the module by the OS
// loader for the module that ends up "owning" the unified instance. Note that this breaks the module
// unload scenario since when the arbitrarily chosen owning module is unloaded it's TLS index will be
// released. Since the OS doesn't provide access to the TLS allocation mechanism used by .tls support
// (it's a different system than that used by TlsAlloc) our only alternative here would be to allocate TLS
// slots manually and managed the storage ourselves, which is both complicated and would result in lower
// performance at the thread static access site (since at a minimum regular TlsAlloc'd TLS indices need to
// be range checked to determine how they are used with a TEB).
if (pCanonicalGid->HasThreadStaticFields())
pCanonicalGid->SetThreadStaticFieldTlsIndex(uiLocalTlsIndex);
// Attempt to remove any arbitrary dependencies on the module that provided the instantiation. Here
// arbitrary refers to references to the module that exist purely because the module used an IAT
// indirection to point to non-local types. We can remove most of these in-place by performing the IAT
// lookup now and copying the direct pointer up one level of the data structures (see
// FlattenGenericInstance above for more details). Unfortunately one edge case in particular might require
// us to allocate memory (some generic instantiations over array types) so the call below can fail. So
// don't modify any global state (the unification hash table) until this call has succeeded.
if (!FlattenGenericInstance(pCanonicalInst))
{
delete [] pMemory;
return NULL;
// No module has previously registered this generic instantiation. We need to allocate and create a new
// unified canonical representation for this type.
// Allocate enough memory for the UnifiedGenericInstance, canonical GenericInstanceDesc, canonical generic
// instantiation EEType and static fields (GC and non-GC). Note that we don't have to allocate space for a
// GC descriptor, vtable or interface dispatch map for the EEType since this type will never appear in an
// object header on the GC heap (we always use module-local EETypes for this so that virtual dispatch is
// bound back to the local module).
UInt32 cbGid = pLocalGid->GetSize();
UInt32 cbPaddedGid = (UInt32)(ALIGN_UP(cbGid, sizeof(void*)));
UInt32 cbEEType = EEType::GetSizeofEEType(0, // # of virtuals (no vtable)
pLocalEEType->GetNumInterfaces(),
false, // HasFinalizer (we don't care)
false, // RequiresOptionalFields (we don't care)
false, // IsNullable (we don't care)
false); // fHasSealedVirtuals (we don't care)
UInt32 cbNonGcStaticFields = pLocalGid->GetSizeOfNonGcStaticFieldData();
UInt32 cbGcStaticFields = pLocalGid->GetSizeOfGcStaticFieldData();
PTR_StaticGcDesc pLocalGcStaticDesc = cbGcStaticFields ? pLocalGid->GetGcStaticFieldDesc() : NULL;
UInt32 cbGcDesc = pLocalGcStaticDesc ? pLocalGcStaticDesc->GetSize() : 0;
// for performance and correctness reasons (at least on ARM), we wish to align the static areas on a
// multiple of STATIC_FIELD_ALIGNMENT
const UInt32 STATIC_FIELD_ALIGNMENT = 8;
UInt32 cbMemory = (UInt32)ALIGN_UP(sizeof(UnifiedGenericInstance) + cbPaddedGid + cbEEType, STATIC_FIELD_ALIGNMENT) +
(UInt32)ALIGN_UP(cbNonGcStaticFields, STATIC_FIELD_ALIGNMENT) +
cbGcStaticFields +
cbGcDesc;
// Note: Generic instance unification is not a product feature that we ship in ProjectN, so there is no need to
// use safe integers when computing the value of cbMemory.
UInt8 * pMemory = new (nothrow) UInt8[cbMemory];
if (pMemory == NULL)
return NULL;
// Determine the start of the various individual data structures in the monolithic chunk of memory we
// allocated.
pCanonicalInst = (UnifiedGenericInstance*)pMemory;
pMemory += sizeof(UnifiedGenericInstance);
pCanonicalGid = (GenericInstanceDesc*)pMemory;
pMemory += cbPaddedGid;
EEType * pCanonicalType = (EEType*)pMemory;
pMemory = ALIGN_UP(pMemory + cbEEType, STATIC_FIELD_ALIGNMENT);
UInt8 * pStaticData = pMemory;
pMemory += ALIGN_UP(cbNonGcStaticFields, STATIC_FIELD_ALIGNMENT);
UInt8 * pGcStaticData = pMemory;
pMemory += cbGcStaticFields;
StaticGcDesc * pStaticGcDesc = (StaticGcDesc*)pMemory;
pMemory += cbGcDesc;
// Copy local GenericInstanceDesc.
memcpy(pCanonicalGid, pLocalGid, cbGid);
// Copy local definition of the generic instantiation EEType (no vtable).
memcpy(pCanonicalType, pLocalEEType, sizeof(EEType));
// Set the type as runtime allocated (just for debugging purposes at the moment).
pCanonicalType->SetRuntimeAllocated();
// Copy the interface map directly after the EEType (if there are any interfaces).
if (pLocalEEType->HasInterfaces())
memcpy(pCanonicalType + 1,
pLocalEEType->GetInterfaceMap().GetRawPtr(),
pLocalEEType->GetNumInterfaces() * sizeof(EEInterfaceInfo));
// Copy initial static data from the module.
if (cbNonGcStaticFields)
memcpy(pStaticData, pLocalGid->GetNonGcStaticFieldData(), cbNonGcStaticFields);
if (cbGcStaticFields)
memcpy(pGcStaticData, pLocalGid->GetGcStaticFieldData(), cbGcStaticFields);
// If we have any GC static data then we need to copy over GC descriptors for it.
if (cbGcDesc)
memcpy(pStaticGcDesc, pLocalGcStaticDesc, cbGcDesc);
// Because we don't store the vtable with our canonical EEType it throws the calculation of the interface
// map (which is still required for cast operations) off. We need to clear the count of virtual methods in
// the EEType to correct this (this field should not be required for the canonical type).
pCanonicalType->SetNumVtableSlots(0);
// Initialize the UnifiedGenericInstance.
pCanonicalInst->m_pNext = m_genericInstHashtabUpdates[hashCode];
pCanonicalInst->m_cRefs = 1;
// Update canonical GenericInstanceDesc with any values that are no longer local to the module.
pCanonicalGid->SetEEType(pCanonicalType);
if (cbNonGcStaticFields)
pCanonicalGid->SetNonGcStaticFieldData(pStaticData);
if (cbGcStaticFields)
pCanonicalGid->SetGcStaticFieldData(pGcStaticData);
if (cbGcDesc)
pCanonicalGid->SetGcStaticFieldDesc(pStaticGcDesc);
// Any generic types with thread static fields need to know the TLS index assigned to the module by the OS
// loader for the module that ends up "owning" the unified instance. Note that this breaks the module
// unload scenario since when the arbitrarily chosen owning module is unloaded its TLS index will be
// released. Since the OS doesn't provide access to the TLS allocation mechanism used by .tls support
// (it's a different system than that used by TlsAlloc) our only alternative here would be to allocate TLS
// slots manually and manage the storage ourselves, which is both complicated and would result in lower
// performance at the thread static access site (since at a minimum regular TlsAlloc'd TLS indices need to
// be range checked to determine how they are used with a TEB).
if (pCanonicalGid->HasThreadStaticFields())
pCanonicalGid->SetThreadStaticFieldTlsIndex(uiLocalTlsIndex);
// Attempt to remove any arbitrary dependencies on the module that provided the instantiation. Here
// arbitrary refers to references to the module that exist purely because the module used an IAT
// indirection to point to non-local types. We can remove most of these in-place by performing the IAT
// lookup now and copying the direct pointer up one level of the data structures (see
// FlattenGenericInstance above for more details). Unfortunately one edge case in particular might require
// us to allocate memory (some generic instantiations over array types) so the call below can fail. So
// don't modify any global state (the unification hash table) until this call has succeeded.
if (!FlattenGenericInstance(pCanonicalInst))
{
delete [] pMemory;
return NULL;
}
// If this generic instantiation has GC fields to report add it to the list we traverse during garbage
// collections.
if (cbGcStaticFields || pLocalGid->HasThreadStaticFields())
{
pCanonicalGid->SetNextGidWithGcRoots(m_genericInstReportList);
m_genericInstReportList = pCanonicalGid;
}
// We've built the new unified generic instantiation, publish it in the hash table. But don't put it on
// the real bucket chain yet otherwise further additions as part of this same update will needlessly
// search it. Instead add it to the head of the update bucket. All updated chains will be published back
// to the real buckets at the end of the update.
m_genericInstHashtabEntries += 1;
m_genericInstHashtabUpdates[hashCode] = pCanonicalInst;
}
// If this generic instantiation has GC fields to report add it to the list we traverse during garbage
// collections.
if (cbGcStaticFields || pLocalGid->HasThreadStaticFields())
{
pCanonicalGid->SetNextGidWithGcRoots(m_genericInstReportList);
m_genericInstReportList = pCanonicalGid;
}
// We've built the new unified generic instantiation, publish it in the hash table. But don't put it on
// the real bucket chain yet otherwise further additions as part of this same update will needlessly
// search it. Instead add it to the head of the update bucket. All updated chains will be published back
// to the real buckets at the end of the update.
m_genericInstHashtabEntries += 1;
m_genericInstHashtabUpdates[hashCode] = pCanonicalInst;
Done:
// Get here whether we found an existing match for the type or had to create a new entry. All that's left
@ -1209,7 +1210,7 @@ bool RuntimeInstance::CreateGenericInstanceDesc(EEType * pEEType,
// gets called. With this value, cbGidSize will not exceed 600K, so no need to use safe integers
size_t cbGidSize = GenericInstanceDesc::GetSize(flags, arity);
NewArrayHolder<UInt8> pGidMemory = new UInt8[cbGidSize];
NewArrayHolder<UInt8> pGidMemory = new (nothrow) UInt8[cbGidSize];
if (pGidMemory == NULL)
return false;
@ -1226,7 +1227,7 @@ bool RuntimeInstance::CreateGenericInstanceDesc(EEType * pEEType,
// The value of nonGcStaticDataSize is read from native layout info in the managed layer, where
// there is also a check that it does not exceed the max value of a signed Int32
ASSERT(nonGCStaticDataOffset <= nonGcStaticDataSize);
pNonGcStaticData = new UInt8[nonGcStaticDataSize];
pNonGcStaticData = new (nothrow) UInt8[nonGcStaticDataSize];
if (pNonGcStaticData == NULL)
return false;
memset(pNonGcStaticData, 0, nonGcStaticDataSize);
@ -1238,7 +1239,7 @@ bool RuntimeInstance::CreateGenericInstanceDesc(EEType * pEEType,
{
// The value of gcStaticDataSize is read from native layout info in the managed layer, where
// there is also a check that it does not exceed the max value of a signed Int32
pGcStaticData = new UInt8[gcStaticDataSize];
pGcStaticData = new (nothrow) UInt8[gcStaticDataSize];
if (pGcStaticData == NULL)
return false;
memset(pGcStaticData, 0, gcStaticDataSize);
@ -1412,7 +1413,7 @@ COOP_PINVOKE_HELPER(EEType *, RhCloneType, (EEType * pTemplate,
RhFailFast();
}
NewArrayHolder<UInt8> pEETypeMemory = new UInt8[cbGCDescAligned + cbEEType + sizeof(EEType *)+cbOptionalFieldsSize];
NewArrayHolder<UInt8> pEETypeMemory = new (nothrow) UInt8[cbGCDescAligned + cbEEType + sizeof(EEType *)+cbOptionalFieldsSize];
if (pEETypeMemory == NULL)
return NULL;
@ -1449,7 +1450,7 @@ COOP_PINVOKE_HELPER(EEType *, RhCloneType, (EEType * pTemplate,
GenericInstanceDesc * pTemplateGid = GetRuntimeInstance()->LookupGenericInstance(pTemplate);
ASSERT(pTemplateGid != NULL && pTemplateGid->HasInstantiation() && pTemplateGid->HasVariance());
pGenericVarianceFlags = new UInt32[arity];
pGenericVarianceFlags = new (nothrow) UInt32[arity];
if (pGenericVarianceFlags == NULL) return NULL;
for (UInt32 i = 0; i < arity; i++)
@ -1471,7 +1472,7 @@ COOP_PINVOKE_HELPER(PTR_VOID, RhAllocateMemory, (UInt32 size))
// Generic memory allocation function, for use by managed code
// Note: all callers to RhAllocateMemory on the managed side use checked integer arithmetics to catch overflows,
// so there is no need to use safe integers here.
PTR_VOID pMemory = new UInt8[size];
PTR_VOID pMemory = new (nothrow) UInt8[size];
if (pMemory == NULL)
return NULL;
@ -1516,7 +1517,7 @@ COOP_PINVOKE_HELPER(bool , RhCreateGenericInstanceDescForType, (EEType *
GenericInstanceDesc * pTemplateGid = GetRuntimeInstance()->LookupGenericInstance(pTemplateType);
ASSERT(pTemplateGid != NULL && pTemplateGid->HasInstantiation() && pTemplateGid->HasVariance());
pGenericVarianceFlags = new UInt32[arity];
pGenericVarianceFlags = new (nothrow) UInt32[arity];
if (pGenericVarianceFlags == NULL) return NULL;
for (UInt32 i = 0; i < arity; i++)
@ -1655,7 +1656,7 @@ EXTERN_C void * RhpInitialDynamicInterfaceDispatch;
COOP_PINVOKE_HELPER(void *, RhNewInterfaceDispatchCell, (EEType * pInterface, Int32 slotNumber))
{
InterfaceDispatchCell * pCell = new InterfaceDispatchCell[2];
InterfaceDispatchCell * pCell = new (nothrow) InterfaceDispatchCell[2];
if (pCell == NULL)
return NULL;

View file

@ -25,7 +25,7 @@ enum WellKnownMethodIds
// Define an array of well known method names which are indexed by the enums defined above.
#define DEFINE_WELL_KNOWN_METHOD(_name) #_name,
extern __declspec(selectany) const char *g_rgWellKnownMethodNames[] =
extern __declspec(selectany) const char * const g_rgWellKnownMethodNames[] =
{
#include "WellKnownMethodList.h"
};

View file

@ -97,7 +97,7 @@ bool AllocHeap::Init(
}
#endif // FEATURE_RWX_MEMORY
BlockListElem *pBlock = new BlockListElem(pbInitialMem, cbInitialMemReserve);
BlockListElem *pBlock = new (nothrow) BlockListElem(pbInitialMem, cbInitialMemReserve);
if (pBlock == NULL)
return false;
m_blockList.PushHead(pBlock);
@ -287,7 +287,7 @@ bool AllocHeap::_AllocNewBlock(UIntNative cbMem)
if (pbMem == NULL)
return false;
BlockListElem *pBlockListElem = new BlockListElem(pbMem, cbMem);
BlockListElem *pBlockListElem = new (nothrow) BlockListElem(pbMem, cbMem);
if (pBlockListElem == NULL)
{
PalVirtualFree(pbMem, 0, MEM_RELEASE);
@ -362,13 +362,13 @@ bool AllocHeap::_CommitFromCurBlock(UIntNative cbMem)
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new(UIntNative n, AllocHeap * alloc)
void * __cdecl operator new(size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new[](UIntNative n, AllocHeap * alloc)
void * __cdecl operator new[](size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
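Note (sketch, not part of the diff): the C++ standard specifies the size parameter of every operator new overload as size_t; spelling it that way directly avoids relying on UIntNative happening to be the same type, which clang checks more strictly than MSVC. A stand-alone version of the same custom-heap pattern, with ToyHeap standing in for AllocHeap:

    #include <cstddef>
    #include <cstdlib>

    struct ToyHeap
    {
        void * Alloc(std::size_t n) { return std::malloc(n); }    // stand-in for AllocHeap::Alloc
    };

    void * operator new(std::size_t n, ToyHeap * heap) { return heap->Alloc(n); }
    void * operator new[](std::size_t n, ToyHeap * heap) { return heap->Alloc(n); }

    // Usage: ToyHeap heap; int * p = new (&heap) int(42);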

View file

@ -119,6 +119,6 @@ class AllocHeap
typedef DPTR(AllocHeap) PTR_AllocHeap;
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new(UIntNative n, AllocHeap * alloc);
void * __cdecl operator new[](UIntNative n, AllocHeap * alloc);
void * __cdecl operator new(size_t n, AllocHeap * alloc);
void * __cdecl operator new[](size_t n, AllocHeap * alloc);

View file

@ -71,8 +71,8 @@ void Assert(const char * expr, const char * file, UInt32 line_num, const char *
expr, file, line_num);
HANDLE hMod = PalLoadLibraryExW(L"user32.dll", NULL, 0);
Int32 (* pfn)(HANDLE, char *, char *, UInt32) =
(Int32 (*)(HANDLE, char *, char *, UInt32))PalGetProcAddress(hMod, "MessageBoxA");
Int32 (* pfn)(HANDLE, char *, const char *, UInt32) =
(Int32 (*)(HANDLE, char *, const char *, UInt32))PalGetProcAddress(hMod, "MessageBoxA");
Int32 result = pfn(NULL, buffer, "Redhawk Assert", MB_ABORTRETRYIGNORE);

View file

@ -5,7 +5,7 @@
#ifdef _MSC_VER
#define ASSUME(expr) __assume(expr)
#else // _MSC_VER
#define ASSUME(expr)
#define ASSUME(expr) do { if (!(expr)) __builtin_unreachable(); } while (0)
#endif // _MSC_VER
#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
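Note (sketch, not part of the diff): __builtin_unreachable() gives clang and GCC the optimization hint that MSVC gets from __assume(); if the condition is ever false at runtime the behavior is undefined, so ASSUME documents an invariant rather than checking one. The helper below is hypothetical.

    #ifdef _MSC_VER
    #define ASSUME(expr) __assume(expr)
    #else
    #define ASSUME(expr) do { if (!(expr)) __builtin_unreachable(); } while (0)
    #endif

    int DaysInMonth(int month)                 // caller guarantees 1..12
    {
        ASSUME(month >= 1 && month <= 12);     // lets the optimizer drop range checks below
        static const int s_days[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
        return s_days[month - 1];
    }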

View file

@ -58,7 +58,7 @@ class EEInterfaceInfoMap
public:
EEInterfaceInfoMap(EEInterfaceInfoMap const & other)
: m_pMap(m_pMap), m_cMap(m_cMap)
: m_pMap(NULL), m_cMap(0)
{
UNREFERENCED_PARAMETER(other);
}

View file

@ -793,4 +793,4 @@ inline EEType * EEType::GetArrayBaseType()
}
#endif
#endif __eetype_inl__
#endif // __eetype_inl__

View file

@ -20,7 +20,6 @@ FWD_DECL(CrstStatic)
FWD_DECL(EEMethodInfo)
FWD_DECL(EECodeManager)
FWD_DECL(EEThreadId)
FWD_DECL(InstanceStore)
FWD_DECL(MethodInfo)
FWD_DECL(Module)
FWD_DECL(Object)

View file

@ -41,7 +41,7 @@ GCDump::GCDump()
/*****************************************************************************/
static char * calleeSaveRegMaskBitNumberToName[] =
static const char * const calleeSaveRegMaskBitNumberToName[] =
{
#ifdef _ARM_
"R4",
@ -106,7 +106,7 @@ size_t FASTCALL GCDump::DumpInfoHeader (PTR_UInt8 gcInfo,
gcPrintf(" epilogSize: %d\r\n", pHeader->GetFixedEpilogSize());
gcPrintf(" epilogCount: %d %s\r\n", epilogCount, epilogAtEnd ? "[end]" : "");
char * returnKind = "????";
const char * returnKind = "????";
unsigned reversePinvokeFrameOffset = 0; // it can't be 0 because [ebp+0] is the previous ebp
switch (pHeader->GetReturnKind())
{
@ -117,6 +117,9 @@ size_t FASTCALL GCDump::DumpInfoHeader (PTR_UInt8 gcInfo,
returnKind = "to native";
reversePinvokeFrameOffset = pHeader->GetReversePinvokeFrameOffset();
break;
case GCInfoHeader::MRK_Unknown:
//ASSERT("Unexpected return kind")
break;
}
gcPrintf(" returnKind: %s\r\n", returnKind);
gcPrintf(" frameKind: %s", pHeader->HasFramePointer() ? "EBP" : "ESP");
@ -191,7 +194,7 @@ void GCDump::PrintLocalSlot(UInt32 slotNum, GCInfoHeader const * pHeader)
gcPrintf("local slot 0n%d, [R7+%02X] \r\n", slotNum,
((GCInfoHeader*)pHeader)->GetFrameSize() - ((slotNum + 1) * POINTER_SIZE));
#else
char* regAndSign = "EBP-";
const char* regAndSign = "EBP-";
size_t offset = pHeader->GetPreservedRegsSaveSize() + (slotNum * POINTER_SIZE);
# ifdef TARGET_AMD64
if (((GCInfoHeader*)pHeader)->GetFramePointerOffset() == 0)
@ -256,9 +259,9 @@ void GCDump::DumpCallsiteString(UInt32 callsiteOffset, PTR_UInt8 pbCallsiteStrin
case 0x40:
{
// case 3 -- "register"
char* regName = "???";
char* interior = (b & 0x10) ? "+" : "";
char* pinned = (b & 0x08) ? "!" : "";
const char* regName = "???";
const char* interior = (b & 0x10) ? "+" : "";
const char* pinned = (b & 0x08) ? "!" : "";
switch (b & 0x7)
{
@ -334,14 +337,14 @@ void GCDump::DumpCallsiteString(UInt32 callsiteOffset, PTR_UInt8 pbCallsiteStrin
unsigned mask = 0;
PTR_UInt8 pInts = pCursor;
unsigned offset = VarInt::ReadUnsigned(pCursor);
char* interior = (b & 0x10) ? "+" : "";
char* pinned = (b & 0x08) ? "!" : "";
const char* interior = (b & 0x10) ? "+" : "";
const char* pinned = (b & 0x08) ? "!" : "";
#ifdef TARGET_ARM
char* baseReg = (b & 0x04) ? "R7" : "SP";
const char* baseReg = (b & 0x04) ? "R7" : "SP";
#else
char* baseReg = (b & 0x04) ? "EBP" : "ESP";
const char* baseReg = (b & 0x04) ? "EBP" : "ESP";
#endif
char* sign = (b & 0x02) ? "-" : "+";
const char* sign = (b & 0x02) ? "-" : "+";
if (b & 0x01)
{
mask = VarInt::ReadUnsigned(pCursor);
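Note (sketch, not from the diff): in C++11 a string literal no longer converts to a writable char *, so clang rejects the declarations this hunk fixes while MSVC merely warned.

    const char * returnKind = "????";                              // literals have type 'const char[N]'
    static const char * const regNames[] = { "R4", "R5", "R6" };   // both the elements and the array are const
    // char * bad = "????";                                        // ill-formed in C++11; clang reports an error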

View file

@ -15,7 +15,6 @@
#include "eetype.h"
#include "ObjectLayout.h"
// Adapter for GC's view of Array
class ArrayBase : Array
{

View file

@ -198,7 +198,7 @@ bool RedhawkGCInterface::InitializeSubsystems(GCType gcType)
InitializeSystemInfo();
// Initialize the special EEType used to mark free list entries in the GC heap.
EEType *pFreeObjectType = new EEType(); //@TODO: remove 'new'
EEType *pFreeObjectType = new (nothrow) EEType(); //@TODO: remove 'new'
pFreeObjectType->InitializeAsGcFreeType();
// Place the pointer to this type in a global cell (typed as the structurally equivalent MethodTable
@ -331,8 +331,9 @@ AppDomain g_sDefaultDomain;
// Trivial sync block cache. Will no doubt be replaced with a real implementation soon.
//
#ifdef VERIFY_HEAP
SyncBlockCache g_sSyncBlockCache;
#endif // VERIFY_HEAP
//-------------------------------------------------------------------------------------------------
// Used only by GC initialization, this initializes the EEType used to mark free entries in the GC heap. It
@ -427,6 +428,7 @@ void RedhawkGCInterface::GarbageCollect(UInt32 uGeneration, UInt32 uMode)
// static
GcSegmentHandle RedhawkGCInterface::RegisterFrozenSection(void * pSection, UInt32 SizeSection)
{
#ifdef FEATURE_BASICFREEZE
segment_info seginfo;
seginfo.pvMem = pSection;
@ -436,6 +438,9 @@ GcSegmentHandle RedhawkGCInterface::RegisterFrozenSection(void * pSection, UInt3
seginfo.ibReserved = seginfo.ibAllocated;
return (GcSegmentHandle)GCHeap::GetGCHeap()->RegisterFrozenSegment(&seginfo);
#else // FEATURE_BASICFREEZE
return NULL;
#endif // FEATURE_BASICFREEZE
}
// static
@ -798,6 +803,7 @@ void RedhawkGCInterface::ShutdownFinalization()
// Thread static representing the last allocation.
// This is used to log the type information for each slow allocation.
DECLSPEC_THREAD
EEType * RedhawkGCInterface::tls_pLastAllocationEEType = NULL;
// Get the last allocation for this thread.
@ -1039,9 +1045,9 @@ bool FinalizerThread::Initialize()
// queue of finalizable objects. It's mainly used by GC.WaitForPendingFinalizers(). The
// hEventFinalizerToShutDown and hEventShutDownToFinalizer are used to synchronize the main thread and the
// finalizer during the optional final finalization pass at shutdown.
hEventFinalizerDone = new CLREventStatic();
hEventFinalizerDone = new (nothrow) CLREventStatic();
hEventFinalizerDone->CreateManualEvent(FALSE);
hEventFinalizer = new CLREventStatic();
hEventFinalizer = new (nothrow) CLREventStatic();
hEventFinalizer->CreateAutoEvent(FALSE);
// Create the finalizer thread itself.

View file

@ -31,7 +31,7 @@ struct alloc_context;
class MethodInfo;
struct REGDISPLAY;
class Thread;
enum GCRefKind;
enum GCRefKind : unsigned char;
class ICodeManager;
class EEType;
@ -199,7 +199,7 @@ private:
// The EEType for the last allocation. This value is used inside of the GC allocator
// to emit allocation ETW events with type information. We set this value unconditionally to avoid
// race conditions where ETW is enabled after the value is set.
__declspec(thread) static EEType * tls_pLastAllocationEEType;
DECLSPEC_THREAD static EEType * tls_pLastAllocationEEType;
};
#endif // __GCRHINTERFACE_INCLUDED

View file

@ -85,7 +85,9 @@ static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtra
VOID CNameSpace::GcWeakPtrScanBySingleThread( int condemned, int max_gen, EnumGcRefScanContext* sc )
{
PalPrintf("CNameSpace::GcWeakPtrScanBySingleThread\n");
#ifdef VERIFY_HEAP
SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(&CheckPromoted, (LPARAM)sc, 0);
#endif // VERIFY_HEAP
}
VOID CNameSpace::GcShortWeakPtrScan(EnumGcRefCallbackFunc* fn, int condemned, int max_gen,
@ -227,16 +229,20 @@ void CNameSpace::GcDemote (int condemned, int max_gen, EnumGcRefScanContext* sc)
{
PalPrintf("CNameSpace::GcDemote\n");
Ref_RejuvenateHandles (condemned, max_gen, (LPARAM)sc);
#ifdef VERIFY_HEAP
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
#endif // VERIFY_HEAP
}
void CNameSpace::GcPromotionsGranted (int condemned, int max_gen, EnumGcRefScanContext* sc)
{
PalPrintf("CNameSpace::GcPromotionsGranted\n");
Ref_AgeHandles(condemned, max_gen, (LPARAM)sc);
#ifdef VERIFY_HEAP
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
#endif // VERIFY_HEAP
}

View file

@ -28,6 +28,8 @@ public:
void SuppressRelease() { m_held = false; }
TYPE Extract() { m_held = false; return GetValue(); }
HolderNoDefaultValue(HolderNoDefaultValue && other) = default;
protected:
TYPE m_value;
bool m_held;
@ -39,7 +41,7 @@ private:
};
// -----------------------------------------------------------------------------------------------------------
template <typename TYPE, void (*ACQUIRE_FUNC)(TYPE), void (*RELEASE_FUNC)(TYPE), UIntNative DEFAULTVALUE = 0>
template <typename TYPE, void (*ACQUIRE_FUNC)(TYPE), void (*RELEASE_FUNC)(TYPE), TYPE DEFAULTVALUE = nullptr>
class Holder : public HolderNoDefaultValue<TYPE, ACQUIRE_FUNC, RELEASE_FUNC>
{
typedef HolderNoDefaultValue<TYPE, ACQUIRE_FUNC, RELEASE_FUNC> MY_PARENT;
@ -47,6 +49,8 @@ public:
Holder() : MY_PARENT(DEFAULTVALUE, false) {}
Holder(TYPE value, bool fTake = true) : MY_PARENT(value, fTake) {}
Holder(Holder && other) = default;
private:
// No one should be copying around holder types.
Holder & operator=(const Holder & other);
@ -54,7 +58,7 @@ private:
};
// -----------------------------------------------------------------------------------------------------------
template <typename TYPE, void (*ACQUIRE_FUNC)(TYPE), void (*RELEASE_FUNC)(TYPE), UIntNative DEFAULTVALUE = 0>
template <typename TYPE, void (*ACQUIRE_FUNC)(TYPE), void (*RELEASE_FUNC)(TYPE), TYPE DEFAULTVALUE = nullptr>
class Wrapper : public Holder<TYPE, ACQUIRE_FUNC, RELEASE_FUNC, DEFAULTVALUE>
{
typedef Holder<TYPE, ACQUIRE_FUNC, RELEASE_FUNC, DEFAULTVALUE> MY_PARENT;
@ -62,18 +66,19 @@ class Wrapper : public Holder<TYPE, ACQUIRE_FUNC, RELEASE_FUNC, DEFAULTVALUE>
public:
Wrapper() : MY_PARENT() {}
Wrapper(TYPE value, bool fTake = true) : MY_PARENT(value, fTake) {}
Wrapper(Wrapper && other) = default;
FORCEINLINE TYPE& operator=(TYPE const & value)
{
Release();
m_value = value;
Acquire();
return m_value;
MY_PARENT::Release();
MY_PARENT::m_value = value;
MY_PARENT::Acquire();
return MY_PARENT::m_value;
}
FORCEINLINE const TYPE &operator->() { return m_value; }
FORCEINLINE const TYPE &operator*() { return m_value; }
FORCEINLINE operator TYPE() { return m_value; }
FORCEINLINE const TYPE &operator->() { return MY_PARENT::m_value; }
FORCEINLINE const TYPE &operator*() { return MY_PARENT::m_value; }
FORCEINLINE operator TYPE() { return MY_PARENT::m_value; }
private:
// No one should be copying around wrapper types.
@ -97,9 +102,9 @@ FORCEINLINE void Delete(TYPE *value)
// -----------------------------------------------------------------------------------------------------------
template <typename TYPE,
typename PTR_TYPE = TYPE *,
void (*ACQUIRE_FUNC)(PTR_TYPE) = DoNothing<typename PTR_TYPE>,
void (*ACQUIRE_FUNC)(PTR_TYPE) = DoNothing<PTR_TYPE>,
void (*RELEASE_FUNC)(PTR_TYPE) = Delete<TYPE>,
PTR_TYPE NULL_VAL = 0,
PTR_TYPE NULL_VAL = nullptr,
typename BASE = Wrapper<PTR_TYPE, ACQUIRE_FUNC, RELEASE_FUNC, NULL_VAL> >
class NewHolder : public BASE
{
@ -117,7 +122,7 @@ public:
//-----------------------------------------------------------------------------
// NewArrayHolder : New []'ed pointer holder
// {
// NewArrayHolder<Foo> foo = new Foo [30];
// NewArrayHolder<Foo> foo = new (nothrow) Foo [30];
// } // delete [] foo on out of scope
//-----------------------------------------------------------------------------
@ -130,9 +135,9 @@ FORCEINLINE void DeleteArray(TYPE *value)
template <typename TYPE,
typename PTR_TYPE = TYPE *,
void (*ACQUIRE_FUNC)(PTR_TYPE) = DoNothing<typename PTR_TYPE>,
void (*ACQUIRE_FUNC)(PTR_TYPE) = DoNothing<PTR_TYPE>,
void (*RELEASE_FUNC)(PTR_TYPE) = DeleteArray<TYPE>,
PTR_TYPE NULL_VAL = 0,
PTR_TYPE NULL_VAL = nullptr,
typename BASE = Wrapper<PTR_TYPE, ACQUIRE_FUNC, RELEASE_FUNC, NULL_VAL> >
class NewArrayHolder : public BASE
{
@ -159,7 +164,7 @@ template <typename TYPE,
typename PTR_TYPE = TYPE *,
void (*ACQUIRE_FUNC)(PTR_TYPE) = DoNothing<PTR_TYPE>,
void (*RELEASE_FUNC)(PTR_TYPE) = Destroy<TYPE>,
PTR_TYPE NULL_VAL = 0,
PTR_TYPE NULL_VAL = nullptr,
typename BASE = Wrapper<PTR_TYPE, ACQUIRE_FUNC, RELEASE_FUNC, NULL_VAL> >
class CreateHolder : public BASE
{
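Note on the MY_PARENT:: and TRAITS:: qualifications introduced in this file and in the SHash changes later in the commit (an illustrative sketch, not from the diff): under two-phase name lookup clang does not search dependent base classes for unqualified names, so members inherited from a template parameter must be reached through this->, an explicit qualifier, or a base typedef; MSVC historically accepted the unqualified spelling.

    template <typename T>
    struct Base
    {
        T m_value;
        void Release() {}
    };

    template <typename T>
    struct Wrapper : Base<T>
    {
        typedef Base<T> MY_PARENT;

        void Reset(const T & value)
        {
            // m_value = value;          // clang error: unqualified name is not looked up in a dependent base
            MY_PARENT::Release();        // explicit qualification works ...
            this->m_value = value;       // ... and so does this->
        }
    };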

View file

@ -52,17 +52,17 @@
EXTERN_C UInt32_BOOL g_fGcStressStarted;
Module::Module(ModuleHeader *pModuleHeader) :
m_pModuleHeader(pModuleHeader),
m_pNext(),
m_MethodList(),
m_pbDeltaShortcutTable(NULL),
m_pModuleHeader(pModuleHeader),
m_MethodList(),
m_fFinalizerInitComplete(false)
{
}
Module * Module::Create(SimpleModuleHeader *pModuleHeader)
{
NewHolder<Module> pNewModule = new Module(nullptr);
NewHolder<Module> pNewModule = new (nothrow) Module(nullptr);
if (NULL == pNewModule)
return NULL;
@ -87,7 +87,7 @@ Module * Module::Create(ModuleHeader *pModuleHeader)
// mode (or just fail the module creation).
ASSERT(pModuleHeader->Version == ModuleHeader::CURRENT_VERSION);
NewHolder<Module> pNewModule = new Module(pModuleHeader);
NewHolder<Module> pNewModule = new (nothrow) Module(pModuleHeader);
if (NULL == pNewModule)
return NULL;
@ -155,7 +155,7 @@ Module * Module::Create(ModuleHeader *pModuleHeader)
UInt32 nMethods = pNewModule->m_MethodList.GetNumMethodsDEBUG();
UInt32 nIndirCells = pNewModule->m_pModuleHeader->CountOfLoopIndirCells;
UIntNative * pShadowBuffer = new UIntNative[nIndirCells];
UIntNative * pShadowBuffer = new (nothrow) UIntNative[nIndirCells];
UIntNative * pIndirCells = (UIntNative *)pNewModule->m_pModuleHeader->GetLoopIndirCells();
memcpy(pShadowBuffer, pIndirCells, nIndirCells * sizeof(UIntNative));
@ -637,6 +637,8 @@ bool Module::EHEnumNext(EHEnumState * pEHEnumState, EHClause * pEHClauseOut)
pEHClauseOut->m_handlerOffset = VarInt::ReadUnsigned(pEnumState->pEHInfo);
pEHClauseOut->m_filterOffset = VarInt::ReadUnsigned(pEnumState->pEHInfo);
break;
case EH_CLAUSE_FAIL_FAST:
break;
}
return true;
@ -1226,7 +1228,7 @@ BlobHeader * Module::GetReadOnlyBlobs(UInt32 * pcbBlobs)
}
Module::GenericInstanceDescEnumerator::GenericInstanceDescEnumerator(Module * pModule, GenericInstanceDescKind gidKind)
: m_pModule(pModule), m_pCurrent(NULL), m_iCurrent(0), m_nCount(0), m_iSection(0), m_gidEnumKind(gidKind)
: m_pModule(pModule), m_pCurrent(NULL), m_gidEnumKind(gidKind), m_iCurrent(0), m_nCount(0), m_iSection(0)
{
}

View file

@ -32,7 +32,7 @@ class Module
friend struct DefaultSListTraits<Module>;
friend class RuntimeInstance;
public:
~Module();
virtual ~Module();
static Module * Create(ModuleHeader *pModuleHeader);
static Module * Create(SimpleModuleHeader *pModuleHeader);

View file

@ -1,53 +0,0 @@
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
// Overload new and delete operators to provide the required Redhawk semantics.
//
#ifndef __NEW_INCLUDED
#define __NEW_INCLUDED
#ifndef DACCESS_COMPILE
__declspec(selectany) HANDLE g_hHeap = NULL;
inline void * BaseNew(size_t cbSize)
{
//
// @TODO: revisit this implementation
//
if (NULL == g_hHeap)
{
// NOTE: GetProcessHeap is indempotent, so all threads racing to initialize this global will
// initialize it with the same value.
g_hHeap = PalGetProcessHeap();
}
return PalHeapAlloc(g_hHeap, 0, cbSize);
}
inline void BaseDelete(void * pvMemory)
{
//
// @TODO: revisit this implementation
//
//ASSERT(g_hHeap != NULL);
PalHeapFree(g_hHeap, 0, pvMemory);
}
//
// All 'operator new' variations have the same contract, which is to return NULL when out of memory.
//
inline void * __cdecl operator new(size_t cbSize) { return BaseNew(cbSize); } // normal
inline void * __cdecl operator new[](size_t cbSize) { return BaseNew(cbSize); } // array
inline void * __cdecl operator new(size_t cbSize, void * pvWhere) { return pvWhere; } // placement
inline void __cdecl operator delete(void * pvMemory) { BaseDelete(pvMemory); } // normal
inline void __cdecl operator delete[](void * pvMemory) { BaseDelete(pvMemory); } // array
inline void __cdecl operator delete(void * pvMemory, void * pvWhere) { } // placement
#endif // DACCESS_COMPILE
#endif // !__NEW_INCLUDED

View file

@ -24,7 +24,7 @@ struct REGDISPLAY
PTR_UIntNative pR13;
PTR_UIntNative pR14;
PTR_UIntNative pR15;
#endif TARGET_AMD64
#endif // TARGET_AMD64
UIntNative SP;
PTR_PCODE pIP;
@ -35,7 +35,7 @@ struct REGDISPLAY
// these need to be unwound during a stack walk
// for EH, but not adjusted, so we only need
// their values, not their addresses
#endif TARGET_AMD64
#endif // TARGET_AMD64
inline PCODE GetIP() { return IP; }
inline PTR_PCODE GetAddrOfIP() { return pIP; }

View file

@ -125,6 +125,8 @@ class SHash : public TRAITS
class KeyIndex;
friend class KeyIndex;
class Iterator;
class KeyIterator;
public:
// explicitly declare local typedefs for these traits types, otherwise
@ -134,9 +136,6 @@ class SHash : public TRAITS
typedef typename TRAITS::key_t key_t;
typedef typename TRAITS::count_t count_t;
class Iterator;
class KeyIterator;
// Constructor/destructor. SHash tables always start out empty, with no
// allocation overhead. Call Reallocate to prime with an initial size if
// desired.
@ -350,7 +349,7 @@ private:
void First()
{
if (m_index < m_tableSize)
if (IsNull(m_table[m_index]) || IsDeleted(m_table[m_index]))
if (TRAITS::IsNull(m_table[m_index]) || TRAITS::IsDeleted(m_table[m_index]))
Next();
}
@ -364,7 +363,7 @@ private:
m_index++;
if (m_index >= m_tableSize)
break;
if (!IsNull(m_table[m_index]) && !IsDeleted(m_table[m_index]))
if (!TRAITS::IsNull(m_table[m_index]) && !TRAITS::IsDeleted(m_table[m_index]))
break;
}
}
@ -417,14 +416,14 @@ private:
m_key = key;
count_t hash = Hash(key);
m_index = hash % m_tableSize;
TRAITS::m_index = hash % m_tableSize;
m_increment = (hash % (m_tableSize-1)) + 1;
// Find first valid element
if (IsNull(m_table[m_index]))
m_index = m_tableSize;
else if (IsDeleted(m_table[m_index])
|| !Equals(m_key, GetKey(m_table[m_index])))
if (IsNull(m_table[TRAITS::m_index]))
TRAITS::m_index = m_tableSize;
else if (IsDeleted(m_table[TRAITS::m_index])
|| !Equals(m_key, GetKey(m_table[TRAITS::m_index])))
Next();
}
}
@ -433,18 +432,18 @@ private:
{
while (true)
{
m_index += m_increment;
if (m_index >= m_tableSize)
m_index -= m_tableSize;
TRAITS::m_index += m_increment;
if (TRAITS::m_index >= m_tableSize)
TRAITS::m_index -= m_tableSize;
if (IsNull(m_table[m_index]))
if (IsNull(m_table[TRAITS::m_index]))
{
m_index = m_tableSize;
TRAITS::m_index = m_tableSize;
break;
}
if (!IsDeleted(m_table[m_index])
&& Equals(m_key, GetKey(m_table[m_index])))
if (!IsDeleted(m_table[TRAITS::m_index])
&& Equals(m_key, GetKey(m_table[TRAITS::m_index])))
{
break;
}

View file

@ -9,14 +9,14 @@
template <typename TRAITS>
SHash<TRAITS>::SHash()
: m_table(TADDR(NULL)),
: m_table(nullptr),
m_tableSize(0),
m_tableCount(0),
m_tableOccupied(0),
m_tableMax(0)
{
C_ASSERT(s_growth_factor_numerator > s_growth_factor_denominator);
C_ASSERT(s_density_factor_numerator < s_density_factor_denominator);
C_ASSERT(TRAITS::s_growth_factor_numerator > TRAITS::s_growth_factor_denominator);
C_ASSERT(TRAITS::s_density_factor_numerator < TRAITS::s_density_factor_denominator);
}
template <typename TRAITS>
@ -41,7 +41,7 @@ template <typename TRAITS>
typename SHash< TRAITS>::element_t SHash<TRAITS>::Lookup(key_t key) const
{
const element_t *pRet = Lookup(m_table, m_tableSize, key);
return ((pRet != NULL) ? (*pRet) : Null());
return ((pRet != NULL) ? (*pRet) : TRAITS::Null());
}
template <typename TRAITS>
@ -152,15 +152,15 @@ template <typename TRAITS>
bool SHash<TRAITS>::Grow()
{
count_t newSize = (count_t) (m_tableCount
* s_growth_factor_numerator / s_growth_factor_denominator
* s_density_factor_denominator / s_density_factor_numerator);
if (newSize < s_minimum_allocation)
newSize = s_minimum_allocation;
* TRAITS::s_growth_factor_numerator / TRAITS::s_growth_factor_denominator
* TRAITS::s_density_factor_denominator / TRAITS::s_density_factor_numerator);
if (newSize < TRAITS::s_minimum_allocation)
newSize = TRAITS::s_minimum_allocation;
// handle potential overflow
if (newSize < m_tableCount)
{
OnFailure(ftOverflow);
TRAITS::OnFailure(ftOverflow);
return false;
}
@ -175,7 +175,7 @@ bool SHash<TRAITS>::CheckGrowth(count_t newElements)
// handle potential overflow
if (newCount < newElements)
{
OnFailure(ftOverflow);
TRAITS::OnFailure(ftOverflow);
return false;
}
@ -183,24 +183,24 @@ bool SHash<TRAITS>::CheckGrowth(count_t newElements)
if (newCount < m_tableMax)
return true;
count_t newSize = (count_t) (newCount * s_density_factor_denominator / s_density_factor_numerator) + 1;
count_t newSize = (count_t) (newCount * TRAITS::s_density_factor_denominator / TRAITS::s_density_factor_numerator) + 1;
// handle potential overflow
if (newSize < newCount)
{
OnFailure(ftOverflow);
TRAITS::OnFailure(ftOverflow);
return false;
}
// accelerate the growth to avoid unnecessary rehashing
count_t newSize2 = (m_tableCount * s_growth_factor_numerator / s_growth_factor_denominator
* s_density_factor_denominator / s_density_factor_numerator);
count_t newSize2 = (m_tableCount * TRAITS::s_growth_factor_numerator / TRAITS::s_growth_factor_denominator
* TRAITS::s_density_factor_denominator / TRAITS::s_density_factor_numerator);
if (newSize < newSize2)
newSize = newSize2;
if (newSize < s_minimum_allocation)
newSize = s_minimum_allocation;
if (newSize < TRAITS::s_minimum_allocation)
newSize = TRAITS::s_minimum_allocation;
return Reallocate(newSize);
}
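The growth arithmetic above targets a load factor of s_density_factor_numerator / s_density_factor_denominator while expanding the table by s_growth_factor_numerator / s_growth_factor_denominator on each reallocation. Plugging in illustrative values (assumptions; the real constants are static members of the traits class) makes the integer evaluation concrete:

// Hypothetical factor values, only to make the arithmetic concrete.
constexpr unsigned growth_num  = 2, growth_den  = 1;   // grow 2x per reallocation
constexpr unsigned density_num = 3, density_den = 4;   // keep the table at most 3/4 full
constexpr unsigned liveCount   = 100;

// 100 live entries -> plan for 200 -> divide by the 3/4 load factor -> 266 slots,
// which Reallocate then rounds up to the next prime.
static_assert(liveCount * growth_num / growth_den * density_den / density_num == 266,
              "left-to-right integer evaluation, as in the runtime's expression");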
@ -209,28 +209,28 @@ template <typename TRAITS>
bool SHash<TRAITS>::Reallocate(count_t newTableSize)
{
ASSERT(newTableSize >=
(count_t) (GetCount() * s_density_factor_denominator / s_density_factor_numerator));
(count_t) (GetCount() * TRAITS::s_density_factor_denominator / TRAITS::s_density_factor_numerator));
// Allocation size must be a prime number. This is necessary so that hashes uniformly
// distribute to all indices, and so that chaining will visit all indices in the hash table.
newTableSize = NextPrime(newTableSize);
if (newTableSize == 0)
{
OnFailure(ftOverflow);
TRAITS::OnFailure(ftOverflow);
return false;
}
element_t *newTable = new element_t [newTableSize];
element_t *newTable = new (nothrow) element_t [newTableSize];
if (newTable == NULL)
{
OnFailure(ftAllocation);
TRAITS::OnFailure(ftAllocation);
return false;
}
element_t *p = newTable, *pEnd = newTable + newTableSize;
while (p < pEnd)
{
*p = Null();
*p = TRAITS::Null();
p++;
}
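The prime-size requirement in the comment above is what makes the probing exhaustive: the probe step is always in [1, size-1], so with a prime table size it is coprime to the size, and index = (index + increment) % size reaches every slot before the sequence repeats. A self-contained check of that property, independent of the runtime:

#include <cassert>
#include <vector>

// For a prime tableSize and any increment in [1, tableSize-1], the probe
// sequence visits every slot exactly once before it cycles.
static void VerifyFullCoverage(unsigned tableSize, unsigned increment)
{
    std::vector<bool> visited(tableSize, false);
    unsigned index = 0;
    for (unsigned step = 0; step < tableSize; step++)
    {
        visited[index] = true;
        index = (index + increment) % tableSize;
    }
    for (bool slotVisited : visited)
        assert(slotVisited);
}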
@ -239,7 +239,7 @@ bool SHash<TRAITS>::Reallocate(count_t newTableSize)
for (Iterator i = Begin(), end = End(); i != end; i++)
{
const element_t & cur = (*i);
if (!IsNull(cur) && !IsDeleted(cur))
if (!TRAITS::IsNull(cur) && !TRAITS::IsDeleted(cur))
Add(newTable, newTableSize, cur);
}
@ -250,7 +250,7 @@ bool SHash<TRAITS>::Reallocate(count_t newTableSize)
m_table = PTR_element_t(newTable);
m_tableSize = newTableSize;
m_tableMax = (count_t) (newTableSize * s_density_factor_numerator / s_density_factor_denominator);
m_tableMax = (count_t) (newTableSize * TRAITS::s_density_factor_numerator / TRAITS::s_density_factor_denominator);
m_tableOccupied = m_tableCount;
return true;
@ -262,7 +262,7 @@ const typename SHash<TRAITS>::element_t * SHash<TRAITS>::Lookup(PTR_element_t ta
if (tableSize == 0)
return NULL;
count_t hash = Hash(key);
count_t hash = TRAITS::Hash(key);
count_t index = hash % tableSize;
count_t increment = 0; // delay computation
@ -270,11 +270,11 @@ const typename SHash<TRAITS>::element_t * SHash<TRAITS>::Lookup(PTR_element_t ta
{
element_t& current = table[index];
if (IsNull(current))
if (TRAITS::IsNull(current))
return NULL;
if (!IsDeleted(current)
&& Equals(key, GetKey(current)))
if (!TRAITS::IsDeleted(current)
&& TRAITS::Equals(key, TRAITS::GetKey(current)))
{
return &current;
}
@ -291,9 +291,9 @@ const typename SHash<TRAITS>::element_t * SHash<TRAITS>::Lookup(PTR_element_t ta
template <typename TRAITS>
bool SHash<TRAITS>::Add(element_t *table, count_t tableSize, const element_t &element)
{
key_t key = GetKey(element);
key_t key = TRAITS::GetKey(element);
count_t hash = Hash(key);
count_t hash = TRAITS::Hash(key);
count_t index = hash % tableSize;
count_t increment = 0; // delay computation
@ -301,13 +301,13 @@ bool SHash<TRAITS>::Add(element_t *table, count_t tableSize, const element_t &el
{
element_t& current = table[index];
if (IsNull(current))
if (TRAITS::IsNull(current))
{
table[index] = element;
return true;
}
if (IsDeleted(current))
if (TRAITS::IsDeleted(current))
{
table[index] = element;
return false;
@ -325,27 +325,27 @@ bool SHash<TRAITS>::Add(element_t *table, count_t tableSize, const element_t &el
template <typename TRAITS>
void SHash<TRAITS>::AddOrReplace(element_t *table, count_t tableSize, const element_t &element)
{
ASSERT(!s_supports_remove);
ASSERT(!TRAITS::s_supports_remove);
key_t key = GetKey(element);
key_t key = TRAITS::GetKey(element);
count_t hash = Hash(key);
count_t hash = TRAITS::Hash(key);
count_t index = hash % tableSize;
count_t increment = 0; // delay computation
while (true)
{
element_t& current = table[index];
ASSERT(!IsDeleted(current));
ASSERT(!TRAITS::IsDeleted(current));
if (IsNull(current))
if (TRAITS::IsNull(current))
{
table[index] = element;
m_tableCount++;
m_tableOccupied++;
return;
}
else if (Equals(key, GetKey(current)))
else if (TRAITS::Equals(key, TRAITS::GetKey(current)))
{
table[index] = element;
return;
@ -366,10 +366,10 @@ void SHash<TRAITS>::AddOrReplace(element_t *table, count_t tableSize, const elem
template <typename TRAITS>
void SHash<TRAITS>::Remove(element_t *table, count_t tableSize, key_t key)
{
ASSERT(s_supports_remove);
ASSERT(TRAITS::s_supports_remove);
ASSERT(Lookup(table, tableSize, key) != NULL);
count_t hash = Hash(key);
count_t hash = TRAITS::Hash(key);
count_t index = hash % tableSize;
count_t increment = 0; // delay computation
@ -377,13 +377,13 @@ void SHash<TRAITS>::Remove(element_t *table, count_t tableSize, key_t key)
{
element_t& current = table[index];
if (IsNull(current))
if (TRAITS::IsNull(current))
return;
if (!IsDeleted(current)
&& Equals(key, GetKey(current)))
if (!TRAITS::IsDeleted(current)
&& TRAITS::Equals(key, TRAITS::GetKey(current)))
{
table[index] = Deleted();
table[index] = TRAITS::Deleted();
m_tableCount--;
return;
}
@ -403,11 +403,11 @@ void SHash<TRAITS>::Remove(element_t *table, count_t tableSize, key_t key)
template <typename TRAITS>
void SHash<TRAITS>::RemoveElement(element_t *table, count_t tableSize, element_t *element)
{
ASSERT(s_supports_remove);
ASSERT(TRAITS::s_supports_remove);
ASSERT(table <= element && element < table + tableSize);
ASSERT(!IsNull(*element) && !IsDeleted(*element));
ASSERT(!TRAITS::IsNull(*element) && !TRAITS::IsDeleted(*element));
*element = Deleted();
*element = TRAITS::Deleted();
m_tableCount--;
}
@ -434,7 +434,7 @@ bool SHash<TRAITS>::IsPrime(count_t number)
namespace
{
extern __declspec(selectany) const UInt32 g_shash_primes[] = {
const UInt32 g_shash_primes[] = {
11,17,23,29,37,47,59,71,89,107,131,163,197,239,293,353,431,521,631,761,919,
1103,1327,1597,1931,2333,2801,3371,4049,4861,5839,7013,8419,10103,12143,14591,
17519,21023,25229,30293,36353,43627,52361,62851,75431,90523, 108631, 130363,
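__declspec(selectany) is an MSVC-only extension that folds duplicate definitions of a symbol across object files; since the table already sits in an unnamed namespace, dropping the extension simply gives each translation unit its own internal-linkage copy, which clang compiles cleanly. The portable spelling, sketched with a hypothetical shortened table:

namespace
{
    // Internal linkage: every .cpp that includes this header gets a private copy,
    // so no cross-object folding (the job of __declspec(selectany)) is needed.
    const unsigned int g_example_primes[] = { 11, 17, 23, 29, 37, 47, 59, 71 };
}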

View file

@ -99,11 +99,11 @@ void SList<T, Traits>::PushHeadInterlocked(
while (true)
{
*GetNextPtr(pItem) = *reinterpret_cast<T * volatile *>(&m_pHead);
*Traits::GetNextPtr(pItem) = *reinterpret_cast<T * volatile *>(&m_pHead);
if (PalInterlockedCompareExchangePointer(
reinterpret_cast<void * volatile *>(&m_pHead),
reinterpret_cast<void *>(pItem),
reinterpret_cast<void *>(*GetNextPtr(pItem))) == reinterpret_cast<void *>(*GetNextPtr(pItem)))
reinterpret_cast<void *>(*Traits::GetNextPtr(pItem))) == reinterpret_cast<void *>(*Traits::GetNextPtr(pItem)))
{
break;
}
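The hunk above is the standard lock-free push: load the current head into the new item's next pointer, then publish the item with a compare-and-swap, retrying if another thread moved the head in the meantime. The same loop written against standard atomics instead of the PAL interlocked wrappers (a sketch, not the runtime's SList):

#include <atomic>

struct Node { Node* m_pNext; };

static std::atomic<Node*> g_pHead{nullptr};

void PushHead(Node* pItem)
{
    Node* pOldHead = g_pHead.load(std::memory_order_relaxed);
    do
    {
        pItem->m_pNext = pOldHead;                  // link behind the head we observed
    }
    while (!g_pHead.compare_exchange_weak(pOldHead, pItem,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
}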

View file

@ -26,13 +26,12 @@
#include "RWLock.h"
#include "threadstore.h"
#include "RuntimeInstance.h"
#include "InstanceStore.h"
#include "rhbinder.h"
#include "CachedInterfaceDispatch.h"
#include "RhConfig.h"
#include "stressLog.h"
#include "RestrictedCallouts.h"
#endif !DACCESS_COMPILE
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE

View file

@ -109,7 +109,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByt
g_pStressLog = &theLog;
theLog.pLock = new CrstStatic();
theLog.pLock = new (nothrow) CrstStatic();
theLog.pLock->Init(CrstStressLog);
if (maxBytesPerThread < STRESSLOG_CHUNK_SIZE)
{
@ -225,7 +225,7 @@ ThreadStressLog* StressLog::CreateThreadStressLogHelper(Thread * pThread) {
}
if (msgs == 0) {
msgs = new ThreadStressLog();
msgs = new (nothrow) ThreadStressLog();
if (msgs == 0 ||!msgs->IsValid ())
{
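Both StressLog hunks follow the replacement applied throughout this diff: a potentially throwing new becomes new (nothrow), and the result is checked for NULL. A minimal sketch of the pattern with a hypothetical type (spelled std::nothrow here; the runtime's headers make the shorter nothrow spelling available):

#include <cstddef>  // NULL
#include <new>      // std::nothrow

struct ThreadLogBuffer { int m_facilities; };

ThreadLogBuffer* AllocateLogBuffer()
{
    // Allocation failure is reported as NULL rather than by throwing std::bad_alloc.
    ThreadLogBuffer* pBuffer = new (std::nothrow) ThreadLogBuffer();
    if (pBuffer == NULL)
        return NULL;    // callers treat this as "stress logging unavailable"
    return pBuffer;
}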

View file

@ -27,7 +27,6 @@
#include "threadstore.h"
#include "RuntimeInstance.h"
#include "module.h"
#include "new.h"
#include "rhbinder.h"
#include "stressLog.h"
#include "RhConfig.h"
@ -296,7 +295,7 @@ void Thread::Construct()
m_pTEB = PalNtCurrentTeb();
#ifdef STRESS_LOG
if (StressLog::StressLogOn(~0ul, 0))
if (StressLog::StressLogOn(~0u, 0))
m_pThreadStressLog = StressLog::CreateThreadStressLog(this);
#endif // STRESS_LOG
}
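The ~0ul to ~0u change is a 32/64-bit portability fix: long is 32 bits under MSVC (LLP64) but 64 bits under Linux clang (LP64), so ~0ul becomes a 64-bit all-ones value that narrows when passed for the 32-bit facilities argument, which clang flags; ~0u is 32 bits on both. In short, under the usual data-model assumptions noted in the comments:

// LLP64 (Windows/MSVC): sizeof(long) == 4, so ~0ul == 0xFFFFFFFF
// LP64  (Linux/clang) : sizeof(long) == 8, so ~0ul == 0xFFFFFFFFFFFFFFFF
unsigned int allFacilities = ~0u;     // a 32-bit all-ones mask on both platforms
// unsigned int truncated = ~0ul;     // on LP64 this silently drops the upper 32 bits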
@ -394,7 +393,7 @@ void Thread::Destroy()
RedhawkGCInterface::ReleaseAllocContext(GetAllocContext());
#if _DEBUG
memset(this, 0x06, sizeof(this));
memset(this, 0x06, sizeof(*this));
#endif // _DEBUG
// Thread::Destroy is called when the thread's "home" fiber dies. We mark the thread as "detached" here
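The memset hunk fixes a sizeof mix-up: sizeof(this) is the size of the pointer (4 or 8 bytes), while the intent is to poison the whole Thread object, which needs sizeof(*this). Illustrated with a hypothetical struct:

#include <string.h>

struct Widget { char payload[256]; };

void Scribble(Widget* pWidget)
{
    // memset(pWidget, 0x06, sizeof(pWidget));  // wrong: fills only the 4 or 8 bytes of the pointer
    memset(pWidget, 0x06, sizeof(*pWidget));    // right: fills all 256 bytes of the object
}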
@ -1006,7 +1005,7 @@ PTR_UInt8 Thread::AllocateThreadLocalStorageForDynamicType(UInt32 uTlsTypeOffset
if (numTlsCells < 2 * m_numDynamicTypesTlsCells)
numTlsCells = 2 * m_numDynamicTypesTlsCells;
PTR_UInt8* pTlsCells = new PTR_UInt8[numTlsCells];
PTR_UInt8* pTlsCells = new (nothrow) PTR_UInt8[numTlsCells];
if (pTlsCells == NULL)
return NULL;
@ -1026,7 +1025,7 @@ PTR_UInt8 Thread::AllocateThreadLocalStorageForDynamicType(UInt32 uTlsTypeOffset
if (m_pDynamicTypesTlsCells[uTlsTypeOffset] == NULL)
{
UInt8* pTlsStorage = new UInt8[tlsStorageSize];
UInt8* pTlsStorage = new (nothrow) UInt8[tlsStorageSize];
if (pTlsStorage == NULL)
return NULL;

View file

@ -27,7 +27,6 @@
#include "RWLock.h"
#include "threadstore.h"
#include "RuntimeInstance.h"
#include "new.h"
#include "ObjectLayout.h"
#include "TargetPtrs.h"
#include "eetype.h"
@ -89,7 +88,7 @@ ThreadStore::~ThreadStore()
// static
ThreadStore * ThreadStore::Create(RuntimeInstance * pRuntimeInstance)
{
NewHolder<ThreadStore> pNewThreadStore = new ThreadStore();
NewHolder<ThreadStore> pNewThreadStore = new (nothrow) ThreadStore();
if (NULL == pNewThreadStore)
return NULL;
@ -367,7 +366,7 @@ C_ASSERT(sizeof(Thread) == sizeof(ThreadBuffer));
EXTERN_C Thread * FASTCALL RhpGetThread();
#ifdef FEATURE_DECLSPEC_THREAD
__declspec(thread) ThreadBuffer tls_CurrentThread =
DECLSPEC_THREAD ThreadBuffer tls_CurrentThread =
{
{ 0 }, // m_rgbAllocContextBuffer
Thread::TSF_Unknown, // m_ThreadStateFlags
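DECLSPEC_THREAD replaces the MSVC-only __declspec(thread) spelling with a macro that can expand differently per compiler; its definition is not shown in this diff. A plausible sketch of such a macro:

// Hypothetical definition; the real one lives in the runtime's platform headers.
#ifdef _MSC_VER
#define DECLSPEC_THREAD __declspec(thread)
#else
#define DECLSPEC_THREAD __thread
#endif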
@ -415,7 +414,7 @@ void * ThreadStore::CreateCurrentThreadBuffer()
ASSERT(_tls_index < 64);
ASSERT(NULL == PalTlsGetValue(_tls_index));
TlsSectionStruct * pTlsBlock = new TlsSectionStruct();
TlsSectionStruct * pTlsBlock = new (nothrow) TlsSectionStruct();
ASSERT(NULL != pTlsBlock); // we require NT6's __declspec(thread) support for reliability
PalTlsSetValue(_tls_index, pTlsBlock);
@ -521,7 +520,7 @@ COOP_PINVOKE_HELPER(Boolean, RhGetExceptionsForCurrentThread, (Array* pOutputArr
Boolean ThreadStore::GetExceptionsForCurrentThread(Array* pOutputArray, Int32* pWrittenCountOut)
{
Int32 countWritten = 0;
Object** pArrayElements;
Thread * pThread = GetCurrentThread();
for (PTR_ExInfo pInfo = pThread->m_pExInfoStackHead; pInfo != NULL; pInfo = pInfo->m_pPrevExInfo)
@ -546,7 +545,7 @@ Boolean ThreadStore::GetExceptionsForCurrentThread(Array* pOutputArray, Int32* p
if (countWritten == 0)
return Boolean_true;
Object** pArrayElements = (Object**)pOutputArray->GetArrayData();
pArrayElements = (Object**)pOutputArray->GetArrayData();
for (PTR_ExInfo pInfo = pThread->m_pExInfoStackHead; pInfo != NULL; pInfo = pInfo->m_pPrevExInfo)
{
if (pInfo->m_exception == NULL)
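Hoisting the pArrayElements declaration and turning the later line into a plain assignment presumably works around C++'s rule against jumping past an initialization: if some error path in the elided part of the function jumps over an initialized declaration, clang rejects the code, whereas a bare pointer declaration may legally be bypassed. The rule in miniature, with a hypothetical function:

struct Object { };
Object** GetData();

void Example(bool fSkip)
{
    Object** pElements;              // declared up front, like pArrayElements above
    if (fSkip)
        goto Done;
    // Object** pLocal = GetData();  // an initialized declaration here would make the
    //                               // goto above ill-formed (jump bypasses initialization)
    pElements = GetData();           // assignment instead of initialization: nothing is bypassed
    (void)pElements;
Done:
    return;
}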

View file

@ -0,0 +1,535 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/***
* no_sal2.h - renders the SAL annotations for documenting APIs harmless.
*
*
*Purpose:
* sal.h provides a set of SAL2 annotations to describe how a function uses its
* parameters - the assumptions it makes about them, and the guarantees it makes
* upon finishing. This file redefines all those annotation macros to be harmless.
* It is designed for use in down-level build environments where the tooling may
* be unhappy with the standard SAL2 macro definitions.
*
* [Public]
*
*
****/
#ifndef _NO_SAL_2_H_
#define _NO_SAL_2_H_
#undef _When_
#define _When_(c,a)
#undef _At_
#define _At_(t,a)
#undef _At_buffer_
#define _At_buffer_(t,i,c,a)
#undef _Group_
#define _Group_(a)
#undef _Pre_
#define _Pre_
#undef _Post_
#define _Post_
#undef _Deref_
#define _Deref_
#undef _Null_
#define _Null_
#undef _Notnull_
#define _Notnull_
#undef _Maybenull_
#define _Maybenull_
#undef _Const_
#define _Const_
#undef _Check_return_
#define _Check_return_
#undef _Must_inspect_result_
#define _Must_inspect_result_
#undef _Pre_satisfies_
#define _Pre_satisfies_(e)
#undef _Post_satisfies_
#define _Post_satisfies_(e)
#undef _Writable_elements_
#define _Writable_elements_(s)
#undef _Writable_bytes_
#define _Writable_bytes_(s)
#undef _Readable_elements_
#define _Readable_elements_(s)
#undef _Readable_bytes_
#define _Readable_bytes_(s)
#undef _Null_terminated_
#define _Null_terminated_
#undef _NullNull_terminated_
#define _NullNull_terminated_
#undef _Valid_
#define _Valid_
#undef _Notvalid_
#define _Notvalid_
#undef _Success_
#define _Success_(c)
#undef _Return_type_success_
#define _Return_type_success_(c)
#undef _On_failure_
#define _On_failure_(a)
#undef _Always_
#define _Always_(a)
#undef _Use_decl_annotations_
#define _Use_decl_annotations_
#undef _Pre_defensive_
#define _Pre_defensive_
#undef _Post_defensive_
#define _Post_defensive_
#undef _Pre_unknown_
#define _Pre_unknown_
#undef _Acquires_lock_
#define _Acquires_lock_(e)
#undef _Releases_lock_
#define _Releases_lock_(e)
#undef _Requires_lock_held_
#define _Requires_lock_held_(e)
#undef _Requires_lock_not_held_
#define _Requires_lock_not_held_(e)
#undef _Requires_no_locks_held_
#define _Requires_no_locks_held_
#undef _Guarded_by_
#define _Guarded_by_(e)
#undef _Write_guarded_by_
#define _Write_guarded_by_(e)
#undef _Interlocked_
#define _Interlocked_
#undef _Post_same_lock_
#define _Post_same_lock_(e1,e2)
#undef _Benign_race_begin_
#define _Benign_race_begin_
#undef _Benign_race_end_
#define _Benign_race_end_
#undef _No_competing_thread_
#define _No_competing_thread_
#undef _No_competing_thread_begin_
#define _No_competing_thread_begin_
#undef _No_competing_thread_end_
#define _No_competing_thread_end_
#undef _Acquires_shared_lock_
#define _Acquires_shared_lock_(e)
#undef _Releases_shared_lock_
#define _Releases_shared_lock_(e)
#undef _Requires_shared_lock_held_
#define _Requires_shared_lock_held_(e)
#undef _Acquires_exclusive_lock_
#define _Acquires_exclusive_lock_(e)
#undef _Releases_exclusive_lock_
#define _Releases_exclusive_lock_(e)
#undef _Requires_exclusive_lock_held_
#define _Requires_exclusive_lock_held_(e)
#undef _Has_lock_kind_
#define _Has_lock_kind_(n)
#undef _Create_lock_level_
#define _Create_lock_level_(n)
#undef _Has_lock_level_
#define _Has_lock_level_(n)
#undef _Lock_level_order_
#define _Lock_level_order_(n1,n2)
#undef _Analysis_assume_lock_acquired_
#define _Analysis_assume_lock_acquired_(e)
#undef _Analysis_assume_lock_released_
#define _Analysis_assume_lock_released_(e)
#undef _Analysis_assume_lock_held_
#define _Analysis_assume_lock_held_(e)
#undef _Analysis_assume_lock_not_held_
#define _Analysis_assume_lock_not_held_(e)
#undef _Analysis_assume_same_lock_
#define _Analysis_assume_same_lock_(e)
#undef _In_
#define _In_
#undef _Out_
#define _Out_
#undef _Inout_
#define _Inout_
#undef _In_z_
#define _In_z_
#undef _Inout_z_
#define _Inout_z_
#undef _In_reads_
#define _In_reads_(s)
#undef _In_reads_bytes_
#define _In_reads_bytes_(s)
#undef _In_reads_z_
#define _In_reads_z_(s)
#undef _In_reads_or_z_
#define _In_reads_or_z_(s)
#undef _Out_writes_
#define _Out_writes_(s)
#undef _Out_writes_bytes_
#define _Out_writes_bytes_(s)
#undef _Out_writes_z_
#define _Out_writes_z_(s)
#undef _Inout_updates_
#define _Inout_updates_(s)
#undef _Inout_updates_bytes_
#define _Inout_updates_bytes_(s)
#undef _Inout_updates_z_
#define _Inout_updates_z_(s)
#undef _Out_writes_to_
#define _Out_writes_to_(s,c)
#undef _Out_writes_bytes_to_
#define _Out_writes_bytes_to_(s,c)
#undef _Out_writes_all_
#define _Out_writes_all_(s)
#undef _Out_writes_bytes_all_
#define _Out_writes_bytes_all_(s)
#undef _Inout_updates_to_
#define _Inout_updates_to_(s,c)
#undef _Inout_updates_bytes_to_
#define _Inout_updates_bytes_to_(s,c)
#undef _Inout_updates_all_
#define _Inout_updates_all_(s)
#undef _Inout_updates_bytes_all_
#define _Inout_updates_bytes_all_(s)
#undef _In_reads_to_ptr_
#define _In_reads_to_ptr_(p)
#undef _In_reads_to_ptr_z_
#define _In_reads_to_ptr_z_(p)
#undef _Out_writes_to_ptr_
#define _Out_writes_to_ptr_(p)
#undef _Out_writes_to_ptr_z_
#define _Out_writes_to_ptr_z_(p)
#undef _In_opt_
#define _In_opt_
#undef _Out_opt_
#define _Out_opt_
#undef _Inout_opt_
#define _Inout_opt_
#undef _In_opt_z_
#define _In_opt_z_
#undef _Inout_opt_z_
#define _Inout_opt_z_
#undef _In_reads_opt_
#define _In_reads_opt_(s)
#undef _In_reads_bytes_opt_
#define _In_reads_bytes_opt_(s)
#undef _Out_writes_opt_
#define _Out_writes_opt_(s)
#undef _Out_writes_bytes_opt_
#define _Out_writes_bytes_opt_(s)
#undef _Out_writes_opt_z_
#define _Out_writes_opt_z_(s)
#undef _Inout_updates_opt_
#define _Inout_updates_opt_(s)
#undef _Inout_updates_bytes_opt_
#define _Inout_updates_bytes_opt_(s)
#undef _Inout_updates_opt_z_
#define _Inout_updates_opt_z_(s)
#undef _Out_writes_to_opt_
#define _Out_writes_to_opt_(s,c)
#undef _Out_writes_bytes_to_opt_
#define _Out_writes_bytes_to_opt_(s,c)
#undef _Out_writes_all_opt_
#define _Out_writes_all_opt_(s)
#undef _Out_writes_bytes_all_opt_
#define _Out_writes_bytes_all_opt_(s)
#undef _Inout_updates_to_opt_
#define _Inout_updates_to_opt_(s,c)
#undef _Inout_updates_bytes_to_opt_
#define _Inout_updates_bytes_to_opt_(s,c)
#undef _Inout_updates_all_opt_
#define _Inout_updates_all_opt_(s)
#undef _Inout_updates_bytes_all_opt_
#define _Inout_updates_bytes_all_opt_(s)
#undef _In_reads_to_ptr_opt_
#define _In_reads_to_ptr_opt_(p)
#undef _In_reads_to_ptr_opt_z_
#define _In_reads_to_ptr_opt_z_(p)
#undef _Out_writes_to_ptr_opt_
#define _Out_writes_to_ptr_opt_(p)
#undef _Out_writes_to_ptr_opt_z_
#define _Out_writes_to_ptr_opt_z_(p)
#undef _Outptr_
#define _Outptr_
#undef _Outptr_opt_
#define _Outptr_opt_
#undef _Outptr_result_maybenull_
#define _Outptr_result_maybenull_
#undef _Outptr_opt_result_maybenull_
#define _Outptr_opt_result_maybenull_
#undef _Outptr_z_
#define _Outptr_z_
#undef _Outptr_opt_z_
#define _Outptr_opt_z_
#undef _Outptr_result_maybenull_z_
#define _Outptr_result_maybenull_z_
#undef _Outptr_opt_result_maybenull_z_
#define _Outptr_opt_result_maybenull_z_
#undef _COM_Outptr_
#define _COM_Outptr_
#undef _COM_Outptr_opt_
#define _COM_Outptr_opt_
#undef _COM_Outptr_result_maybenull_
#define _COM_Outptr_result_maybenull_
#undef _COM_Outptr_opt_result_maybenull_
#define _COM_Outptr_opt_result_maybenull_
#undef _Outptr_result_buffer_
#define _Outptr_result_buffer_(s)
#undef _Outptr_result_bytebuffer_
#define _Outptr_result_bytebuffer_(s)
#undef _Outptr_opt_result_buffer_
#define _Outptr_opt_result_buffer_(s)
#undef _Outptr_opt_result_bytebuffer_
#define _Outptr_opt_result_bytebuffer_(s)
#undef _Outptr_result_buffer_to_
#define _Outptr_result_buffer_to_(s,c)
#undef _Outptr_result_bytebuffer_to_
#define _Outptr_result_bytebuffer_to_(s,c)
#undef _Outptr_opt_result_buffer_to_
#define _Outptr_opt_result_buffer_to_(s,c)
#undef _Outptr_opt_result_bytebuffer_to_
#define _Outptr_opt_result_bytebuffer_to_(s,c)
#undef _Ret_
#define _Ret_
#undef _Ret_valid_
#define _Ret_valid_
#undef _Ret_z_
#define _Ret_z_
#undef _Ret_writes_
#define _Ret_writes_(s)
#undef _Ret_writes_bytes_
#define _Ret_writes_bytes_(s)
#undef _Ret_writes_z_
#define _Ret_writes_z_(s)
#undef _Ret_writes_to_
#define _Ret_writes_to_(s,c)
#undef _Ret_writes_bytes_to_
#define _Ret_writes_bytes_to_(s,c)
#undef _Ret_writes_maybenull_
#define _Ret_writes_maybenull_(s)
#undef _Ret_writes_bytes_maybenull_
#define _Ret_writes_bytes_maybenull_(s)
#undef _Ret_writes_to_maybenull_
#define _Ret_writes_to_maybenull_(s,c)
#undef _Ret_writes_bytes_to_maybenull_
#define _Ret_writes_bytes_to_maybenull_(s,c)
#undef _Ret_writes_maybenull_z_
#define _Ret_writes_maybenull_z_(s)
#undef _Ret_maybenull_
#define _Ret_maybenull_
#undef _Ret_maybenull_z_
#define _Ret_maybenull_z_
#undef _Field_size_
#define _Field_size_(s)
#undef _Field_size_opt_
#define _Field_size_opt_(s)
#undef _Field_size_bytes_
#define _Field_size_bytes_(s)
#undef _Field_size_bytes_opt_
#define _Field_size_bytes_opt_(s)
#undef _Field_size_part_
#define _Field_size_part_(s,c)
#undef _Field_size_part_opt_
#define _Field_size_part_opt_(s,c)
#undef _Field_size_bytes_part_
#define _Field_size_bytes_part_(s,c)
#undef _Field_size_bytes_part_opt_
#define _Field_size_bytes_part_opt_(s,c)
#undef _Field_size_full_
#define _Field_size_full_(s)
#undef _Field_size_full_opt_
#define _Field_size_full_opt_(s)
#undef _Field_size_bytes_full_
#define _Field_size_bytes_full_(s)
#undef _Field_size_bytes_full_opt_
#define _Field_size_bytes_full_opt_(s)
#undef _Printf_format_string_
#define _Printf_format_string_
#undef _Scanf_format_string_
#define _Scanf_format_string_
#undef _Scanf_s_format_string_
#define _Scanf_s_format_string_
#undef _Printf_format_string_params_
#define _Printf_format_string_params_(x)
#undef _Scanf_format_string_params_
#define _Scanf_format_string_params_(x)
#undef _Scanf_s_format_string_params_
#define _Scanf_s_format_string_params_(x)
#undef _In_range_
#define _In_range_(l,h)
#undef _Out_range_
#define _Out_range_(l,h)
#undef _Ret_range_
#define _Ret_range_(l,h)
#undef _Deref_in_range_
#define _Deref_in_range_(l,h)
#undef _Deref_out_range_
#define _Deref_out_range_(l,h)
#undef _Deref_inout_range_
#define _Deref_inout_range_(l,h)
#undef _Field_range_
#define _Field_range_(l,h)
#undef _Pre_equal_to_
#define _Pre_equal_to_(e)
#undef _Post_equal_to_
#define _Post_equal_to_(e)
#undef _Struct_size_bytes_
#define _Struct_size_bytes_(s)
#undef _Analysis_assume_
#define _Analysis_assume_
#undef _Analysis_mode_
#define _Analysis_mode_(m)
#undef _Analysis_noreturn_
#define _Analysis_noreturn_
#undef _Raises_SEH_exception_
#define _Raises_SEH_exception_
#undef _Maybe_raises_SEH_exception_
#define _Maybe_raises_SEH_exception_
#undef _Function_class_
#define _Function_class_(n)
#undef _Literal_
#define _Literal_
#undef _Notliteral_
#define _Notliteral_
#undef _Enum_is_bitflag_
#define _Enum_is_bitflag_
#undef _Strict_type_match_
#define _Strict_type_match_
#undef _Points_to_data_
#define _Points_to_data_
#undef _Interlocked_operand_
#define _Interlocked_operand_
#undef _IRQL_raises_
#define _IRQL_raises_(i)
#undef _IRQL_requires_
#define _IRQL_requires_(i)
#undef _IRQL_requires_max_
#define _IRQL_requires_max_(i)
#undef _IRQL_requires_min_
#define _IRQL_requires_min_(i)
#undef _IRQL_saves_
#define _IRQL_saves_
#undef _IRQL_saves_global_
#define _IRQL_saves_global_(k,s)
#undef _IRQL_restores_
#define _IRQL_restores_
#undef _IRQL_restores_global_
#define _IRQL_restores_global_(k,s)
#undef _IRQL_always_function_min_
#define _IRQL_always_function_min_(i)
#undef _IRQL_always_function_max_
#define _IRQL_always_function_max_(i)
#undef _IRQL_requires_same_
#define _IRQL_requires_same_
#undef _IRQL_uses_cancel_
#define _IRQL_uses_cancel_
#undef _IRQL_is_cancel_
#define _IRQL_is_cancel_
#undef _Kernel_float_saved_
#define _Kernel_float_saved_
#undef _Kernel_float_restored_
#define _Kernel_float_restored_
#undef _Kernel_float_used_
#define _Kernel_float_used_
#undef _Kernel_acquires_resource_
#define _Kernel_acquires_resource_(k)
#undef _Kernel_releases_resource_
#define _Kernel_releases_resource_(k)
#undef _Kernel_requires_resource_held_
#define _Kernel_requires_resource_held_(k)
#undef _Kernel_requires_resource_not_held_
#define _Kernel_requires_resource_not_held_(k)
#undef _Kernel_clear_do_init_
#define _Kernel_clear_do_init_(yn)
#undef _Kernel_IoGetDmaAdapter_
#define _Kernel_IoGetDmaAdapter_
#undef _Outref_
#define _Outref_
#undef _Outref_result_maybenull_
#define _Outref_result_maybenull_
#undef _Outref_result_buffer_
#define _Outref_result_buffer_(s)
#undef _Outref_result_bytebuffer_
#define _Outref_result_bytebuffer_(s)
#undef _Outref_result_buffer_to_
#define _Outref_result_buffer_to_(s,c)
#undef _Outref_result_bytebuffer_to_
#define _Outref_result_bytebuffer_to_(s,c)
#undef _Outref_result_buffer_all_
#define _Outref_result_buffer_all_(s)
#undef _Outref_result_bytebuffer_all_
#define _Outref_result_bytebuffer_all_(s)
#undef _Outref_result_buffer_maybenull_
#define _Outref_result_buffer_maybenull_(s)
#undef _Outref_result_bytebuffer_maybenull_
#define _Outref_result_bytebuffer_maybenull_(s)
#undef _Outref_result_buffer_to_maybenull_
#define _Outref_result_buffer_to_maybenull_(s,c)
#undef _Outref_result_bytebuffer_to_maybenull_
#define _Outref_result_bytebuffer_to_maybenull_(s,c)
#undef _Outref_result_buffer_all_maybenull_
#define _Outref_result_buffer_all_maybenull_(s)
#undef _Outref_result_bytebuffer_all_maybenull_
#define _Outref_result_bytebuffer_all_maybenull_(s)
#undef _In_defensive_
#define _In_defensive_(a)
#undef _Out_defensive_
#define _Out_defensive_(a)
#undef _Inout_defensive_
#define _Inout_defensive_(a)
#undef _Outptr_result_nullonfailure_
#define _Outptr_result_nullonfailure_
#undef _Outptr_opt_result_nullonfailure_
#define _Outptr_opt_result_nullonfailure_
#undef _Outref_result_nullonfailure_
#define _Outref_result_nullonfailure_
#undef _Result_nullonfailure_
#define _Result_nullonfailure_
#undef _Result_zeroonfailure_
#define _Result_zeroonfailure_
#undef _Acquires_nonreentrant_lock_
#define _Acquires_nonreentrant_lock_(e)
#undef _Releases_nonreentrant_lock_
#define _Releases_nonreentrant_lock_(e)
#undef _Reserved_
#define _Reserved_ _Pre_equal_to_(0) _Pre_ _Null_
#undef _Pre_z_
#define _Pre_z_ _Pre_ _Null_terminated_
#undef _Post_z_
#define _Post_z_ _Post_ _Null_terminated_
#undef _Prepost_z_
#define _Prepost_z_ _Pre_z_ _Post_z_
#undef _Pre_null_
#define _Pre_null_ _Pre_ _Null_
#undef _Pre_maybenull_
#define _Pre_maybenull_ _Pre_ _Maybenull_
#undef _Pre_notnull_
#define _Pre_notnull_ _Pre_ _Notnull_
#undef _Pre_valid_
#define _Pre_valid_ _Pre_notnull_ _Pre_ _Valid_
#undef _Pre_opt_valid_
#define _Pre_opt_valid_ _Pre_maybenull_ _Pre_ _Valid_
#undef _Post_valid_
#define _Post_valid_ _Post_ _Valid_
#undef _Post_invalid_
#define _Post_invalid_ _Post_ _Deref_ _Notvalid_
#undef _Post_ptr_invalid_
#define _Post_ptr_invalid_ _Post_ _Notvalid_
#undef _Pre_readable_size_
#define _Pre_readable_size_(s) _Pre_ _Readable_elements_(s) _Pre_ _Valid_
#undef _Pre_writable_size_
#define _Pre_writable_size_(s) _Pre_ _Writable_elements_(s)
#undef _Pre_readable_byte_size_
#define _Pre_readable_byte_size_(s) _Pre_ _Readable_bytes_(s) _Pre_ _Valid_
#undef _Pre_writable_byte_size_
#define _Pre_writable_byte_size_(s) _Pre_ _Writable_bytes_(s)
#undef _Post_readable_size_
#define _Post_readable_size_(s) _Post_ _Readable_elements_(s) _Post_ _Valid_
#undef _Post_writable_size_
#define _Post_writable_size_(s) _Post_ _Writable_elements_(s)
#undef _Post_readable_byte_size_
#define _Post_readable_byte_size_(s) _Post_ _Readable_bytes_(s) _Post_ _Valid_
#undef _Post_writable_byte_size_
#define _Post_writable_byte_size_(s) _Post_ _Writable_bytes_(s)
#endif /* _NO_SAL_2_H_ */
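With every annotation redefined to expand to nothing, SAL-decorated prototypes carried over from the Windows-oriented headers still parse under clang. A hypothetical before/after:

#include <stddef.h>
#include "no_sal2.h"

// After preprocessing, the annotations disappear and the compiler sees an ordinary prototype:
//   int CopyBytes(unsigned char* pDest, size_t cbDest, const unsigned char* pSrc, size_t cbSrc);
_Success_(return == 0)
int CopyBytes(_Out_writes_bytes_(cbDest) unsigned char* pDest, size_t cbDest,
              _In_reads_bytes_(cbSrc) const unsigned char* pSrc, size_t cbSrc);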

The diff for this file is not shown because of its large size. Load diff

View file

@ -0,0 +1,537 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
#ifndef SPECSTRINGS_H
#define SPECSTRINGS_H
/***
*specstrings.h - support for markers for documenting the semantics of APIs
*
*
* [Public]
****/
/*************************************************************************
* See specstrings_strict.h for documentation of all user visible macros.
*************************************************************************/
#if _MSC_VER
#pragma once
#endif
#if !defined(_SAL_VERSION_SAL2)
#if defined(__BUILDMACHINE__) || defined(_USE_SAL2_ONLY)
#define _SAL_VERSION_SAL2(_A) SAL_2_Clean_Violation_using ## _A
#else
#define _SAL_VERSION_SAL2(_A)
#endif
#ifdef _USE_SAL2_ONLY
#define _SAL2_STRICT
#define _SAL_VERSION_CHECK(_A) _SAL_VERSION_SAL2(_A)
#else
#define _SAL_VERSION_CHECK(_A)
#endif
#ifndef SAL_VERSION_CHECK
#define SAL_VERSION_CHECK(_A) _SAL_VERSION_CHECK(_A)
#define SAL_VERSION_SAL2(_A) _SAL_VERSION_SAL2(_A)
#endif
#endif
#include <sal.h>
#ifndef __SAL_H_FULL_VER
#define __SAL_H_FULL_VER 140050727
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* version specific fixes to bring sal.h up to date */
#if __SAL_H_FULL_VER <= 140050727
#if !defined(__midl) && defined(_PREFAST_) && _MSC_VER >= 1000 // [
/* Missing from RTM sal.h */
#define __inner_bound _SA_annotes0(SAL_bound)
#define __inner_range(lb,ub) _SA_annotes2(SAL_range,lb,ub)
#define __inner_assume_bound_dec __inline __nothrow void __AssumeBoundInt(_Post_ __inner_bound int i) {i;}
#define __inner_assume_bound(i) __AssumeBoundInt(i);
#define __inner_allocator _SA_annotes0(SAL_allocator)
#define __static_context(ctx, annotes) \
_SA_annotes1(SAL_context,ctx) _Group_(__nop_impl(annotes))
#define __failure(x) __static_context(SAL_return_convention, \
_SA_annotes1(SAL_failure,x))
__ANNOTATION(SAL_valueUndefined());
#define __valueUndefined _SA_annotes0(SAL_valueUndefined)
enum __SAL_failureKind{__failureUnspecified = 0, __failureUndefined = 1};
__ANNOTATION(SAL_failureDefault(enum __SAL_failureKind));
#define __failureDefault(kind) __static_context(SAL_return_convention, \
_SA_annotes1(SAL_failureDefault,kind))
#else // ][
#define __inner_bound
#define __inner_range(lb,ub)
#define __inner_assume_bound_dec
#define __inner_assume_bound(i)
#define __inner_allocator
#define __static_context(ctx, annotes)
#define __failure(x)
#define __valueUndefined
#define __failureDefault(x)
#endif // ]
#define __xcount(size) __notnull __inexpressible_writableTo(size)
#define __in_xcount(size) __in _Pre_ __inexpressible_readableTo(size)
#define __out_xcount(size) __xcount(size) _Post_ __valid __refparam
#define __out_xcount_part(size,length) __out_xcount(size) _Post_ __inexpressible_readableTo(length)
#define __out_xcount_full(size) __out_xcount_part(size,size)
#define __inout_xcount(size) __out_xcount(size) _Pre_ __valid
#define __inout_xcount_part(size,length) __out_xcount_part(size,length) _Pre_ __valid _Pre_ __inexpressible_readableTo(length)
#define __inout_xcount_full(size) __inout_xcount_part(size,size)
#define __xcount_opt(size) __xcount(size) __exceptthat __maybenull
#define __in_xcount_opt(size) __in_xcount(size) __exceptthat __maybenull
#define __out_xcount_opt(size) __out_xcount(size) __exceptthat __maybenull
#define __out_xcount_part_opt(size,length) __out_xcount_part(size,length) __exceptthat __maybenull
#define __out_xcount_full_opt(size) __out_xcount_full(size) __exceptthat __maybenull
#define __inout_xcount_opt(size) __inout_xcount(size) __exceptthat __maybenull
#define __inout_xcount_part_opt(size,length) __inout_xcount_part(size,length) __exceptthat __maybenull
#define __inout_xcount_full_opt(size) __inout_xcount_full(size) __exceptthat __maybenull
#define __deref_xcount(size) __ecount(1) _Post_ __elem_readableTo(1) _Post_ __deref __notnull _Post_ __deref __inexpressible_writableTo(size)
#define __deref_in __in _Pre_ __deref __deref __readonly
#define __deref_in_ecount(size) __deref_in _Pre_ __deref __elem_readableTo(size)
#define __deref_in_bcount(size) __deref_in _Pre_ __deref __byte_readableTo(size)
#define __deref_in_xcount(size) __deref_in _Pre_ __deref __inexpressible_readableTo(size)
#define __deref_out_xcount(size) __deref_xcount(size) _Post_ __deref __valid __refparam
#define __deref_out_xcount_part(size,length) __deref_out_xcount(size) _Post_ __deref __inexpressible_readableTo(length)
#define __deref_out_xcount_full(size) __deref_out_xcount_part(size,size)
#define __deref_out_xcount(size) __deref_xcount(size) _Post_ __deref __valid __refparam
#define __inout_xcount_opt(size) __inout_xcount(size) __exceptthat __maybenull
#define __inout_xcount_part_opt(size,length) __inout_xcount_part(size,length) __exceptthat __maybenull
#define __inout_xcount_full_opt(size) __inout_xcount_full(size) __exceptthat __maybenull
#define __deref_xcount(size) __ecount(1) _Post_ __elem_readableTo(1) _Post_ __deref __notnull _Post_ __deref __inexpressible_writableTo(size)
#define __deref_in __in _Pre_ __deref __deref __readonly
#define __deref_in_ecount(size) __deref_in _Pre_ __deref __elem_readableTo(size)
#define __deref_in_bcount(size) __deref_in _Pre_ __deref __byte_readableTo(size)
#define __deref_in_xcount(size) __deref_in _Pre_ __deref __inexpressible_readableTo(size)
#define __deref_out_xcount(size) __deref_xcount(size) _Post_ __deref __valid __refparam
#define __deref_out_xcount_part(size,length) __deref_out_xcount(size) _Post_ __deref __inexpressible_readableTo(length)
#define __deref_out_xcount_full(size) __deref_out_xcount_part(size,size)
#define __deref_out_xcount(size) __deref_xcount(size) _Post_ __deref __valid __refparam
#define __deref_inout_xcount(size) __deref_inout _Pre_ __deref __inexpressible_writableTo(size) _Post_ __deref __inexpressible_writableTo(size)
#define __deref_inout_xcount_part(size,length) __deref_inout_xcount(size) _Pre_ __deref __inexpressible_readableTo(length) _Post_ __deref __inexpressible_readableTo(length)
#define __deref_inout_xcount_full(size) __deref_inout_xcount_part(size,size)
#define __deref_xcount_opt(size) __deref_xcount(size) _Post_ __deref __exceptthat __maybenull
#define __deref_in_opt __deref_in _Pre_ __deref __exceptthat __maybenull
#define __deref_in_opt_out __deref_inout _Pre_ __deref __exceptthat __maybenull _Post_ __deref __notnull
#define __deref_in_ecount_opt(size) __deref_in_ecount(size) _Pre_ __deref __exceptthat __maybenull
#define __deref_in_bcount_opt(size) __deref_in_bcount(size) _Pre_ __deref __exceptthat __maybenull
#define __deref_in_xcount_opt(size) __deref_in_xcount(size) _Pre_ __deref __exceptthat __maybenull
#define __deref_out_xcount_opt(size) __deref_out_xcount(size) _Post_ __deref __exceptthat __maybenull
#define __deref_out_xcount_part_opt(size,length) __deref_out_xcount_part(size,length) _Post_ __deref __exceptthat __maybenull
#define __deref_out_xcount_full_opt(size) __deref_out_xcount_full(size) _Post_ __deref __exceptthat __maybenull
#define __deref_inout_xcount_opt(size) __deref_inout_xcount(size) _Pre_ __deref __exceptthat __maybenull _Post_ __deref __exceptthat __maybenull
#define __deref_inout_xcount_part_opt(size,length) __deref_inout_xcount_part(size,length) _Pre_ __deref __exceptthat __maybenull _Post_ __deref __exceptthat __maybenull
#define __deref_inout_xcount_full_opt(size) __deref_inout_xcount_full(size) _Pre_ __deref __exceptthat __maybenull _Post_ __deref __exceptthat __maybenull
#define __deref_opt_xcount(size) __deref_xcount(size) __exceptthat __maybenull
#define __deref_opt_in __deref_in __exceptthat __maybenull
#define __deref_opt_in_ecount(size) __deref_in_ecount(size) __exceptthat __maybenull
#define __deref_opt_in_bcount(size) __deref_in_bcount(size) __exceptthat __maybenull
#define __deref_opt_in_xcount(size) __deref_in_xcount(size) __exceptthat __maybenull
#define __deref_opt_out_xcount(size) __deref_out_xcount(size) __exceptthat __maybenull
#define __deref_opt_out_xcount_part(size,length) __deref_out_xcount_part(size,length) __exceptthat __maybenull
#define __deref_opt_out_xcount_full(size) __deref_out_xcount_full(size) __exceptthat __maybenull
#define __deref_opt_inout_xcount(size) __deref_inout_xcount(size) __exceptthat __maybenull
#define __deref_opt_inout_xcount_part(size,length) __deref_inout_xcount_part(size,length) __exceptthat __maybenull
#define __deref_opt_inout_xcount_full(size) __deref_inout_xcount_full(size) __exceptthat __maybenull
#define __deref_opt_xcount_opt(size) __deref_xcount_opt(size) __exceptthat __maybenull
#define __deref_opt_in_opt __deref_in_opt __exceptthat __maybenull
#define __deref_opt_in_ecount_opt(size) __deref_in_ecount_opt(size) __exceptthat __maybenull
#define __deref_opt_in_bcount_opt(size) __deref_in_bcount_opt(size) __exceptthat __maybenull
#define __deref_opt_in_xcount_opt(size) __deref_in_xcount_opt(size) __exceptthat __maybenull
#define __deref_opt_out_xcount_opt(size) __deref_out_xcount_opt(size) __exceptthat __maybenull
#define __deref_opt_out_xcount_part_opt(size,length) __deref_out_xcount_part_opt(size,length) __exceptthat __maybenull
#define __deref_opt_out_xcount_full_opt(size) __deref_out_xcount_full_opt(size) __exceptthat __maybenull
#define __deref_opt_inout_xcount_opt(size) __deref_inout_xcount_opt(size) __exceptthat __maybenull
#define __deref_opt_inout_xcount_part_opt(size,length) __deref_inout_xcount_part_opt(size,length) __exceptthat __maybenull
#define __deref_opt_inout_xcount_full_opt(size) __deref_inout_xcount_full_opt(size) __exceptthat __maybenull
#define __deref_in_ecount_iterator(size, incr) __inout _Pre_ __deref __elem_readableTo(size) __deref_out_range(==, _Old_(*_Curr_) + incr)
#define __deref_out_ecount_iterator(size, incr) __inout _Pre_ __deref __elem_writableTo(size) __deref_out_range(==, _Old_(*_Curr_) + incr)
#define __deref_inout_ecount_iterator(size, incr) __inout _Pre_ __deref __elem_readableTo(size) _Pre_ __deref __elem_writableTo(size) __deref_out_range(==, _Old_(*_Curr_) + incr)
#define __post_bcount(size) _Post_ __byte_writableTo(size)
#define __post_ecount(size) _Post_ __elem_writableTo(size)
#define __deref_realloc_bcount(insize, outsize) __inout _Pre_ __deref __byte_readableTo(insize) _Post_ __deref __byte_writableTo(outsize)
/* __in_ecount_or_z(c) specifies semantics like strncmp, where a string
* parameter is either null terminated, or valid up to c elements.
*/
#define __in_ecount_or_z(c) _When_(_String_length_(_Curr_) < (c), __in_z) \
_When_(_String_length_(_Curr_) >= (c), __in_ecount(c))
/* Provide default definition to be overridden when needed */
#define __post_nullnullterminated
/* Must protect redefinitions of macros to work around rc.exe issues. */
#ifndef RC_INVOKED
#undef __nullnullterminated
#define __nullnullterminated __inexpressible_readableTo("string terminated by two nulls") __nullterminated
#undef __post_nullnullterminated
#define __post_nullnullterminated _Post_ __inexpressible_readableTo("string terminated by two nulls") _Post_ __nullterminated
#endif
#endif //__SAL_H_FULL_VER <= 140050727
/************************************************************************
New extensions to sal.h follow here.
*************************************************************************/
#if (_MSC_VER >= 1000) && !defined(__midl) && defined(_PREFAST_)
#define __file_parser(typ) _SA_annotes2(SAL_file_parser,"function",typ)
#define __file_parser_class(typ) _SA_annotes2(SAL_file_parser,"class",typ)
#define __file_parser_library(typ) extern int _SA_annotes2(SAL_file_parser, "library", typ) __iSALFileParserLibrary##typ;
#define __source_code_content(typ) extern int _SA_annotes1(SAL_source_code_content, typ) __iSAL_Source_Code_Content##typ;
#define __class_code_content(typ) _SA_annotes1(SAL_class_code_content, typ)
#define __analysis_assert(e) __assume(e)
#define __analysis_hint(hint) _SA_annotes1(SAL_analysisHint, hint)
// For "breakpoint": doesn't return as far as analysis is concerned.
#define __analysis_noreturn __declspec(noreturn)
/* Internal definitions */
#define __inner_data_source(src_raw) _SA_annotes1(SAL_untrusted_data_source,src_raw)
#define __inner_this_data_source(src_raw) _SA_annotes1(SAL_untrusted_data_source_this,src_raw)
#define __inner_out_validated(typ_raw) _Post_ _SA_annotes1(SAL_validated,typ_raw)
#define __inner_this_out_validated(typ_raw) _SA_annotes1(SAL_validated_this,typ_raw)
#define __inner_assume_validated_dec __inline __nothrow void __AssumeValidated(__inner_out_validated("BY_DESIGN") const void *p) {p;}
#define __inner_assume_validated(p) __AssumeValidated(p)
#define __inner_transfer(formal) _SA_annotes1(SAL_transfer_adt_property_from,formal)
#define __inner_encoded _SA_annotes0(SAL_encoded)
#if defined(_MSC_EXTENSIONS) || defined(_PREFAST_) || defined(OACR)
#define __inner_adt_prop(adt,prop) _SA_annotes2(SAL_adt, adt,prop)
#define __inner_adt_add_prop(adt,prop) _SA_annotes2(SAL_add_adt_property,adt,prop)
#define __inner_adt_remove_prop(adt,prop) _SA_annotes2(SAL_remove_adt_property,adt,prop)
#define __inner_adt_transfer_prop(arg) _SA_annotes1(SAL_transfer_adt_property_from,arg)
#define __inner_adt_type_props(typ) _SA_annotes1(SAL_post_type,typ)
#define __inner_volatile _SA_annotes0(SAL_volatile)
#define __inner_nonvolatile _SA_annotes0(SAL_nonvolatile)
#define __inner_possibly_notnullterminated _SA_annotes1(SAL_nullTerminated,__maybe)
#define __inner_analysis_assume_nullterminated_dec __inline __nothrow void __AnalysisAssumeNullterminated(_Post_ __nullterminated void *p) {*(char*)p=0;}
#define __inner_analysis_assume_nullterminated(x) __AnalysisAssumeNullterminated(x);
#endif
#else
#define __file_parser(typ)
#define __file_parser_class(typ)
#define __file_parser_library(typ)
#define __source_code_content(typ)
#define __class_code_content(typ)
#define __analysis_assert(e)
#define __analysis_hint(hint)
#define __analysis_noreturn
/* Internal definitions */
#define __inner_data_source(src_raw)
#define __inner_this_data_source(src_raw)
#define __inner_out_validated(typ_raw)
#define __inner_this_out_validated(typ_raw)
#define __inner_assume_validated_dec
#define __inner_assume_validated(p)
#define __inner_transfer(formal)
#define __inner_encoded
#define __inner_adt_prop(adt,prop)
#define __inner_adt_add_prop(adt,prop)
#define __inner_adt_remove_prop(adt,prop)
#define __inner_adt_transfer_prop(arg)
#define __inner_adt_type_props(typ)
#define __inner_volatile
#define __inner_nonvolatile
#define __inner_possibly_notnullterminated
#define __inner_analysis_assume_nullterminated_dec
#define __inner_analysis_assume_nullterminated(x)
#endif // #if (_MSC_VER >= 1000) && !defined(__midl) && defined(_PREFAST_)
#define __field_ecount(size) __notnull __elem_writableTo(size)
#define __field_bcount(size) __notnull __byte_writableTo(size)
#define __field_xcount(size) __notnull __inexpressible_writableTo(size)
#define __field_ecount_opt(size) __maybenull __elem_writableTo(size)
#define __field_bcount_opt(size) __maybenull __byte_writableTo(size)
#define __field_xcount_opt(size) __maybenull __inexpressible_writableTo(size)
#define __field_ecount_part(size,init) __notnull __elem_writableTo(size) __elem_readableTo(init)
#define __field_bcount_part(size,init) __notnull __byte_writableTo(size) __byte_readableTo(init)
#define __field_xcount_part(size,init) __notnull __inexpressible_writableTo(size) __inexpressible_readableTo(init)
#define __field_ecount_part_opt(size,init) __maybenull __elem_writableTo(size) __elem_readableTo(init)
#define __field_bcount_part_opt(size,init) __maybenull __byte_writableTo(size) __byte_readableTo(init)
#define __field_xcount_part_opt(size,init) __maybenull __inexpressible_writableTo(size) __inexpressible_readableTo(init)
#define __field_ecount_full(size) __field_ecount_part(size,size)
#define __field_bcount_full(size) __field_bcount_part(size,size)
#define __field_xcount_full(size) __field_xcount_part(size,size)
#define __field_ecount_full_opt(size) __field_ecount_part_opt(size,size)
#define __field_bcount_full_opt(size) __field_bcount_part_opt(size,size)
#define __field_xcount_full_opt(size) __field_xcount_part_opt(size,size)
#define __field_nullterminated __nullterminated
#define __struct_bcount(size) __byte_writableTo(size)
#define __struct_xcount(size) __inexpressible_writableTo(size)
#define __out_awcount(expr,size) _Pre_ __notnull \
__byte_writableTo((expr) ? (size) : (size) * 2) \
_Post_ __valid __refparam
#define __in_awcount(expr,size) _Pre_ __valid \
_Pre_ _Notref_ __deref __readonly \
__byte_readableTo((expr) ? (size) : (size) * 2)
#define __post_invalid _Post_ __notvalid
/* integer related macros */
#define __allocator __inner_allocator
#ifndef PAL_STDCPP_COMPAT
#define __deallocate(kind) _Pre_ __notnull __post_invalid
#define __deallocate_opt(kind) _Pre_ __maybenull __post_invalid
#endif
#define __bound __inner_bound
#define __range(lb,ub) __inner_range(lb,ub)
#define __in_bound _Pre_ __inner_bound
#define __out_bound _Post_ __inner_bound
#define __deref_out_bound _Post_ __deref __inner_bound
#define __in_range(lb,ub) _Pre_ __inner_range(lb,ub)
#define __out_range(lb,ub) _Post_ __inner_range(lb,ub)
#define __deref_in_range(lb,ub) _Pre_ __deref __inner_range(lb,ub)
#define __deref_out_range(lb,ub) _Post_ __deref __inner_range(lb,ub)
#define __deref_inout_range(lb,ub) __deref_in_range(lb,ub) __deref_out_range(lb,ub)
#define __field_range(lb,ub) __range(lb,ub)
#define __field_data_source(src_sym) __inner_data_source(#src_sym)
#define __range_max(a,b) __range(==, a > b ? a : b)
#define __range_min(a,b) __range(==, a < b ? a : b)
/* Penetration review macros */
#define __in_data_source(src_sym) _Pre_ __inner_data_source(#src_sym)
#define __out_data_source(src_sym) _Post_ __inner_data_source(#src_sym)
#define __out_validated(typ_sym) __inner_out_validated(#typ_sym)
#define __this_out_data_source(src_sym) __inner_this_data_source(#src_sym)
#define __this_out_validated(typ_sym) __inner_this_out_validated(#typ_sym)
#define __transfer(formal) _Post_ __inner_transfer(formal)
#define __rpc_entry __inner_control_entrypoint(RPC)
#define __kernel_entry __inner_control_entrypoint(UserToKernel)
#define __gdi_entry __inner_control_entrypoint(GDI)
#define __encoded_pointer __inner_encoded
#define __encoded_array __inner_encoded
#define __field_encoded_pointer __inner_encoded
#define __field_encoded_array __inner_encoded
#if defined(_MSC_EXTENSIONS) || defined(_PREFAST_) || defined(OACR)
#define __type_has_adt_prop(adt,prop) __inner_adt_prop(adt,prop)
#define __out_has_adt_prop(adt,prop) _Post_ __inner_adt_add_prop(adt,prop)
#define __out_not_has_adt_prop(adt,prop) _Post_ __inner_adt_remove_prop(adt,prop)
#define __out_transfer_adt_prop(arg) _Post_ __inner_adt_transfer_prop(arg)
#define __out_has_type_adt_props(typ) _Post_ __inner_adt_type_props(typ)
/* useful PFD related macros */
#define __possibly_notnullterminated __inner_possibly_notnullterminated
/* Windows Internal */
#define __volatile __inner_volatile
#define __nonvolatile __inner_nonvolatile
#else
#define __out_has_type_adt_props(typ) /* nothing */
#endif
#define __deref_volatile __deref __volatile
#define __deref_nonvolatile __deref __nonvolatile
/* declare stub functions for macros */
__inner_assume_validated_dec
__inner_assume_bound_dec
__inner_analysis_assume_nullterminated_dec
#define __analysis_assume_nullterminated(x) __inner_analysis_assume_nullterminated(x)
#define __assume_validated(p) __inner_assume_validated(p)
#define __assume_bound(i) __inner_assume_bound(i)
/**************************************************************************
* SAL 2 extensions for Windows-specific APIs.
***************************************************************************/
// Annotation for parameters that are not used in any way by the function.
// Unlike _Reserved_, an _Unreferenced_parameter_ pointer need not be NULL.
#ifndef _Unreferenced_parameter_
#define _Unreferenced_parameter_ _Const_
#endif
// Pointer parameters that are freed by the function, and thus the pointed-to
// memory should not be used after return.
#ifndef _Frees_ptr_
#define _Frees_ptr_ _Pre_notnull_ _Post_ptr_invalid_
#endif
#ifndef _Frees_ptr_opt_
#define _Frees_ptr_opt_ _Pre_maybenull_ _Post_ptr_invalid_
#endif
// NLS APIs allow strings to be specified either by an element count or
// null termination. Unlike _In_reads_or_z_, this is not whichever comes
// first, but based on whether the size is negative or not.
#define _In_NLS_string_(size) _When_((size) < 0, _In_z_) \
_When_((size) >= 0, _In_reads_(size))
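The comment above captures the NLS convention: a negative length means the string is null-terminated, while a non-negative length means exactly that many elements are readable. A hypothetical prototype using the annotation:

// Hypothetical API following the NLS length convention described above.
int CompareNames(_In_NLS_string_(cchFirst)  const wchar_t* pszFirst,  int cchFirst,
                 _In_NLS_string_(cchSecond) const wchar_t* pszSecond, int cchSecond);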
// Minifilter CompletionContext parameters on the pre-operation callback
// default to NULL. For return type FLT_PREOP_SUCCESS_WITH_CALLBACK or
// FLT_PREOP_SYNCHRONIZE, it may be set to NULL or a valid pointer. For all
// other returns, it must be NULL.
#define _Flt_CompletionContext_Outptr_ \
_Outptr_result_maybenull_ _Pre_valid_ \
_At_(*_Curr_, _Pre_null_ \
_When_(return != FLT_PREOP_SUCCESS_WITH_CALLBACK && return != FLT_PREOP_SYNCHRONIZE, _Post_null_))
// Minifilter ConnectionCookie parameters on the port connect notify callback
// default to NULL. On successful return, it may be set to NULL or non-NULL,
// but it must be NULL on failure.
#define _Flt_ConnectionCookie_Outptr_ \
_Outptr_result_maybenull_ _Pre_valid_ \
_At_(*_Curr_, _Pre_null_ _On_failure_(_Post_null_))
//
// A common pattern is to pass an "_Inout_ PCHAR* ppBuf" of size "_Inout_ DWORD* pSize"
// to a function that writes to **pBuf, incrementing *ppBuf to point to one
// past the last written byte. Thus the length of the write is
// (*ppBuf - Old(*ppBuf)). The size of the remaining unwritten capacity
// is written to *pSize.
//
// This pattern is frequently used when progressively filling a
// large buffer in chunks
// (e.g. when reading from a network interface in a driver).
//
// It is expected that these supplementary annotations would be used inside an
// _At_, like so:
//
// _At_(*ppBuf, _Writes_and_advances_ptr_(*pBufSize))
// HRESULT WriteChunkOfData(_Inout_ PCHAR* ppBuf, _Inout_ DWORD* pBufSize);
//
#ifndef _Writes_and_advances_ptr_
#define _Writes_and_advances_ptr_(size) \
_At_((void*)_Curr_, _Inout_) \
_At_(_Curr_, \
_Pre_writable_size_(size) \
_Post_writable_size_(size) \
_Post_satisfies_(_Curr_ - _Old_(_Curr_) == size)) \
_At_(_Old_(_Curr_), \
_Post_readable_size_(_Old_(size) - size))
#endif
#ifndef _Writes_bytes_and_advances_ptr_
#define _Writes_bytes_and_advances_ptr_(size) \
_At_((void*)_Curr_, _Inout_) \
_At_(_Curr_, \
_Pre_writable_byte_size_(size) \
_Post_writable_byte_size_(size) \
_Post_satisfies_(((char*)_Curr_) - ((void*)_Old_(_Curr_)) == size)) \
_At_(_Old_(_Curr_), \
_Post_readable_byte_size_(_Old_(size) - size))
#endif
//
// Gets the current error code (as returned by GetLastError()), and stores
// in _Curr_ as a postcondition. This is currently approximated by assuming
// that GetLastError() always returns a failed error code. This is not a
// completely accurate approximation, but reasonable.
//
#define _Post_equals_last_error_ _Post_satisfies_(_Curr_ != 0)
#ifdef __cplusplus
}
#endif
#ifdef _PREFIX_
/**************************************************************************
* Definition of __pfx_assume and __pfx_assert. These should be the only
* definitions of these functions.
***************************************************************************/
#if __cplusplus
extern "C" void __pfx_assert(bool, const char *);
extern "C" void __pfx_assume(bool, const char *);
#else
void __pfx_assert(int, const char *);
void __pfx_assume(int, const char *);
#endif
/**************************************************************************
* Redefinition of __analysis_assume and __analysis_assert for PREFIX build
**************************************************************************/
#undef __analysis_assume
#undef __analysis_assert
#define __analysis_assume(e) (__pfx_assume(e,"pfx_assume"),__assume(e));
#define __analysis_assert(e) (__pfx_assert(e,"pfx_assert"),__assume(e));
#endif /* ifdef _PREFIX_ */
/**************************************************************************
* This include should always be the last thing in this file.
* Must avoid redefinitions of macros to work around rc.exe issues.
***************************************************************************/
#if !(defined(RC_INVOKED) || defined(SORTPP_PASS))
#include <specstrings_strict.h>
#endif /* if !(defined(RC_INVOKED) || defined(SORTPP_PASS)) */
/*
If no SAL 2 appears to have been defined (_Outptr_ is a representative choice)
then we must be operating in a downlevel build environment (such as VS10).
We also test against the compiler version to identify a downlevel environment,
as VS11 is the minimum required for SAL 2 support.
If we are operating in a downlevel build environment (such as VS10)
we need to undefine the following symbols before including driverspecs.h
or we will end up referencing SAL 2 implementation symbols and cause
build failures.
*/
#if (!defined(_Outptr_) || _MSC_VER <= 1600) && !( defined( MIDL_PASS ) || defined(__midl) || defined(RC_INVOKED) ) /*IFSTRIP=IGN*/
#undef __ANNOTATION
#define __ANNOTATION(fun) /* fun */
#undef __PRIMOP
#define __PRIMOP(type, fun)
#endif /* !defined(_Outptr_) || _MSC_VER <= 1600 */
// ROTOR doesn't need driverspecs.h
// #include <driverspecs.h>
/*
If no SAL 2 appears to have been defined (_Outptr_ is a representative choice)
then we must be operating in a downlevel build environment (such as VS10).
We also test against the compiler version to identify a downlevel environment,
as VS11 is the minimum required for SAL 2 support.
If we are in a downlevel environment, we can go ahead and include no_sal2.h
to make all of SAL 2 no-ops to ensure no build failures.
*/
#if (!defined(_Outptr_) || _MSC_VER <= 1600) && !( defined( MIDL_PASS ) || defined(__midl) || defined(RC_INVOKED) ) && !( defined( _SDV_ ) ) /*IFSTRIP=IGN*/
#include <no_sal2.h>
#endif /* !defined(_Outptr_) || _MSC_VER <= 1600 */
#endif /* #ifndef SPECSTRINGS_H */

The diff for this file is not shown because of its large size. Load diff

View file

@ -4,7 +4,7 @@ include(CheckCXXSourceCompiles)
include(CheckCXXSourceRuns)
if (NOT WIN32)
include_directories(SYSTEM /usr/local/include)
include_directories(SYSTEM /usr/local/include)
endif ()
if (CMAKE_SYSTEM_NAME STREQUAL Linux)

8
src/Native/gc/env/gcenv.unix.cpp vendored
View file

@ -116,6 +116,7 @@ void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
}
}
#if 0
void CLREventStatic::CreateManualEvent(bool bInitialState)
{
// TODO: Implement
@ -197,6 +198,7 @@ uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
return result;
}
#endif // 0
bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
{
@ -331,6 +333,7 @@ int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
#if 0
#ifdef _MSC_VER
__declspec(thread)
#else
@ -364,12 +367,13 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
pThread->m_pNext = g_pThreadList;
g_pThreadList = pThread;
}
#endif // 0
void DestroyThread(Thread * pThread)
{
// TODO: Implement
}
#if 0
void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
{
GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
@ -427,6 +431,8 @@ bool IsGCSpecialThread()
return false;
}
#endif // 0
bool PalHasCapability(PalCapability capability)
{
// TODO: Implement for background GC

View file

@ -25,15 +25,11 @@ else()
../env/gcenv.unix.cpp)
endif()
add_executable(gcsample
${SOURCES}
)
if(CLR_CMAKE_PLATFORM_UNIX)
add_compile_options(-Wno-format)
add_compile_options(-Wno-unused-variable)
add_compile_options(-Wno-unused-private-field)
add_compile_options(-Wno-tautological-undefined-compare)
add_compile_options(-Wno-format)
add_compile_options(-Wno-unused-variable)
add_compile_options(-Wno-unused-private-field)
add_compile_options(-Wno-tautological-undefined-compare)
endif()
if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
@ -52,5 +48,9 @@ else()
clr_unknown_arch()
endif()
add_executable(gcsample
${SOURCES}
)
# Install gcsample
install (TARGETS gcsample DESTINATION .)