Merge pull request #1 from sergiy-k/GcFiles

Add GC source files
Jan Kotas 2015-09-16 19:32:26 -07:00
Parent commits: 7abb2d32a3 add643f03b
Commit: 65eeb22c09
34 changed files with 57973 additions and 0 deletions


@@ -0,0 +1,49 @@
project(clrgc)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories(env)
set(SOURCES
gccommon.cpp
gceewks.cpp
gcscan.cpp
gcwks.cpp
handletable.cpp
handletablecache.cpp
handletablecore.cpp
handletablescan.cpp
objecthandle.cpp
)
if(WIN32)
list(APPEND SOURCES
env/gcenv.windows.cpp)
else()
list(APPEND SOURCES
env/gcenv.unix.cpp)
endif()
if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-D_TARGET_AMD64_=1)
add_definitions(-D_WIN64=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
add_definitions(-D_TARGET_X86_=1)
add_definitions(-D_WIN32=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
add_definitions(-D_TARGET_ARM_=1)
add_definitions(-D_WIN32=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
add_definitions(-D_TARGET_ARM64_=1)
add_definitions(-D_WIN64=1)
else()
clr_unknown_arch()
endif()
add_compile_options(-Wno-format)
add_compile_options(-Wno-unused-variable)
add_compile_options(-Wno-unused-private-field)
add_compile_options(-Wno-tautological-undefined-compare)
add_library(clrgc STATIC ${SOURCES})

10
src/Native/gc/env/common.cpp vendored Normal file

@@ -0,0 +1,10 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
// common.cpp : source file that includes just the standard includes
// GCSample.pch will be the pre-compiled header
// common.obj will contain the pre-compiled type information
#include "common.h"

29
src/Native/gc/env/common.h vendored Normal file

@@ -0,0 +1,29 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
// common.h : include file for standard system include files,
// or project specific include files that are used frequently, but
// are changed infrequently
//
#pragma once
#define _CRT_SECURE_NO_WARNINGS
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <wchar.h>
#include <assert.h>
#include <stdarg.h>
#include <memory.h>
#include <new>
#ifndef WIN32
#include <pthread.h>
#endif
using namespace std;

400
src/Native/gc/env/etmdummy.h vendored Normal file

@@ -0,0 +1,400 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
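// Dummy definitions of the ETW event-firing macros: every FireEtw* event below
// is stubbed out as a no-op (expands to 0) for builds that do not emit ETW events.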
#define FireEtwGCStart(Count, Reason) 0
#define FireEtwGCStart_V1(Count, Depth, Reason, Type, ClrInstanceID) 0
#define FireEtwGCStart_V2(Count, Depth, Reason, Type, ClrInstanceID, ClientSequenceNumber) 0
#define FireEtwGCEnd(Count, Depth) 0
#define FireEtwGCEnd_V1(Count, Depth, ClrInstanceID) 0
#define FireEtwGCRestartEEEnd() 0
#define FireEtwGCRestartEEEnd_V1(ClrInstanceID) 0
#define FireEtwGCHeapStats(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount) 0
#define FireEtwGCHeapStats_V1(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount, ClrInstanceID) 0
#define FireEtwGCCreateSegment(Address, Size, Type) 0
#define FireEtwGCCreateSegment_V1(Address, Size, Type, ClrInstanceID) 0
#define FireEtwGCFreeSegment(Address) 0
#define FireEtwGCFreeSegment_V1(Address, ClrInstanceID) 0
#define FireEtwGCRestartEEBegin() 0
#define FireEtwGCRestartEEBegin_V1(ClrInstanceID) 0
#define FireEtwGCSuspendEEEnd() 0
#define FireEtwGCSuspendEEEnd_V1(ClrInstanceID) 0
#define FireEtwGCSuspendEEBegin(Reason) 0
#define FireEtwGCSuspendEEBegin_V1(Reason, Count, ClrInstanceID) 0
#define FireEtwGCAllocationTick(AllocationAmount, AllocationKind) 0
#define FireEtwGCAllocationTick_V1(AllocationAmount, AllocationKind, ClrInstanceID) 0
#define FireEtwGCAllocationTick_V2(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex) 0
#define FireEtwGCAllocationTick_V3(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex, Address) 0
#define FireEtwGCCreateConcurrentThread() 0
#define FireEtwGCCreateConcurrentThread_V1(ClrInstanceID) 0
#define FireEtwGCTerminateConcurrentThread() 0
#define FireEtwGCTerminateConcurrentThread_V1(ClrInstanceID) 0
#define FireEtwGCFinalizersEnd(Count) 0
#define FireEtwGCFinalizersEnd_V1(Count, ClrInstanceID) 0
#define FireEtwGCFinalizersBegin() 0
#define FireEtwGCFinalizersBegin_V1(ClrInstanceID) 0
#define FireEtwBulkType(Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkRootEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkRootConditionalWeakTableElementEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkNode(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCSampledObjectAllocationHigh(Address, TypeID, ObjectCountForTypeSample, TotalSizeForTypeSample, ClrInstanceID) 0
#define FireEtwGCBulkSurvivingObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkMovedObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCGenerationRange(Generation, RangeStart, RangeUsedLength, RangeReservedLength, ClrInstanceID) 0
#define FireEtwGCMarkStackRoots(HeapNum, ClrInstanceID) 0
#define FireEtwGCMarkFinalizeQueueRoots(HeapNum, ClrInstanceID) 0
#define FireEtwGCMarkHandles(HeapNum, ClrInstanceID) 0
#define FireEtwGCMarkOlderGenerationRoots(HeapNum, ClrInstanceID) 0
#define FireEtwFinalizeObject(TypeID, ObjectID, ClrInstanceID) 0
#define FireEtwSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
#define FireEtwDestroyGCHandle(HandleID, ClrInstanceID) 0
#define FireEtwGCSampledObjectAllocationLow(Address, TypeID, ObjectCountForTypeSample, TotalSizeForTypeSample, ClrInstanceID) 0
#define FireEtwPinObjectAtGCTime(HandleID, ObjectID, ObjectSize, TypeName, ClrInstanceID) 0
#define FireEtwGCTriggered(Reason, ClrInstanceID) 0
#define FireEtwGCBulkRootCCW(Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkRCW(Count, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwGCBulkRootStaticVar(Count, AppDomainID, ClrInstanceID, Values_Len_, Values) 0
#define FireEtwWorkerThreadCreate(WorkerThreadCount, RetiredWorkerThreads) 0
#define FireEtwWorkerThreadTerminate(WorkerThreadCount, RetiredWorkerThreads) 0
#define FireEtwWorkerThreadRetire(WorkerThreadCount, RetiredWorkerThreads) 0
#define FireEtwWorkerThreadUnretire(WorkerThreadCount, RetiredWorkerThreads) 0
#define FireEtwIOThreadCreate(IOThreadCount, RetiredIOThreads) 0
#define FireEtwIOThreadCreate_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
#define FireEtwIOThreadTerminate(IOThreadCount, RetiredIOThreads) 0
#define FireEtwIOThreadTerminate_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
#define FireEtwIOThreadRetire(IOThreadCount, RetiredIOThreads) 0
#define FireEtwIOThreadRetire_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
#define FireEtwIOThreadUnretire(IOThreadCount, RetiredIOThreads) 0
#define FireEtwIOThreadUnretire_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
#define FireEtwThreadpoolSuspensionSuspendThread(ClrThreadID, CpuUtilization) 0
#define FireEtwThreadpoolSuspensionResumeThread(ClrThreadID, CpuUtilization) 0
#define FireEtwThreadPoolWorkerThreadStart(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadStop(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadRetirementStart(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadRetirementStop(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadAdjustmentSample(Throughput, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadAdjustmentAdjustment(AverageThroughput, NewWorkerThreadCount, Reason, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadAdjustmentStats(Duration, Throughput, ThreadWave, ThroughputWave, ThroughputErrorEstimate, AverageThroughputErrorEstimate, ThroughputRatio, Confidence, NewControlSetting, NewThreadWaveMagnitude, ClrInstanceID) 0
#define FireEtwThreadPoolWorkerThreadWait(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
#define FireEtwThreadPoolWorkingThreadCount(Count, ClrInstanceID) 0
#define FireEtwThreadPoolEnqueue(WorkID, ClrInstanceID) 0
#define FireEtwThreadPoolDequeue(WorkID, ClrInstanceID) 0
#define FireEtwThreadPoolIOEnqueue(NativeOverlapped, Overlapped, MultiDequeues, ClrInstanceID) 0
#define FireEtwThreadPoolIODequeue(NativeOverlapped, Overlapped, ClrInstanceID) 0
#define FireEtwThreadPoolIOPack(NativeOverlapped, Overlapped, ClrInstanceID) 0
#define FireEtwThreadCreating(ID, ClrInstanceID) 0
#define FireEtwThreadRunning(ID, ClrInstanceID) 0
#define FireEtwExceptionThrown() 0
#define FireEtwExceptionThrown_V1(ExceptionType, ExceptionMessage, ExceptionEIP, ExceptionHRESULT, ExceptionFlags, ClrInstanceID) 0
#define FireEtwExceptionCatchStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
#define FireEtwExceptionCatchStop() 0
#define FireEtwExceptionFinallyStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
#define FireEtwExceptionFinallyStop() 0
#define FireEtwExceptionFilterStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
#define FireEtwExceptionFilterStop() 0
#define FireEtwExceptionThrownStop() 0
#define FireEtwContention() 0
#define FireEtwContentionStart_V1(ContentionFlags, ClrInstanceID) 0
#define FireEtwContentionStop(ContentionFlags, ClrInstanceID) 0
#define FireEtwCLRStackWalk(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
#define FireEtwAppDomainMemAllocated(AppDomainID, Allocated, ClrInstanceID) 0
#define FireEtwAppDomainMemSurvived(AppDomainID, Survived, ProcessSurvived, ClrInstanceID) 0
#define FireEtwThreadCreated(ManagedThreadID, AppDomainID, Flags, ManagedThreadIndex, OSThreadID, ClrInstanceID) 0
#define FireEtwThreadTerminated(ManagedThreadID, AppDomainID, ClrInstanceID) 0
#define FireEtwThreadDomainEnter(ManagedThreadID, AppDomainID, ClrInstanceID) 0
#define FireEtwILStubGenerated(ClrInstanceID, ModuleID, StubMethodID, StubFlags, ManagedInteropMethodToken, ManagedInteropMethodNamespace, ManagedInteropMethodName, ManagedInteropMethodSignature, NativeMethodSignature, StubMethodSignature, StubMethodILCode) 0
#define FireEtwILStubCacheHit(ClrInstanceID, ModuleID, StubMethodID, ManagedInteropMethodToken, ManagedInteropMethodNamespace, ManagedInteropMethodName, ManagedInteropMethodSignature) 0
#define FireEtwDCStartCompleteV2() 0
#define FireEtwDCEndCompleteV2() 0
#define FireEtwMethodDCStartV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodDCEndV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodDCStartVerboseV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodDCEndVerboseV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodLoad(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodLoad_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
#define FireEtwMethodLoad_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
#define FireEtwMethodUnload(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodUnload_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
#define FireEtwMethodUnload_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
#define FireEtwMethodLoadVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodLoadVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
#define FireEtwMethodLoadVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
#define FireEtwMethodUnloadVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodUnloadVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
#define FireEtwMethodUnloadVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
#define FireEtwMethodJittingStarted(MethodID, ModuleID, MethodToken, MethodILSize, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodJittingStarted_V1(MethodID, ModuleID, MethodToken, MethodILSize, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
#define FireEtwMethodJitInliningSucceeded(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, InlinerNamespace, InlinerName, InlinerNameSignature, InlineeNamespace, InlineeName, InlineeNameSignature, ClrInstanceID) 0
#define FireEtwMethodJitInliningFailed(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, InlinerNamespace, InlinerName, InlinerNameSignature, InlineeNamespace, InlineeName, InlineeNameSignature, FailAlways, FailReason, ClrInstanceID) 0
#define FireEtwMethodJitTailCallSucceeded(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, CallerNamespace, CallerName, CallerNameSignature, CalleeNamespace, CalleeName, CalleeNameSignature, TailPrefix, TailCallType, ClrInstanceID) 0
#define FireEtwMethodJitTailCallFailed(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, CallerNamespace, CallerName, CallerNameSignature, CalleeNamespace, CalleeName, CalleeNameSignature, TailPrefix, FailReason, ClrInstanceID) 0
#define FireEtwMethodILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
#define FireEtwModuleDCStartV2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwModuleDCEndV2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwDomainModuleLoad(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwDomainModuleLoad_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleLoad(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwModuleLoad_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleLoad_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
#define FireEtwModuleUnload(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwModuleUnload_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleUnload_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
#define FireEtwAssemblyLoad(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
#define FireEtwAssemblyLoad_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwAssemblyUnload(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
#define FireEtwAssemblyUnload_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwAppDomainLoad(AppDomainID, AppDomainFlags, AppDomainName) 0
#define FireEtwAppDomainLoad_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
#define FireEtwAppDomainUnload(AppDomainID, AppDomainFlags, AppDomainName) 0
#define FireEtwAppDomainUnload_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
#define FireEtwModuleRangeLoad(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
#define FireEtwStrongNameVerificationStart(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName) 0
#define FireEtwStrongNameVerificationStart_V1(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwStrongNameVerificationStop(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName) 0
#define FireEtwStrongNameVerificationStop_V1(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwAuthenticodeVerificationStart(VerificationFlags, ErrorCode, ModulePath) 0
#define FireEtwAuthenticodeVerificationStart_V1(VerificationFlags, ErrorCode, ModulePath, ClrInstanceID) 0
#define FireEtwAuthenticodeVerificationStop(VerificationFlags, ErrorCode, ModulePath) 0
#define FireEtwAuthenticodeVerificationStop_V1(VerificationFlags, ErrorCode, ModulePath, ClrInstanceID) 0
#define FireEtwRuntimeInformationStart(ClrInstanceID, Sku, BclMajorVersion, BclMinorVersion, BclBuildNumber, BclQfeNumber, VMMajorVersion, VMMinorVersion, VMBuildNumber, VMQfeNumber, StartupFlags, StartupMode, CommandLine, ComObjectGuid, RuntimeDllPath) 0
#define FireEtwIncreaseMemoryPressure(BytesAllocated, ClrInstanceID) 0
#define FireEtwDecreaseMemoryPressure(BytesFreed, ClrInstanceID) 0
#define FireEtwGCMarkWithType(HeapNum, ClrInstanceID, Type, Bytes) 0
#define FireEtwGCJoin_V2(Heap, JoinTime, JoinType, ClrInstanceID, JoinID) 0
#define FireEtwGCPerHeapHistory_V3(ClrInstanceID, FreeListAllocated, FreeListRejected, EndOfSegAllocated, CondemnedAllocated, PinnedAllocated, PinnedAllocatedAdvance, RunningFreeListEfficiency, CondemnReasons0, CondemnReasons1, CompactMechanisms, ExpandMechanisms, HeapIndex, ExtraGen0Commit, Count, Values_Len_, Values) 0
#define FireEtwGCGlobalHeapHistory_V2(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID, PauseMode, MemoryPressure) 0
#define FireEtwDebugIPCEventStart() 0
#define FireEtwDebugIPCEventEnd() 0
#define FireEtwDebugExceptionProcessingStart() 0
#define FireEtwDebugExceptionProcessingEnd() 0
#define FireEtwCLRStackWalkDCStart(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
#define FireEtwMethodDCStart(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodDCStart_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
#define FireEtwMethodDCStart_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
#define FireEtwMethodDCEnd(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
#define FireEtwMethodDCEnd_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
#define FireEtwMethodDCEnd_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
#define FireEtwMethodDCStartVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodDCStartVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
#define FireEtwMethodDCStartVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
#define FireEtwMethodDCEndVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
#define FireEtwMethodDCEndVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
#define FireEtwMethodDCEndVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
#define FireEtwDCStartComplete() 0
#define FireEtwDCStartComplete_V1(ClrInstanceID) 0
#define FireEtwDCEndComplete() 0
#define FireEtwDCEndComplete_V1(ClrInstanceID) 0
#define FireEtwDCStartInit() 0
#define FireEtwDCStartInit_V1(ClrInstanceID) 0
#define FireEtwDCEndInit() 0
#define FireEtwDCEndInit_V1(ClrInstanceID) 0
#define FireEtwMethodDCStartILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
#define FireEtwMethodDCEndILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
#define FireEtwDomainModuleDCStart(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwDomainModuleDCStart_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwDomainModuleDCEnd(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwDomainModuleDCEnd_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleDCStart(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwModuleDCStart_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleDCStart_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
#define FireEtwModuleDCEnd(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
#define FireEtwModuleDCEnd_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
#define FireEtwModuleDCEnd_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
#define FireEtwAssemblyDCStart(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
#define FireEtwAssemblyDCStart_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwAssemblyDCEnd(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
#define FireEtwAssemblyDCEnd_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
#define FireEtwAppDomainDCStart(AppDomainID, AppDomainFlags, AppDomainName) 0
#define FireEtwAppDomainDCStart_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
#define FireEtwAppDomainDCEnd(AppDomainID, AppDomainFlags, AppDomainName) 0
#define FireEtwAppDomainDCEnd_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
#define FireEtwThreadDC(ManagedThreadID, AppDomainID, Flags, ManagedThreadIndex, OSThreadID, ClrInstanceID) 0
#define FireEtwModuleRangeDCStart(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
#define FireEtwModuleRangeDCEnd(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
#define FireEtwRuntimeInformationDCStart(ClrInstanceID, Sku, BclMajorVersion, BclMinorVersion, BclBuildNumber, BclQfeNumber, VMMajorVersion, VMMinorVersion, VMBuildNumber, VMQfeNumber, StartupFlags, StartupMode, CommandLine, ComObjectGuid, RuntimeDllPath) 0
#define FireEtwStressLogEvent(Facility, LogLevel, Message) 0
#define FireEtwStressLogEvent_V1(Facility, LogLevel, Message, ClrInstanceID) 0
#define FireEtwCLRStackWalkStress(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
#define FireEtwGCDecision(DoCompact) 0
#define FireEtwGCDecision_V1(DoCompact, ClrInstanceID) 0
#define FireEtwGCSettings(SegmentSize, LargeObjectSegmentSize, ServerGC) 0
#define FireEtwGCSettings_V1(SegmentSize, LargeObjectSegmentSize, ServerGC, ClrInstanceID) 0
#define FireEtwGCOptimized(DesiredAllocation, NewAllocation, GenerationNumber) 0
#define FireEtwGCOptimized_V1(DesiredAllocation, NewAllocation, GenerationNumber, ClrInstanceID) 0
#define FireEtwGCPerHeapHistory() 0
#define FireEtwGCPerHeapHistory_V1(ClrInstanceID) 0
#define FireEtwGCGlobalHeapHistory(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms) 0
#define FireEtwGCGlobalHeapHistory_V1(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID) 0
#define FireEtwGCJoin(Heap, JoinTime, JoinType) 0
#define FireEtwGCJoin_V1(Heap, JoinTime, JoinType, ClrInstanceID) 0
#define FireEtwPrvGCMarkStackRoots(HeapNum) 0
#define FireEtwPrvGCMarkStackRoots_V1(HeapNum, ClrInstanceID) 0
#define FireEtwPrvGCMarkFinalizeQueueRoots(HeapNum) 0
#define FireEtwPrvGCMarkFinalizeQueueRoots_V1(HeapNum, ClrInstanceID) 0
#define FireEtwPrvGCMarkHandles(HeapNum) 0
#define FireEtwPrvGCMarkHandles_V1(HeapNum, ClrInstanceID) 0
#define FireEtwPrvGCMarkCards(HeapNum) 0
#define FireEtwPrvGCMarkCards_V1(HeapNum, ClrInstanceID) 0
#define FireEtwBGCBegin(ClrInstanceID) 0
#define FireEtwBGC1stNonConEnd(ClrInstanceID) 0
#define FireEtwBGC1stConEnd(ClrInstanceID) 0
#define FireEtwBGC2ndNonConBegin(ClrInstanceID) 0
#define FireEtwBGC2ndNonConEnd(ClrInstanceID) 0
#define FireEtwBGC2ndConBegin(ClrInstanceID) 0
#define FireEtwBGC2ndConEnd(ClrInstanceID) 0
#define FireEtwBGCPlanEnd(ClrInstanceID) 0
#define FireEtwBGCSweepEnd(ClrInstanceID) 0
#define FireEtwBGCDrainMark(Objects, ClrInstanceID) 0
#define FireEtwBGCRevisit(Pages, Objects, IsLarge, ClrInstanceID) 0
#define FireEtwBGCOverflow(Min, Max, Objects, IsLarge, ClrInstanceID) 0
#define FireEtwBGCAllocWaitBegin(Reason, ClrInstanceID) 0
#define FireEtwBGCAllocWaitEnd(Reason, ClrInstanceID) 0
#define FireEtwGCFullNotify(GenNumber, IsAlloc) 0
#define FireEtwGCFullNotify_V1(GenNumber, IsAlloc, ClrInstanceID) 0
#define FireEtwEEStartupStart() 0
#define FireEtwEEStartupStart_V1(ClrInstanceID) 0
#define FireEtwEEStartupEnd() 0
#define FireEtwEEStartupEnd_V1(ClrInstanceID) 0
#define FireEtwEEConfigSetup() 0
#define FireEtwEEConfigSetup_V1(ClrInstanceID) 0
#define FireEtwEEConfigSetupEnd() 0
#define FireEtwEEConfigSetupEnd_V1(ClrInstanceID) 0
#define FireEtwLdSysBases() 0
#define FireEtwLdSysBases_V1(ClrInstanceID) 0
#define FireEtwLdSysBasesEnd() 0
#define FireEtwLdSysBasesEnd_V1(ClrInstanceID) 0
#define FireEtwExecExe() 0
#define FireEtwExecExe_V1(ClrInstanceID) 0
#define FireEtwExecExeEnd() 0
#define FireEtwExecExeEnd_V1(ClrInstanceID) 0
#define FireEtwMain() 0
#define FireEtwMain_V1(ClrInstanceID) 0
#define FireEtwMainEnd() 0
#define FireEtwMainEnd_V1(ClrInstanceID) 0
#define FireEtwApplyPolicyStart() 0
#define FireEtwApplyPolicyStart_V1(ClrInstanceID) 0
#define FireEtwApplyPolicyEnd() 0
#define FireEtwApplyPolicyEnd_V1(ClrInstanceID) 0
#define FireEtwLdLibShFolder() 0
#define FireEtwLdLibShFolder_V1(ClrInstanceID) 0
#define FireEtwLdLibShFolderEnd() 0
#define FireEtwLdLibShFolderEnd_V1(ClrInstanceID) 0
#define FireEtwPrestubWorker() 0
#define FireEtwPrestubWorker_V1(ClrInstanceID) 0
#define FireEtwPrestubWorkerEnd() 0
#define FireEtwPrestubWorkerEnd_V1(ClrInstanceID) 0
#define FireEtwGetInstallationStart() 0
#define FireEtwGetInstallationStart_V1(ClrInstanceID) 0
#define FireEtwGetInstallationEnd() 0
#define FireEtwGetInstallationEnd_V1(ClrInstanceID) 0
#define FireEtwOpenHModule() 0
#define FireEtwOpenHModule_V1(ClrInstanceID) 0
#define FireEtwOpenHModuleEnd() 0
#define FireEtwOpenHModuleEnd_V1(ClrInstanceID) 0
#define FireEtwExplicitBindStart() 0
#define FireEtwExplicitBindStart_V1(ClrInstanceID) 0
#define FireEtwExplicitBindEnd() 0
#define FireEtwExplicitBindEnd_V1(ClrInstanceID) 0
#define FireEtwParseXml() 0
#define FireEtwParseXml_V1(ClrInstanceID) 0
#define FireEtwParseXmlEnd() 0
#define FireEtwParseXmlEnd_V1(ClrInstanceID) 0
#define FireEtwInitDefaultDomain() 0
#define FireEtwInitDefaultDomain_V1(ClrInstanceID) 0
#define FireEtwInitDefaultDomainEnd() 0
#define FireEtwInitDefaultDomainEnd_V1(ClrInstanceID) 0
#define FireEtwInitSecurity() 0
#define FireEtwInitSecurity_V1(ClrInstanceID) 0
#define FireEtwInitSecurityEnd() 0
#define FireEtwInitSecurityEnd_V1(ClrInstanceID) 0
#define FireEtwAllowBindingRedirs() 0
#define FireEtwAllowBindingRedirs_V1(ClrInstanceID) 0
#define FireEtwAllowBindingRedirsEnd() 0
#define FireEtwAllowBindingRedirsEnd_V1(ClrInstanceID) 0
#define FireEtwEEConfigSync() 0
#define FireEtwEEConfigSync_V1(ClrInstanceID) 0
#define FireEtwEEConfigSyncEnd() 0
#define FireEtwEEConfigSyncEnd_V1(ClrInstanceID) 0
#define FireEtwFusionBinding() 0
#define FireEtwFusionBinding_V1(ClrInstanceID) 0
#define FireEtwFusionBindingEnd() 0
#define FireEtwFusionBindingEnd_V1(ClrInstanceID) 0
#define FireEtwLoaderCatchCall() 0
#define FireEtwLoaderCatchCall_V1(ClrInstanceID) 0
#define FireEtwLoaderCatchCallEnd() 0
#define FireEtwLoaderCatchCallEnd_V1(ClrInstanceID) 0
#define FireEtwFusionInit() 0
#define FireEtwFusionInit_V1(ClrInstanceID) 0
#define FireEtwFusionInitEnd() 0
#define FireEtwFusionInitEnd_V1(ClrInstanceID) 0
#define FireEtwFusionAppCtx() 0
#define FireEtwFusionAppCtx_V1(ClrInstanceID) 0
#define FireEtwFusionAppCtxEnd() 0
#define FireEtwFusionAppCtxEnd_V1(ClrInstanceID) 0
#define FireEtwFusion2EE() 0
#define FireEtwFusion2EE_V1(ClrInstanceID) 0
#define FireEtwFusion2EEEnd() 0
#define FireEtwFusion2EEEnd_V1(ClrInstanceID) 0
#define FireEtwSecurityCatchCall() 0
#define FireEtwSecurityCatchCall_V1(ClrInstanceID) 0
#define FireEtwSecurityCatchCallEnd() 0
#define FireEtwSecurityCatchCallEnd_V1(ClrInstanceID) 0
#define FireEtwCLRStackWalkPrivate(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
#define FireEtwModuleRangeLoadPrivate(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType, IBCType, SectionType) 0
#define FireEtwBindingPolicyPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingPolicyPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingNgenPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingNgenPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingLookupAndProbingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingLookupAndProbingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingDownloadPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwBindingDownloadPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderAssemblyInitPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderAssemblyInitPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderMappingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderMappingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderDeliverEventsPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwLoaderDeliverEventsPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
#define FireEtwEvidenceGenerated(Type, AppDomain, ILImage, ClrInstanceID) 0
#define FireEtwModuleTransparencyComputationStart(Module, AppDomainID, ClrInstanceID) 0
#define FireEtwModuleTransparencyComputationEnd(Module, AppDomainID, IsAllCritical, IsAllTransparent, IsTreatAsSafe, IsOpportunisticallyCritical, SecurityRuleSet, ClrInstanceID) 0
#define FireEtwTypeTransparencyComputationStart(Type, Module, AppDomainID, ClrInstanceID) 0
#define FireEtwTypeTransparencyComputationEnd(Type, Module, AppDomainID, IsAllCritical, IsAllTransparent, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
#define FireEtwMethodTransparencyComputationStart(Method, Module, AppDomainID, ClrInstanceID) 0
#define FireEtwMethodTransparencyComputationEnd(Method, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
#define FireEtwFieldTransparencyComputationStart(Field, Module, AppDomainID, ClrInstanceID) 0
#define FireEtwFieldTransparencyComputationEnd(Field, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
#define FireEtwTokenTransparencyComputationStart(Token, Module, AppDomainID, ClrInstanceID) 0
#define FireEtwTokenTransparencyComputationEnd(Token, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
#define FireEtwNgenBindEvent(ClrInstanceID, BindingID, ReasonCode, AssemblyName) 0
#define FireEtwFailFast(FailFastUserMessage, FailedEIP, OSExitCode, ClrExitCode, ClrInstanceID) 0
#define FireEtwPrvFinalizeObject(TypeID, ObjectID, ClrInstanceID, TypeName) 0
#define FireEtwCCWRefCountChange(HandleID, ObjectID, COMInterfacePointer, NewRefCount, AppDomainID, ClassName, NameSpace, Operation, ClrInstanceID) 0
#define FireEtwPrvSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
#define FireEtwPrvDestroyGCHandle(HandleID, ClrInstanceID) 0
#define FireEtwFusionMessageEvent(ClrInstanceID, Prepend, Message) 0
#define FireEtwFusionErrorCodeEvent(ClrInstanceID, Category, ErrorCode) 0
#define FireEtwPinPlugAtGCTime(PlugStart, PlugEnd, GapBeforeSize, ClrInstanceID) 0
#define FireEtwAllocRequest(LoaderHeapPtr, MemoryAddress, RequestSize, Unused1, Unused2, ClrInstanceID) 0
#define FireEtwMulticoreJit(ClrInstanceID, String1, String2, Int1, Int2, Int3) 0
#define FireEtwMulticoreJitMethodCodeReturned(ClrInstanceID, ModuleID, MethodID) 0
#define FireEtwIInspectableRuntimeClassName(TypeName, ClrInstanceID) 0
#define FireEtwWinRTUnbox(TypeName, SecondTypeName, ClrInstanceID) 0
#define FireEtwCreateRCW(TypeName, ClrInstanceID) 0
#define FireEtwRCWVariance(TypeName, InterfaceTypeName, VariantInterfaceTypeName, ClrInstanceID) 0
#define FireEtwRCWIEnumerableCasting(TypeName, SecondTypeName, ClrInstanceID) 0
#define FireEtwCreateCCW(TypeName, ClrInstanceID) 0
#define FireEtwCCWVariance(TypeName, InterfaceTypeName, VariantInterfaceTypeName, ClrInstanceID) 0
#define FireEtwObjectVariantMarshallingToNative(TypeName, Int1, ClrInstanceID) 0
#define FireEtwGetTypeFromGUID(TypeName, SecondTypeName, ClrInstanceID) 0
#define FireEtwGetTypeFromProgID(TypeName, SecondTypeName, ClrInstanceID) 0
#define FireEtwConvertToCallbackEtw(TypeName, SecondTypeName, ClrInstanceID) 0
#define FireEtwBeginCreateManagedReference(ClrInstanceID) 0
#define FireEtwEndCreateManagedReference(ClrInstanceID) 0
#define FireEtwObjectVariantMarshallingToManaged(TypeName, Int1, ClrInstanceID) 0

1357
src/Native/gc/env/gcenv.h vendored Normal file

The diff for this file is not shown because it is too large.

623
src/Native/gc/env/gcenv.unix.cpp vendored Normal file

@@ -0,0 +1,623 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
// Implementation of the GC environment
//
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#include <sys/mman.h>
#include <sys/time.h>
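// On Unix the interlocked primitives are implemented with the GCC/Clang __sync
// atomic builtins, and critical sections map onto pthread mutexes.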
int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
{
return __sync_swap(Target, Value);
}
int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
{
return __sync_fetch_and_add(Addend, Value);
}
void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
{
return __sync_swap(Target, Value);
}
void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
{
__sync_fetch_and_or(p, msk);
}
void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
{
__sync_fetch_and_and(p, msk);
}
void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
{
pthread_mutex_init(&lpCriticalSection->mutex, NULL);
}
void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
{
pthread_mutex_lock(&lpCriticalSection->mutex);
}
void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
{
pthread_mutex_unlock(&lpCriticalSection->mutex);
}
void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
{
pthread_mutex_destroy(&lpCriticalSection->mutex);
}
void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
pMSEX->dwMemoryLoad = 0;
pMSEX->ullTotalPageFile = 0;
pMSEX->ullAvailPageFile = 0;
pMSEX->ullAvailExtendedVirtual = 0;
// There is no API to get the total virtual address space size on
// Unix, so we use a constant value representing 128TB, which is
// the approximate size of total user virtual address space on
// the currently supported Unix systems.
static const UINT64 _128TB = (1ull << 47);
pMSEX->ullTotalVirtual = _128TB;
pMSEX->ullAvailVirtual = _128TB;
// TODO: Implement
pMSEX->ullTotalPhys = _128TB;
pMSEX->ullAvailPhys = _128TB;
// If the machine has more RAM than virtual address limit, let us cap it.
// Our GC can never use more than virtual address limit.
if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
{
pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
}
}
void CLREventStatic::CreateManualEvent(bool bInitialState)
{
// TODO: Implement
m_fInitialized = true;
}
void CLREventStatic::CreateAutoEvent(bool bInitialState)
{
// TODO: Implement
m_fInitialized = true;
}
void CLREventStatic::CreateOSManualEvent(bool bInitialState)
{
CreateManualEvent(bInitialState);
}
void CLREventStatic::CreateOSAutoEvent (bool bInitialState)
{
CreateAutoEvent(bInitialState);
}
void CLREventStatic::CloseEvent()
{
if (m_fInitialized)
{
// TODO: Implement
m_fInitialized = false;
}
}
bool CLREventStatic::IsValid() const
{
return m_fInitialized;
}
bool CLREventStatic::Set()
{
if (!m_fInitialized)
return false;
// TODO: Implement
return true;
}
bool CLREventStatic::Reset()
{
if (!m_fInitialized)
return false;
// TODO: Implement
return true;
}
uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
{
DWORD result = WAIT_FAILED;
if (m_fInitialized)
{
bool disablePreemptive = false;
Thread * pCurThread = GetThread();
if (NULL != pCurThread)
{
if (pCurThread->PreemptiveGCDisabled())
{
pCurThread->EnablePreemptiveGC();
disablePreemptive = true;
}
}
// TODO: Implement
result = WAIT_OBJECT_0;
if (disablePreemptive)
{
pCurThread->DisablePreemptiveGC();
}
}
return result;
}
bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
{
return sched_yield() == 0;
}
void * ClrVirtualAlloc(
void * lpAddress,
size_t dwSize,
uint32_t flAllocationType,
uint32_t flProtect)
{
return ClrVirtualAllocAligned(lpAddress, dwSize, flAllocationType, flProtect, OS_PAGE_SIZE);
}
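// Translates the Win32 PAGE_* protection flags used by the GC into the
// POSIX PROT_* bits expected by mmap/mprotect.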
static int W32toUnixAccessControl(uint32_t flProtect)
{
int prot = 0;
switch (flProtect & 0xff)
{
case PAGE_NOACCESS:
prot = PROT_NONE;
break;
case PAGE_READWRITE:
prot = PROT_READ | PROT_WRITE;
break;
default:
_ASSERTE(false);
break;
}
return prot;
}
void * ClrVirtualAllocAligned(
void * lpAddress,
size_t dwSize,
uint32_t flAllocationType,
uint32_t flProtect,
size_t dwAlignment)
{
if ((flAllocationType & ~(MEM_RESERVE | MEM_COMMIT)) != 0)
{
// TODO: Implement
return NULL;
}
_ASSERTE(((size_t)lpAddress & (OS_PAGE_SIZE - 1)) == 0);
// Align size to whole pages
dwSize = (dwSize + (OS_PAGE_SIZE - 1)) & ~(OS_PAGE_SIZE - 1);
if (flAllocationType & MEM_RESERVE)
{
size_t alignedSize = dwSize;
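// When extra alignment is requested, over-reserve by the alignment slack so an
// aligned block can be carved out of the mapping; the unused head and tail
// pages are unmapped once the reservation succeeds.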
if (dwAlignment > OS_PAGE_SIZE)
alignedSize += (dwAlignment - OS_PAGE_SIZE);
void * pRetVal = mmap(lpAddress, alignedSize, W32toUnixAccessControl(flProtect),
MAP_ANON | MAP_PRIVATE, -1, 0);
// mmap reports failure with MAP_FAILED rather than NULL
if (pRetVal == MAP_FAILED)
return NULL;
if (dwAlignment > OS_PAGE_SIZE)
{
void * pAlignedRetVal = (void *)(((size_t)pRetVal + (dwAlignment - 1)) & ~(dwAlignment - 1));
size_t startPadding = (size_t)pAlignedRetVal - (size_t)pRetVal;
if (startPadding != 0)
{
int ret = munmap(pRetVal, startPadding);
_ASSERTE(ret == 0);
}
size_t endPadding = alignedSize - (startPadding + dwSize);
if (endPadding != 0)
{
int ret = munmap((void *)((size_t)pAlignedRetVal + dwSize), endPadding);
_ASSERTE(ret == 0);
}
pRetVal = pAlignedRetVal;
}
return pRetVal;
}
if (flAllocationType & MEM_COMMIT)
{
int ret = mprotect(lpAddress, dwSize, W32toUnixAccessControl(flProtect));
return (ret == 0) ? lpAddress : NULL;
}
return NULL;
}
bool ClrVirtualFree(
void * lpAddress,
size_t dwSize,
uint32_t dwFreeType)
{
// TODO: Implement
return false;
}
bool
ClrVirtualProtect(
void * lpAddress,
size_t dwSize,
uint32_t flNewProtect,
uint32_t * lpflOldProtect)
{
// TODO: Implement, not currently used
return false;
}
MethodTable * g_pFreeObjectMethodTable;
EEConfig * g_pConfig;
GCSystemInfo g_SystemInfo;
void InitializeSystemInfo()
{
// TODO: Implement
g_SystemInfo.dwNumberOfProcessors = 4;
g_SystemInfo.dwPageSize = OS_PAGE_SIZE;
g_SystemInfo.dwAllocationGranularity = OS_PAGE_SIZE;
}
int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
#ifdef _MSC_VER
__declspec(thread)
#else
__thread
#endif
Thread * pCurrentThread;
Thread * GetThread()
{
return pCurrentThread;
}
Thread * g_pThreadList = NULL;
Thread * ThreadStore::GetThreadList(Thread * pThread)
{
if (pThread == NULL)
return g_pThreadList;
return pThread->m_pNext;
}
void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
{
// TODO: Locks
Thread * pThread = new Thread();
pThread->GetAllocContext()->init();
pCurrentThread = pThread;
pThread->m_pNext = g_pThreadList;
g_pThreadList = pThread;
}
void DestroyThread(Thread * pThread)
{
// TODO: Implement
}
void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
{
GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
// TODO: Implement
}
void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
}
void GCToEEInterface::ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
// TODO: Implement - Scan stack roots on given thread
}
void GCToEEInterface::ScanStaticGCRefsOpportunistically(promote_func* fn, ScanContext* sc)
{
}
void GCToEEInterface::GcStartWork(int condemned, int max_gen)
{
}
void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
{
}
void GCToEEInterface::GcBeforeBGCSweepWork()
{
}
void GCToEEInterface::GcDone(int condemned)
{
}
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize
// TODO: Implement for finalization
}
bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
{
// TODO: Implement for background GC
return false;
}
bool IsGCSpecialThread()
{
// TODO: Implement for background GC
return false;
}
bool PalHasCapability(PalCapability capability)
{
// TODO: Implement for background GC
return false;
}
WINBASEAPI
UINT
WINAPI
GetWriteWatch(
DWORD dwFlags,
PVOID lpBaseAddress,
SIZE_T dwRegionSize,
PVOID *lpAddresses,
ULONG_PTR * lpdwCount,
ULONG * lpdwGranularity
)
{
// TODO: Implement for background GC
*lpAddresses = NULL;
*lpdwCount = 0;
// Until it is implemented, return non-zero value as an indicator of failure
return 1;
}
WINBASEAPI
UINT
WINAPI
ResetWriteWatch(
LPVOID lpBaseAddress,
SIZE_T dwRegionSize
)
{
// TODO: Implement for background GC
// Until it is implemented, return non-zero value as an indicator of failure
return 1;
}
WINBASEAPI
BOOL
WINAPI
VirtualUnlock(
LPVOID lpAddress,
SIZE_T dwSize
)
{
// TODO: Implement
return false;
}
WINBASEAPI
VOID
WINAPI
FlushProcessWriteBuffers()
{
// TODO: Implement
}
const int tccSecondsToMillieSeconds = 1000;
const int tccSecondsToMicroSeconds = 1000000;
const int tccMillieSecondsToMicroSeconds = 1000; // 10^3
WINBASEAPI
DWORD
WINAPI
GetTickCount()
{
// TODO: More efficient, platform-specific implementation
struct timeval tv;
if (gettimeofday(&tv, NULL) == -1)
{
_ASSERTE(!"gettimeofday() failed");
return 0;
}
return (tv.tv_sec * tccSecondsToMillieSeconds) + (tv.tv_usec / tccMillieSecondsToMicroSeconds);
}
WINBASEAPI
BOOL
WINAPI
QueryPerformanceCounter(LARGE_INTEGER *lpPerformanceCount)
{
// TODO: More efficient, platform-specific implementation
struct timeval tv;
if (gettimeofday(&tv, NULL) == -1)
{
_ASSERTE(!"gettimeofday() failed");
return FALSE;
}
lpPerformanceCount->QuadPart =
(LONGLONG) tv.tv_sec * (LONGLONG) tccSecondsToMicroSeconds + (LONGLONG) tv.tv_usec;
return TRUE;
}
WINBASEAPI
BOOL
WINAPI
QueryPerformanceFrequency(LARGE_INTEGER *lpFrequency)
{
lpFrequency->QuadPart = (LONGLONG) tccSecondsToMicroSeconds;
return TRUE;
}
WINBASEAPI
DWORD
WINAPI
GetCurrentThreadId(
VOID)
{
// TODO: Implement
return 1;
}
WINBASEAPI
VOID
WINAPI
YieldProcessor()
{
// TODO: Implement
}
WINBASEAPI
void
WINAPI
DebugBreak()
{
// TODO: Implement
}
WINBASEAPI
VOID
WINAPI
MemoryBarrier()
{
// TODO: Implement
}
// File I/O - Used for tracking only
WINBASEAPI
DWORD
WINAPI
SetFilePointer(
HANDLE hFile,
LONG lDistanceToMove,
LONG * lpDistanceToMoveHigh,
DWORD dwMoveMethod)
{
// TODO: Reimplement callers using CRT
return 0;
}
WINBASEAPI
BOOL
WINAPI
FlushFileBuffers(
HANDLE hFile)
{
// TODO: Reimplement callers using CRT
return FALSE;
}
WINBASEAPI
BOOL
WINAPI
WriteFile(
HANDLE hFile,
LPCVOID lpBuffer,
DWORD nNumberOfBytesToWrite,
DWORD * lpNumberOfBytesWritten,
PVOID lpOverlapped)
{
// TODO: Reimplement callers using CRT
return FALSE;
}
WINBASEAPI
BOOL
WINAPI
CloseHandle(
HANDLE hObject)
{
// TODO: Reimplement callers using CRT
return FALSE;
}
WINBASEAPI
DWORD
WINAPI
GetLastError()
{
return 1;
}

344
src/Native/gc/env/gcenv.windows.cpp vendored Normal file

@@ -0,0 +1,344 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
// Implementation of the GC environment
//
#include "common.h"
#include "windows.h"
#include "gcenv.h"
#include "gc.h"
int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
{
return InterlockedIncrement((LONG *)lpAddend);
}
int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
{
return InterlockedDecrement((LONG *)lpAddend);
}
int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
{
return InterlockedExchange((LONG *)Target, Value);
}
int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
{
return InterlockedCompareExchange((LONG *)Destination, Exchange, Comperand);
}
int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
{
return InterlockedExchangeAdd((LONG *)Addend, Value);
}
void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
{
return InterlockedExchangePointer(Target, Value);
}
void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
{
return InterlockedCompareExchangePointer(Destination, Exchange, Comperand);
}
void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
{
InterlockedOr((LONG *)p, msk);
}
void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
{
InterlockedAnd((LONG *)p, msk);
}
void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
{
InitializeCriticalSection(lpCriticalSection);
}
void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
{
EnterCriticalSection(lpCriticalSection);
}
void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
{
LeaveCriticalSection(lpCriticalSection);
}
void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
{
DeleteCriticalSection(lpCriticalSection);
}
void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
BOOL fRet = GlobalMemoryStatusEx(pMSEX);
_ASSERTE (fRet);
// If the machine has more RAM than virtual address limit, let us cap it.
// Our GC can never use more than virtual address limit.
if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
{
pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
}
}
void CLREventStatic::CreateManualEvent(bool bInitialState)
{
m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
m_fInitialized = true;
}
void CLREventStatic::CreateAutoEvent(bool bInitialState)
{
m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
m_fInitialized = true;
}
void CLREventStatic::CreateOSManualEvent(bool bInitialState)
{
m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
m_fInitialized = true;
}
void CLREventStatic::CreateOSAutoEvent (bool bInitialState)
{
m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
m_fInitialized = true;
}
void CLREventStatic::CloseEvent()
{
if (m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE)
{
CloseHandle(m_hEvent);
m_hEvent = INVALID_HANDLE_VALUE;
}
}
bool CLREventStatic::IsValid() const
{
return m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE;
}
bool CLREventStatic::Set()
{
if (!m_fInitialized)
return false;
return !!SetEvent(m_hEvent);
}
bool CLREventStatic::Reset()
{
if (!m_fInitialized)
return false;
return !!ResetEvent(m_hEvent);
}
uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
{
DWORD result = WAIT_FAILED;
if (m_fInitialized)
{
bool disablePreemptive = false;
Thread * pCurThread = GetThread();
if (NULL != pCurThread)
{
if (pCurThread->PreemptiveGCDisabled())
{
pCurThread->EnablePreemptiveGC();
disablePreemptive = true;
}
}
result = WaitForSingleObjectEx(m_hEvent, dwMilliseconds, bAlertable);
if (disablePreemptive)
{
pCurThread->DisablePreemptiveGC();
}
}
return result;
}
bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
{
SwitchToThread();
return true;
}
void * ClrVirtualAlloc(
void * lpAddress,
size_t dwSize,
uint32_t flAllocationType,
uint32_t flProtect)
{
return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
}
void * ClrVirtualAllocAligned(
void * lpAddress,
size_t dwSize,
uint32_t flAllocationType,
uint32_t flProtect,
size_t dwAlignment)
{
return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
}
bool ClrVirtualFree(
void * lpAddress,
size_t dwSize,
uint32_t dwFreeType)
{
return !!VirtualFree(lpAddress, dwSize, dwFreeType);
}
bool
ClrVirtualProtect(
void * lpAddress,
size_t dwSize,
uint32_t flNewProtect,
uint32_t * lpflOldProtect)
{
return !!VirtualProtect(lpAddress, dwSize, flNewProtect, (DWORD *)lpflOldProtect);
}
MethodTable * g_pFreeObjectMethodTable;
EEConfig * g_pConfig;
GCSystemInfo g_SystemInfo;
void InitializeSystemInfo()
{
SYSTEM_INFO systemInfo;
GetSystemInfo(&systemInfo);
g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
}
int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
__declspec(thread) Thread * pCurrentThread;
Thread * GetThread()
{
return pCurrentThread;
}
Thread * g_pThreadList = NULL;
Thread * ThreadStore::GetThreadList(Thread * pThread)
{
if (pThread == NULL)
return g_pThreadList;
return pThread->m_pNext;
}
void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
{
// TODO: Locks
Thread * pThread = new Thread();
pThread->GetAllocContext()->init();
pCurrentThread = pThread;
pThread->m_pNext = g_pThreadList;
g_pThreadList = pThread;
}
void DestroyThread(Thread * pThread)
{
// TODO: Implement
}
void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
{
GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
// TODO: Implement
}
void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
}
void GCToEEInterface::ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
// TODO: Implement - Scan stack roots on given thread
}
void GCToEEInterface::ScanStaticGCRefsOpportunistically(promote_func* fn, ScanContext* sc)
{
}
void GCToEEInterface::GcStartWork(int condemned, int max_gen)
{
}
void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
{
}
void GCToEEInterface::GcBeforeBGCSweepWork()
{
}
void GCToEEInterface::GcDone(int condemned)
{
}
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize
// TODO: Implement for finalization
}
bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
{
// TODO: Implement for background GC
return false;
}
bool IsGCSpecialThread()
{
// TODO: Implement for background GC
return false;
}
bool PalHasCapability(PalCapability capability)
{
// TODO: Implement for background GC
return false;
}

36376
src/Native/gc/gc.cpp Normal file

The diff for this file is not shown because it is too large.

673
src/Native/gc/gc.h Normal file

@@ -0,0 +1,673 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*++
Module Name:
gc.h
--*/
#ifndef __GC_H
#define __GC_H
#ifndef BINDER
#ifdef PROFILING_SUPPORTED
#define GC_PROFILING //Turn on profiling
#endif // PROFILING_SUPPORTED
#endif
/*
* Promotion Function Prototypes
*/
typedef void enum_func (Object*);
// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);
// stub type to abstract a heap segment
struct gc_heap_segment_stub;
typedef gc_heap_segment_stub *segment_handle;
struct segment_info
{
LPVOID pvMem; // base of the allocation, not the first object (must add ibFirstObject)
size_t ibFirstObject; // offset to the base of the first object in the segment
size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
size_t ibCommit; // limit of committed memory in the segment (>= allocated)
size_t ibReserved; // limit of reserved memory in the segment (>= commit)
};
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* If you modify failure_get_memory and */
/* oom_reason be sure to make the corresponding */
/* changes in toolbox\sos\strike\strike.cpp. */
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
enum failure_get_memory
{
fgm_no_failure = 0,
fgm_reserve_segment = 1,
fgm_commit_segment_beg = 2,
fgm_commit_eph_segment = 3,
fgm_grow_table = 4,
fgm_commit_table = 5
};
struct fgm_history
{
failure_get_memory fgm;
size_t size;
size_t available_pagefile_mb;
BOOL loh_p;
void set_fgm (failure_get_memory f, size_t s, BOOL l)
{
fgm = f;
size = s;
loh_p = l;
}
};
enum oom_reason
{
oom_no_failure = 0,
oom_budget = 1,
oom_cant_commit = 2,
oom_cant_reserve = 3,
oom_loh = 4,
oom_low_mem = 5,
oom_unproductive_full_gc = 6
};
struct oom_history
{
oom_reason reason;
size_t alloc_size;
BYTE* reserved;
BYTE* allocated;
size_t gc_index;
failure_get_memory fgm;
size_t size;
size_t available_pagefile_mb;
BOOL loh_p;
};
/* forward declarations */
class CObjectHeader;
class Object;
class GCHeap;
/* misc defines */
#define LARGE_OBJECT_SIZE ((size_t)(85000))
GPTR_DECL(GCHeap, g_pGCHeap);
#ifndef DACCESS_COMPILE
extern "C" {
#endif
GPTR_DECL(BYTE,g_lowest_address);
GPTR_DECL(BYTE,g_highest_address);
GPTR_DECL(DWORD,g_card_table);
#ifndef DACCESS_COMPILE
}
#endif
#ifdef DACCESS_COMPILE
class DacHeapWalker;
#endif
#ifdef _DEBUG
#define _LOGALLOC
#endif
#ifdef WRITE_BARRIER_CHECK
//always defined, but should be 0 in Server GC
extern BYTE* g_GCShadow;
extern BYTE* g_GCShadowEnd;
// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
extern BYTE* g_shadow_lowest_address;
#endif
#define MP_LOCKS
extern "C" BYTE* g_ephemeral_low;
extern "C" BYTE* g_ephemeral_high;
namespace WKS {
::GCHeap* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#if defined(FEATURE_SVR_GC)
namespace SVR {
::GCHeap* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#endif // defined(FEATURE_SVR_GC)
/*
* Ephemeral Garbage Collected Heap Interface
*/
struct alloc_context
{
friend class WKS::gc_heap;
#if defined(FEATURE_SVR_GC)
friend class SVR::gc_heap;
friend class SVR::GCHeap;
#endif // defined(FEATURE_SVR_GC)
friend struct ClassDumpInfo;
BYTE* alloc_ptr;
BYTE* alloc_limit;
INT64 alloc_bytes; //Number of bytes allocated on SOH by this context
INT64 alloc_bytes_loh; //Number of bytes allocated on LOH by this context
#if defined(FEATURE_SVR_GC)
SVR::GCHeap* alloc_heap;
SVR::GCHeap* home_heap;
#endif // defined(FEATURE_SVR_GC)
int alloc_count;
public:
void init()
{
LIMITED_METHOD_CONTRACT;
alloc_ptr = 0;
alloc_limit = 0;
alloc_bytes = 0;
alloc_bytes_loh = 0;
#if defined(FEATURE_SVR_GC)
alloc_heap = 0;
home_heap = 0;
#endif // defined(FEATURE_SVR_GC)
alloc_count = 0;
}
};
struct ScanContext
{
Thread* thread_under_crawl;
int thread_number;
BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
BOOL concurrent; //TRUE: concurrent scanning
#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
AppDomain *pCurrentDomain;
#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
MethodDesc *pMD;
#endif //GC_PROFILING || DACCESS_COMPILE
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
EtwGCRootKind dwEtwRootKind;
#endif // GC_PROFILING || FEATURE_EVENT_TRACE
ScanContext()
{
LIMITED_METHOD_CONTRACT;
thread_under_crawl = 0;
thread_number = -1;
promotion = FALSE;
concurrent = FALSE;
#ifdef GC_PROFILING
pMD = NULL;
#endif //GC_PROFILING
#ifdef FEATURE_EVENT_TRACE
dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
}
};
typedef BOOL (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void *context, int generation, BYTE *range_start, BYTE * range_end, BYTE *range_reserved);
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
struct ProfilingScanContext : ScanContext
{
BOOL fProfilerPinned;
LPVOID pvEtwContext;
void *pHeapId;
ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
{
LIMITED_METHOD_CONTRACT;
pHeapId = NULL;
fProfilerPinned = fProfilerPinnedParam;
pvEtwContext = NULL;
#ifdef FEATURE_CONSERVATIVE_GC
// To not confuse CNameSpace::GcScanRoots
promotion = g_pConfig->GetGCConservative();
#endif
}
};
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef STRESS_HEAP
#define IN_STRESS_HEAP(x) x
#define STRESS_HEAP_ARG(x) ,x
#else // STRESS_HEAP
#define IN_STRESS_HEAP(x)
#define STRESS_HEAP_ARG(x)
#endif // STRESS_HEAP
//dynamic data interface
struct gc_counters
{
size_t current_size;
size_t promoted_size;
size_t collection_count;
};
// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum collection_mode
{
collection_non_blocking = 0x00000001,
collection_blocking = 0x00000002,
collection_optimized = 0x00000004,
collection_compacting = 0x00000008
#ifdef STRESS_HEAP
, collection_gcstress = 0x80000000
#endif // STRESS_HEAP
};
// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum wait_full_gc_status
{
wait_full_gc_success = 0,
wait_full_gc_failed = 1,
wait_full_gc_cancelled = 2,
wait_full_gc_timeout = 3,
wait_full_gc_na = 4
};
// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum start_no_gc_region_status
{
start_no_gc_success = 0,
start_no_gc_no_memory = 1,
start_no_gc_too_large = 2,
start_no_gc_in_progress = 3
};
enum end_no_gc_region_status
{
end_no_gc_success = 0,
end_no_gc_not_in_progress = 1,
end_no_gc_induced = 2,
end_no_gc_alloc_exceeded = 3
};
enum bgc_state
{
bgc_not_in_process = 0,
bgc_initialized,
bgc_reset_ww,
bgc_mark_handles,
bgc_mark_stack,
bgc_revisit_soh,
bgc_revisit_loh,
bgc_overflow_soh,
bgc_overflow_loh,
bgc_final_marking,
bgc_sweep_soh,
bgc_sweep_loh,
bgc_plan_phase
};
enum changed_seg_state
{
seg_deleted,
seg_added
};
void record_changed_seg (BYTE* start, BYTE* end,
size_t current_gc_index,
bgc_state current_bgc_state,
changed_seg_state changed_state);
//constants for the flags parameter to the gc call back
#define GC_CALL_INTERIOR 0x1
#define GC_CALL_PINNED 0x2
#define GC_CALL_CHECK_APP_DOMAIN 0x4
//flags for GCHeap::Alloc(...)
#define GC_ALLOC_FINALIZE 0x1
#define GC_ALLOC_CONTAINS_REF 0x2
#define GC_ALLOC_ALIGN8_BIAS 0x4
class GCHeap {
friend struct ::_DacGlobals;
#ifdef DACCESS_COMPILE
friend class ClrDataAccess;
#endif
public:
virtual ~GCHeap() {}
static GCHeap *GetGCHeap()
{
#ifdef CLR_STANDALONE_BINDER
return NULL;
#else
LIMITED_METHOD_CONTRACT;
_ASSERTE(g_pGCHeap != NULL);
return g_pGCHeap;
#endif
}
#ifndef CLR_STANDALONE_BINDER
static BOOL IsGCHeapInitialized()
{
LIMITED_METHOD_CONTRACT;
return (g_pGCHeap != NULL);
}
static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
{
WRAPPER_NO_CONTRACT;
return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
}
static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
{
WRAPPER_NO_CONTRACT;
if (IsGCHeapInitialized())
GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
}
// The runtime needs to know whether we're using workstation or server GC
// long before the GCHeap is created. So IsServerHeap cannot be a virtual
// method on GCHeap. Instead we make it a static method and initialize
// gcHeapType before any of the calls to IsServerHeap. Note that this also
// has the advantage of getting the answer without an indirection
// (virtual call), which is important for perf critical codepaths.
#ifndef DACCESS_COMPILE
static void InitializeHeapType(bool bServerHeap)
{
LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
gcHeapType = bServerHeap ? GC_HEAP_SVR : GC_HEAP_WKS;
#ifdef WRITE_BARRIER_CHECK
if (gcHeapType == GC_HEAP_SVR)
{
g_GCShadow = 0;
g_GCShadowEnd = 0;
}
#endif
#else // FEATURE_SVR_GC
CONSISTENCY_CHECK(bServerHeap == false);
#endif // FEATURE_SVR_GC
}
#endif
static BOOL IsValidSegmentSize(size_t cbSize)
{
//Must be aligned on a Mb boundary and at least 4Mb
return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
}
static BOOL IsValidGen0MaxSize(size_t cbSize)
{
return (cbSize >= 64*1024);
}
inline static bool IsServerHeap()
{
LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
_ASSERTE(gcHeapType != GC_HEAP_INVALID);
return (gcHeapType == GC_HEAP_SVR);
#else // FEATURE_SVR_GC
return false;
#endif // FEATURE_SVR_GC
}
inline static bool UseAllocationContexts()
{
WRAPPER_NO_CONTRACT;
#ifdef FEATURE_REDHAWK
// SIMPLIFY: only use allocation contexts
return true;
#else
#ifdef _TARGET_ARM_
return TRUE;
#endif
return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
#endif
}
inline static bool MarkShouldCompeteForStatics()
{
WRAPPER_NO_CONTRACT;
return IsServerHeap() && g_SystemInfo.dwNumberOfProcessors >= 2;
}
#ifndef DACCESS_COMPILE
static GCHeap * CreateGCHeap()
{
WRAPPER_NO_CONTRACT;
GCHeap * pGCHeap;
#if defined(FEATURE_SVR_GC)
pGCHeap = (IsServerHeap() ? SVR::CreateGCHeap() : WKS::CreateGCHeap());
#else
pGCHeap = WKS::CreateGCHeap();
#endif // defined(FEATURE_SVR_GC)
g_pGCHeap = pGCHeap;
return pGCHeap;
}
#endif // DACCESS_COMPILE
#endif // !CLR_STANDALONE_BINDER
private:
typedef enum
{
GC_HEAP_INVALID = 0,
GC_HEAP_WKS = 1,
GC_HEAP_SVR = 2
} GC_HEAP_TYPE;
#ifdef FEATURE_SVR_GC
SVAL_DECL(DWORD,gcHeapType);
#endif // FEATURE_SVR_GC
public:
// TODO Synchronization, should be moved out
virtual BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE) = 0;
virtual DWORD WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE) = 0;
virtual void SetGCInProgress(BOOL fInProgress) = 0;
virtual CLREventStatic * GetWaitForGCEvent() = 0;
virtual void SetFinalizationRun (Object* obj) = 0;
virtual Object* GetNextFinalizable() = 0;
virtual size_t GetNumberOfFinalizable() = 0;
virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
virtual BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers) = 0;
virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
//wait for concurrent GC to finish
virtual void WaitUntilConcurrentGCComplete () = 0; // Use in managed threads
#ifndef DACCESS_COMPILE
virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use in native threads. TRUE if it succeeded, FALSE if it failed or timed out
#endif
virtual BOOL IsConcurrentGCInProgress() = 0;
// Enable/disable concurrent GC
virtual void TemporaryEnableConcurrentGC() = 0;
virtual void TemporaryDisableConcurrentGC() = 0;
virtual BOOL IsConcurrentGCEnabled() = 0;
virtual void FixAllocContext (alloc_context* acontext, BOOL lockp, void* arg, void *heap) = 0;
virtual Object* Alloc (alloc_context* acontext, size_t size, DWORD flags) = 0;
// This is safe to call only when EE is suspended.
virtual Object* GetContainingObject(void *pInteriorPtr) = 0;
// TODO Should be folded into constructor
virtual HRESULT Initialize () = 0;
virtual HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode = collection_blocking) = 0;
virtual Object* Alloc (size_t size, DWORD flags) = 0;
#ifdef FEATURE_64BIT_ALIGNMENT
virtual Object* AllocAlign8 (size_t size, DWORD flags) = 0;
virtual Object* AllocAlign8 (alloc_context* acontext, size_t size, DWORD flags) = 0;
private:
virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, DWORD flags) = 0;
public:
#endif // FEATURE_64BIT_ALIGNMENT
virtual Object* AllocLHeap (size_t size, DWORD flags) = 0;
virtual void SetReservedVMLimit (size_t vmlimit) = 0;
virtual void SetCardsAfterBulkCopy( Object**, size_t ) = 0;
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
virtual void WalkObject (Object* obj, walk_fn fn, void* context) = 0;
#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
virtual bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number) = 0;
virtual int GetNumberOfHeaps () = 0;
virtual int GetHomeHeapNumber () = 0;
virtual int CollectionCount (int generation, int get_bgc_fgc_count = 0) = 0;
// Finalizer queue stuff (should stay)
virtual bool RegisterForFinalization (int gen, Object* obj) = 0;
// General queries to the GC
virtual BOOL IsPromoted (Object *object) = 0;
virtual unsigned WhichGeneration (Object* object) = 0;
virtual BOOL IsEphemeral (Object* object) = 0;
virtual BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE) = 0;
virtual unsigned GetCondemnedGeneration() = 0;
virtual int GetGcLatencyMode() = 0;
virtual int SetGcLatencyMode(int newLatencyMode) = 0;
virtual int GetLOHCompactionMode() = 0;
virtual void SetLOHCompactionMode(int newLOHCompactionyMode) = 0;
virtual BOOL RegisterForFullGCNotification(DWORD gen2Percentage,
DWORD lohPercentage) = 0;
virtual BOOL CancelFullGCNotification() = 0;
virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;
virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
virtual int StartNoGCRegion(ULONGLONG totalSize, BOOL lohSizeKnown, ULONGLONG lohSize, BOOL disallowFullBlockingGC) = 0;
virtual int EndNoGCRegion() = 0;
virtual BOOL IsObjectInFixedHeap(Object *pObj) = 0;
virtual size_t GetTotalBytesInUse () = 0;
virtual size_t GetCurrentObjSize() = 0;
virtual size_t GetLastGCStartTime(int generation) = 0;
virtual size_t GetLastGCDuration(int generation) = 0;
virtual size_t GetNow() = 0;
virtual unsigned GetGcCount() = 0;
virtual void TraceGCSegments() = 0;
virtual void PublishObject(BYTE* obj) = 0;
// static, since restricting it to a single value for all heaps is fine
virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
static BOOL IsLargeObject(MethodTable *mt) {
WRAPPER_NO_CONTRACT;
return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
}
static unsigned GetMaxGeneration() {
LIMITED_METHOD_DAC_CONTRACT;
return max_generation;
}
virtual size_t GetPromotedBytes(int heap_index) = 0;
private:
enum {
max_generation = 2,
};
public:
#ifdef FEATURE_BASICFREEZE
// frozen segment management functions
virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;
#endif //FEATURE_BASICFREEZE
// debug support
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
//return TRUE if GC actually happens, otherwise FALSE
virtual BOOL StressHeap(alloc_context * acontext = 0) = 0;
#endif
#endif // FEATURE_REDHAWK
#ifdef VERIFY_HEAP
virtual void ValidateObjectMember (Object *obj) = 0;
#endif
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context) = 0;
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
protected:
#ifdef VERIFY_HEAP
public:
// Return NULL if can't find next object. When EE is not suspended,
// the result is not accurate: if the input arg is in gen0, the function could
// return zeroed out memory as next object
virtual Object * NextObj (Object * object) = 0;
#ifdef FEATURE_BASICFREEZE
// Return TRUE if object lives in frozen segment
virtual BOOL IsInFrozenSegment (Object * object) = 0;
#endif //FEATURE_BASICFREEZE
#endif //VERIFY_HEAP
};
extern VOLATILE(LONG) m_GCLock;
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(LPVOID pStart, UINT cb);
// For low memory notification from host
extern LONG g_bLowMemoryFromHost;
#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
#endif
// the method table for the WeakReference class
extern MethodTable *pWeakReferenceMT;
// The canonical method table for WeakReference<T>
extern MethodTable *pWeakReferenceOfTCanonMT;
extern void FinalizeWeakReference(Object * obj);
#endif // __GC_H
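To make the role of alloc_context concrete: allocation helpers typically bump-allocate out of [alloc_ptr, alloc_limit) with no locking and fall back to GCHeap::Alloc (declared above) only when the context is exhausted, at which point the GC refills the context and may collect. The sketch below is illustrative, not part of this header, and assumes size is already object-aligned.

// Hedged sketch of the bump-pointer fast path built on top of alloc_context.
inline Object* FastAlloc(alloc_context* acontext, size_t size, DWORD flags)
{
    BYTE* result = acontext->alloc_ptr;
    BYTE* newPtr = result + size;
    if (newPtr <= acontext->alloc_limit)
    {
        // Fast path: carve the object out of the thread's context; no locks.
        acontext->alloc_ptr = newPtr;
        return (Object*)result;
    }
    // Slow path: let the GC refill the context and possibly collect.
    return GCHeap::GetGCHeap()->Alloc(acontext, size, flags);
}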

105
src/Native/gc/gccommon.cpp Normal file
View file

@ -0,0 +1,105 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* GCCOMMON.CPP
*
* Code common to both SVR and WKS gcs
*/
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#ifdef FEATURE_SVR_GC
SVAL_IMPL_INIT(DWORD,GCHeap,gcHeapType,GCHeap::GC_HEAP_INVALID);
#endif // FEATURE_SVR_GC
GPTR_IMPL(GCHeap,g_pGCHeap);
/* global versions of the card table and brick table */
GPTR_IMPL(DWORD,g_card_table);
/* absolute bounds of the GC memory */
GPTR_IMPL_INIT(BYTE,g_lowest_address,0);
GPTR_IMPL_INIT(BYTE,g_highest_address,0);
#ifndef DACCESS_COMPILE
BYTE* g_ephemeral_low = (BYTE*)1;
BYTE* g_ephemeral_high = (BYTE*)~0;
#ifdef WRITE_BARRIER_CHECK
BYTE* g_GCShadow;
BYTE* g_GCShadowEnd;
BYTE* g_shadow_lowest_address = NULL;
#endif
VOLATILE(LONG) m_GCLock = -1;
LONG g_bLowMemoryFromHost = 0;
#ifdef WRITE_BARRIER_CHECK
#define INVALIDGCVALUE (LPVOID)((size_t)0xcccccccd)
// called by the write barrier to update the shadow heap
void updateGCShadow(Object** ptr, Object* val)
{
Object** shadow = (Object**) &g_GCShadow[((BYTE*) ptr - g_lowest_address)];
if ((BYTE*) shadow < g_GCShadowEnd)
{
*shadow = val;
// Ensure that the write to the shadow heap occurs before the read from
// the GC heap so that race conditions are caught by INVALIDGCVALUE.
MemoryBarrier();
if(*ptr!=val)
*shadow = (Object *) INVALIDGCVALUE;
}
}
#endif // WRITE_BARRIER_CHECK
struct changed_seg
{
BYTE * start;
BYTE * end;
size_t gc_index;
bgc_state bgc;
changed_seg_state changed;
};
const int max_saved_changed_segs = 128;
changed_seg saved_changed_segs[max_saved_changed_segs];
int saved_changed_segs_count = 0;
void record_changed_seg (BYTE* start, BYTE* end,
size_t current_gc_index,
bgc_state current_bgc_state,
changed_seg_state changed_state)
{
if (saved_changed_segs_count < max_saved_changed_segs)
{
saved_changed_segs[saved_changed_segs_count].start = start;
saved_changed_segs[saved_changed_segs_count].end = end;
saved_changed_segs[saved_changed_segs_count].gc_index = current_gc_index;
saved_changed_segs[saved_changed_segs_count].bgc = current_bgc_state;
saved_changed_segs[saved_changed_segs_count].changed = changed_state;
saved_changed_segs_count++;
}
else
{
saved_changed_segs_count = 0;
}
}
#endif // !DACCESS_COMPILE
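updateGCShadow above is the recording half of the write-barrier check; the verification half (checkGCWriteBarrier, declared in gcimpl.h) later compares live slots with their shadow copies. A hedged sketch of that comparison, using a hypothetical VerifyShadowRange helper, shows why the MemoryBarrier and the INVALIDGCVALUE sentinel matter: a slot whose shadow disagrees with the heap and is not the sentinel is a store that skipped the barrier.

// Illustrative only; not the shipped checkGCWriteBarrier implementation.
static void VerifyShadowRange(Object** begin, Object** end)
{
    for (Object** slot = begin; slot < end; slot++)
    {
        Object** shadow = (Object**)&g_GCShadow[(BYTE*)slot - g_lowest_address];
        if ((BYTE*)shadow >= g_GCShadowEnd)
            break;                      // slot lies outside the shadowed range
        if (*shadow != *slot && *shadow != (Object*)INVALIDGCVALUE)
        {
            assert(!"object reference stored without a write barrier");
        }
    }
}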

264
src/Native/gc/gcdesc.h Normal file
View file

@ -0,0 +1,264 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
//
// GC Object Pointer Location Series Stuff
//
#ifndef _GCDESC_H_
#define _GCDESC_H_
#ifdef _WIN64
typedef UINT32 HALF_SIZE_T;
#else // _WIN64
typedef UINT16 HALF_SIZE_T;
#endif
typedef size_t *JSlot;
//
// These two classes make up the apparatus with which the object references
// within an object can be found.
//
// CGCDescSeries:
//
// The CGCDescSeries class describes a series of object references within an
// object by describing the size of the series (which has an adjustment which
// will be explained later) and the starting point of the series.
//
// The series size is adjusted when the map is created by subtracting the
// GetBaseSize() of the object. On retrieval of the size the total size
// of the object is added back. For non-array objects the total object
// size is equal to the base size, so this returns the same value. For
// array objects this will yield the size of the data portion of the array.
// Since arrays containing object references will contain ONLY object references
// this is a fast way of handling arrays and normal objects without a
// conditional test
//
//
//
// CGCDesc:
//
// The CGCDesc is a collection of CGCDescSeries objects to describe all the
// different runs of pointers in a particular object. <TODO> [add more on the strange
// way the CGCDesc grows backwards in memory behind the MethodTable]
//</TODO>
struct val_serie_item
{
HALF_SIZE_T nptrs;
HALF_SIZE_T skip;
void set_val_serie_item (HALF_SIZE_T nptrs, HALF_SIZE_T skip)
{
this->nptrs = nptrs;
this->skip = skip;
}
};
struct val_array_series
{
val_serie_item items[1];
size_t m_startOffset;
size_t m_count;
};
typedef DPTR(class CGCDescSeries) PTR_CGCDescSeries;
typedef DPTR(class MethodTable) PTR_MethodTable;
class CGCDescSeries
{
public:
union
{
size_t seriessize; // adjusted length of series (see above) in bytes
val_serie_item val_serie[1]; //coded series for value class array
};
size_t startoffset;
size_t GetSeriesCount ()
{
return seriessize/sizeof(JSlot);
}
VOID SetSeriesCount (size_t newcount)
{
seriessize = newcount * sizeof(JSlot);
}
VOID IncSeriesCount (size_t increment = 1)
{
seriessize += increment * sizeof(JSlot);
}
size_t GetSeriesSize ()
{
return seriessize;
}
VOID SetSeriesSize (size_t newsize)
{
seriessize = newsize;
}
VOID SetSeriesValItem (val_serie_item item, int index)
{
val_serie [index] = item;
}
VOID SetSeriesOffset (size_t newoffset)
{
startoffset = newoffset;
}
size_t GetSeriesOffset ()
{
return startoffset;
}
};
typedef DPTR(class CGCDesc) PTR_CGCDesc;
class CGCDesc
{
// Don't construct me, you have to hand me a ptr to the *top* of my storage in Init.
CGCDesc () {}
//
// NOTE: for alignment reasons, NumSeries is stored as a size_t.
// This makes everything nicely 8-byte aligned on IA64.
//
public:
static size_t ComputeSize (size_t NumSeries)
{
_ASSERTE (SSIZE_T(NumSeries) > 0);
return sizeof(size_t) + NumSeries*sizeof(CGCDescSeries);
}
// For value type array
static size_t ComputeSizeRepeating (size_t NumSeries)
{
_ASSERTE (SSIZE_T(NumSeries) > 0);
return sizeof(size_t) + sizeof(CGCDescSeries) +
(NumSeries-1)*sizeof(val_serie_item);
}
#ifndef DACCESS_COMPILE
static VOID Init (PVOID mem, size_t NumSeries)
{
*((size_t*)mem-1) = NumSeries;
}
static VOID InitValueClassSeries (PVOID mem, size_t NumSeries)
{
*((SSIZE_T*)mem-1) = -((SSIZE_T)NumSeries);
}
#endif
static PTR_CGCDesc GetCGCDescFromMT (MethodTable * pMT)
{
// If it doesn't contain pointers, there isn't a GCDesc
PTR_MethodTable mt(pMT);
#ifndef BINDER
_ASSERTE(mt->ContainsPointersOrCollectible());
#endif
return PTR_CGCDesc(mt);
}
size_t GetNumSeries ()
{
return *(PTR_size_t(PTR_CGCDesc(this))-1);
}
// Returns lowest series in memory.
// Cannot be used for valuetype arrays
PTR_CGCDescSeries GetLowestSeries ()
{
_ASSERTE (SSIZE_T(GetNumSeries()) > 0);
return PTR_CGCDescSeries(PTR_BYTE(PTR_CGCDesc(this))
- ComputeSize(GetNumSeries()));
}
// Returns highest series in memory.
PTR_CGCDescSeries GetHighestSeries ()
{
return PTR_CGCDescSeries(PTR_size_t(PTR_CGCDesc(this))-1)-1;
}
// Returns number of immediate pointers this object has.
// size is only used if you have an array of value types.
#ifndef DACCESS_COMPILE
static size_t GetNumPointers (MethodTable* pMT, size_t ObjectSize, size_t NumComponents)
{
size_t NumOfPointers = 0;
CGCDesc* map = GetCGCDescFromMT(pMT);
CGCDescSeries* cur = map->GetHighestSeries();
SSIZE_T cnt = (SSIZE_T) map->GetNumSeries();
if (cnt > 0)
{
CGCDescSeries* last = map->GetLowestSeries();
while (cur >= last)
{
NumOfPointers += (cur->GetSeriesSize() + ObjectSize) / sizeof(JSlot);
cur--;
}
}
else
{
/* Handle the repeating case - array of valuetypes */
for (SSIZE_T __i = 0; __i > cnt; __i--)
{
NumOfPointers += cur->val_serie[__i].nptrs;
}
NumOfPointers *= NumComponents;
}
return NumOfPointers;
}
#endif
// Size of the entire slot map.
size_t GetSize ()
{
SSIZE_T numSeries = (SSIZE_T) GetNumSeries();
if (numSeries < 0)
{
return ComputeSizeRepeating(-numSeries);
}
else
{
return ComputeSize(numSeries);
}
}
BYTE *GetStartOfGCData()
{
return ((BYTE *)this) - GetSize();
}
private:
BOOL IsValueClassSeries()
{
return ((SSIZE_T) GetNumSeries()) < 0;
}
};
#define MAX_SIZE_FOR_VALUECLASS_IN_ARRAY 0xffff
#define MAX_PTRS_FOR_VALUECLASSS_IN_ARRAY 0xffff
#endif // _GCDESC_H_
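To make the series-size adjustment described at the top of this header concrete, here is a hedged sketch of the canonical scan loop, the same shape GetNumPointers uses: seriessize was biased by subtracting the object's base size when the map was built, so adding the actual object size back gives the byte length of that run of references. VisitObjectRefs and pfnVisit are hypothetical names, and the sketch covers only the positive series count case (i.e. not value-class arrays).

// Illustrative only: enumerate every object-reference slot in an object.
static void VisitObjectRefs(MethodTable* pMT, BYTE* obj, size_t objectSize,
                            void (*pfnVisit)(Object** slot))
{
    CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
    CGCDescSeries* cur  = map->GetHighestSeries();
    CGCDescSeries* last = map->GetLowestSeries();
    while (cur >= last)
    {
        // Undo the -GetBaseSize() bias: the true series length in bytes is
        // the stored (adjusted) size plus the total object size.
        BYTE* slot    = obj + cur->GetSeriesOffset();
        BYTE* slotEnd = slot + cur->GetSeriesSize() + objectSize;
        while (slot < slotEnd)
        {
            pfnVisit((Object**)slot);
            slot += sizeof(JSlot);
        }
        cur--;
    }
}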

804
src/Native/gc/gcee.cpp Normal file
View file

@ -0,0 +1,804 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
//
// sets up vars for GC
#include "gcpriv.h"
#ifndef DACCESS_COMPILE
COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeInGC = 0);
COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeSinceLastGCEnd = 0);
void GCHeap::UpdatePreGCCounters()
{
#if defined(ENABLE_PERF_COUNTERS)
#ifdef MULTIPLE_HEAPS
gc_heap* hp = 0;
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
size_t allocation_0 = 0;
size_t allocation_3 = 0;
// Publish perf stats
g_TotalTimeInGC = GET_CYCLE_COUNT();
#ifdef MULTIPLE_HEAPS
int hn = 0;
for (hn = 0; hn < gc_heap::n_heaps; hn++)
{
hp = gc_heap::g_heaps [hn];
allocation_0 +=
dd_desired_allocation (hp->dynamic_data_of (0))-
dd_new_allocation (hp->dynamic_data_of (0));
allocation_3 +=
dd_desired_allocation (hp->dynamic_data_of (max_generation+1))-
dd_new_allocation (hp->dynamic_data_of (max_generation+1));
}
#else
allocation_0 =
dd_desired_allocation (hp->dynamic_data_of (0))-
dd_new_allocation (hp->dynamic_data_of (0));
allocation_3 =
dd_desired_allocation (hp->dynamic_data_of (max_generation+1))-
dd_new_allocation (hp->dynamic_data_of (max_generation+1));
#endif //MULTIPLE_HEAPS
GetPerfCounters().m_GC.cbAlloc += allocation_0;
GetPerfCounters().m_GC.cbAlloc += allocation_3;
GetPerfCounters().m_GC.cbLargeAlloc += allocation_3;
GetPerfCounters().m_GC.cPinnedObj = 0;
#ifdef _PREFAST_
// prefix complains about us dereferencing hp in wks build even though we only access static members
// this way. not sure how to shut it up except for this ugly workaround:
PREFIX_ASSUME( hp != NULL);
#endif //_PREFAST_
if (hp->settings.reason == reason_induced IN_STRESS_HEAP( && !hp->settings.stress_induced))
{
COUNTER_ONLY(GetPerfCounters().m_GC.cInducedGCs++);
}
GetPerfCounters().m_Security.timeRTchecks = 0;
GetPerfCounters().m_Security.timeRTchecksBase = 1; // To avoid divide by zero
#endif //ENABLE_PERF_COUNTERS
#ifdef MULTIPLE_HEAPS
//take the first heap....
gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings;
#else
gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS
#ifdef FEATURE_EVENT_TRACE
ETW::GCLog::ETW_GC_INFO Info;
Info.GCStart.Count = (ULONG)pSettings->gc_index;
Info.GCStart.Depth = (ULONG)pSettings->condemned_generation;
Info.GCStart.Reason = (ETW::GCLog::ETW_GC_INFO::GC_REASON)((int)(pSettings->reason));
Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_NGC;
if (pSettings->concurrent)
{
Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_BGC;
}
#ifdef BACKGROUND_GC
else if (Info.GCStart.Depth < max_generation)
{
if (pSettings->background_p)
Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_FGC;
}
#endif //BACKGROUND_GC
ETW::GCLog::FireGcStartAndGenerationRanges(&Info);
#endif // FEATURE_EVENT_TRACE
}
void GCHeap::UpdatePostGCCounters()
{
#ifdef FEATURE_EVENT_TRACE
// Use of temporary variables to avoid rotor build warnings
ETW::GCLog::ETW_GC_INFO Info;
#ifdef MULTIPLE_HEAPS
//take the first heap....
gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings;
#else
gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS
int condemned_gen = pSettings->condemned_generation;
Info.GCEnd.Depth = condemned_gen;
Info.GCEnd.Count = (ULONG)pSettings->gc_index;
ETW::GCLog::FireGcEndAndGenerationRanges(Info.GCEnd.Count, Info.GCEnd.Depth);
int xGen;
ETW::GCLog::ETW_GC_INFO HeapInfo;
ZeroMemory(&HeapInfo, sizeof(HeapInfo));
size_t youngest_gen_size = 0;
#ifdef MULTIPLE_HEAPS
//take the first heap....
gc_heap* hp1 = gc_heap::g_heaps[0];
#else
gc_heap* hp1 = pGenGCHeap;
#endif //MULTIPLE_HEAPS
size_t promoted_finalization_mem = 0;
totalSurvivedSize = gc_heap::get_total_survived_size();
for (xGen = 0; xGen <= (max_generation+1); xGen++)
{
size_t gensize = 0;
size_t promoted_mem = 0;
#ifdef MULTIPLE_HEAPS
int hn = 0;
for (hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp2 = gc_heap::g_heaps [hn];
dynamic_data* dd2 = hp2->dynamic_data_of (xGen);
// Generation 0 is empty (if there isn't demotion) so its size is 0
// It is more interesting to report the desired size before next collection.
// Gen 1 is also more accurate if desired is reported due to sampling intervals.
if (xGen == 0)
{
youngest_gen_size += dd_desired_allocation (hp2->dynamic_data_of (xGen));
}
gensize += hp2->generation_size(xGen);
if (xGen <= condemned_gen)
{
promoted_mem += dd_promoted_size (dd2);
}
if ((xGen == (max_generation+1)) && (condemned_gen == max_generation))
{
promoted_mem += dd_promoted_size (dd2);
}
if (xGen == 0)
{
promoted_finalization_mem += dd_freach_previous_promotion (dd2);
}
}
#else
if (xGen == 0)
{
youngest_gen_size = dd_desired_allocation (hp1->dynamic_data_of (xGen));
}
gensize = hp1->generation_size(xGen);
if (xGen <= condemned_gen)
{
promoted_mem = dd_promoted_size (hp1->dynamic_data_of (xGen));
}
if ((xGen == (max_generation+1)) && (condemned_gen == max_generation))
{
promoted_mem = dd_promoted_size (hp1->dynamic_data_of (max_generation+1));
}
if (xGen == 0)
{
promoted_finalization_mem = dd_freach_previous_promotion (hp1->dynamic_data_of (xGen));
}
#endif //MULTIPLE_HEAPS
HeapInfo.HeapStats.GenInfo[xGen].GenerationSize = gensize;
HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize = promoted_mem;
}
{
#ifdef SIMPLE_DPRINTF
dprintf (2, ("GC#%d: 0: %Id(%Id); 1: %Id(%Id); 2: %Id(%Id); 3: %Id(%Id)",
Info.GCEnd.Count,
HeapInfo.HeapStats.GenInfo[0].GenerationSize,
HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[1].GenerationSize,
HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[2].GenerationSize,
HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[3].GenerationSize,
HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize));
#endif //SIMPLE_DPRINTF
}
HeapInfo.HeapStats.FinalizationPromotedSize = promoted_finalization_mem;
HeapInfo.HeapStats.FinalizationPromotedCount = GetFinalizablePromotedCount();
#if defined(ENABLE_PERF_COUNTERS)
// if a max gen garbage collection was performed, resync the GC Handle counter;
// if threads are currently suspended, we do not need to obtain a lock on each handle table
if (condemned_gen == max_generation)
GetPerfCounters().m_GC.cHandles = HndCountAllHandles(!GCHeap::IsGCInProgress());
for (xGen = 0; xGen <= (max_generation+1); xGen++)
{
_ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize));
_ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize));
if (xGen == (max_generation+1))
{
GetPerfCounters().m_GC.cLrgObjSize = static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize);
}
else
{
GetPerfCounters().m_GC.cGenHeapSize[xGen] = ((xGen == 0) ?
youngest_gen_size :
static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize));
}
// the perf counters only count the promoted size for gen0 and gen1.
if (xGen < max_generation)
{
GetPerfCounters().m_GC.cbPromotedMem[xGen] = static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize);
}
if (xGen <= max_generation)
{
GetPerfCounters().m_GC.cGenCollections[xGen] =
dd_collection_count (hp1->dynamic_data_of (xGen));
}
}
//Committed memory
{
size_t committed_mem = 0;
size_t reserved_mem = 0;
#ifdef MULTIPLE_HEAPS
int hn = 0;
for (hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp2 = gc_heap::g_heaps [hn];
#else
gc_heap* hp2 = hp1;
{
#endif //MULTIPLE_HEAPS
heap_segment* seg =
generation_start_segment (hp2->generation_of (max_generation));
while (seg)
{
committed_mem += heap_segment_committed (seg) -
heap_segment_mem (seg);
reserved_mem += heap_segment_reserved (seg) -
heap_segment_mem (seg);
seg = heap_segment_next (seg);
}
//same for large segments
seg =
generation_start_segment (hp2->generation_of (max_generation + 1));
while (seg)
{
committed_mem += heap_segment_committed (seg) -
heap_segment_mem (seg);
reserved_mem += heap_segment_reserved (seg) -
heap_segment_mem (seg);
seg = heap_segment_next (seg);
}
#ifdef MULTIPLE_HEAPS
}
#else
}
#endif //MULTIPLE_HEAPS
GetPerfCounters().m_GC.cTotalCommittedBytes =
committed_mem;
GetPerfCounters().m_GC.cTotalReservedBytes =
reserved_mem;
}
_ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.FinalizationPromotedSize));
_ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.FinalizationPromotedCount));
GetPerfCounters().m_GC.cbPromotedFinalizationMem = static_cast<size_t>(HeapInfo.HeapStats.FinalizationPromotedSize);
GetPerfCounters().m_GC.cSurviveFinalize = static_cast<size_t>(HeapInfo.HeapStats.FinalizationPromotedCount);
// Compute Time in GC
PERF_COUNTER_TIMER_PRECISION _currentPerfCounterTimer = GET_CYCLE_COUNT();
g_TotalTimeInGC = _currentPerfCounterTimer - g_TotalTimeInGC;
PERF_COUNTER_TIMER_PRECISION _timeInGCBase = (_currentPerfCounterTimer - g_TotalTimeSinceLastGCEnd);
if (_timeInGCBase < g_TotalTimeInGC)
g_TotalTimeInGC = 0; // isn't likely except on some SMP machines-- perhaps make sure that
// _timeInGCBase >= g_TotalTimeInGC by setting affinity in GET_CYCLE_COUNT
while (_timeInGCBase > UINT_MAX)
{
_timeInGCBase = _timeInGCBase >> 8;
g_TotalTimeInGC = g_TotalTimeInGC >> 8;
}
// Update Total Time
GetPerfCounters().m_GC.timeInGC = (DWORD)g_TotalTimeInGC;
GetPerfCounters().m_GC.timeInGCBase = (DWORD)_timeInGCBase;
if (!GetPerfCounters().m_GC.cProcessID)
GetPerfCounters().m_GC.cProcessID = (size_t)GetCurrentProcessId();
g_TotalTimeSinceLastGCEnd = _currentPerfCounterTimer;
HeapInfo.HeapStats.PinnedObjectCount = (ULONG)(GetPerfCounters().m_GC.cPinnedObj);
HeapInfo.HeapStats.SinkBlockCount = (ULONG)(GetPerfCounters().m_GC.cSinkBlocks);
HeapInfo.HeapStats.GCHandleCount = (ULONG)(GetPerfCounters().m_GC.cHandles);
#endif //ENABLE_PERF_COUNTERS
FireEtwGCHeapStats_V1(HeapInfo.HeapStats.GenInfo[0].GenerationSize, HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[1].GenerationSize, HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[2].GenerationSize, HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize,
HeapInfo.HeapStats.GenInfo[3].GenerationSize, HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize,
HeapInfo.HeapStats.FinalizationPromotedSize,
HeapInfo.HeapStats.FinalizationPromotedCount,
HeapInfo.HeapStats.PinnedObjectCount,
HeapInfo.HeapStats.SinkBlockCount,
HeapInfo.HeapStats.GCHandleCount,
GetClrInstanceId());
#endif // FEATURE_EVENT_TRACE
}
size_t GCHeap::GetCurrentObjSize()
{
return (totalSurvivedSize + gc_heap::get_total_allocated());
}
size_t GCHeap::GetLastGCStartTime(int generation)
{
#ifdef MULTIPLE_HEAPS
gc_heap* hp = gc_heap::g_heaps[0];
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
return dd_time_clock (hp->dynamic_data_of (generation));
}
size_t GCHeap::GetLastGCDuration(int generation)
{
#ifdef MULTIPLE_HEAPS
gc_heap* hp = gc_heap::g_heaps[0];
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
return dd_gc_elapsed_time (hp->dynamic_data_of (generation));
}
size_t GCHeap::GetNow()
{
#ifdef MULTIPLE_HEAPS
gc_heap* hp = gc_heap::g_heaps[0];
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
return hp->get_time_now();
}
void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, DWORD dwFlags)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
Object *pObj = *ppObject;
#ifdef INTERIOR_POINTERS
if (dwFlags & GC_CALL_INTERIOR)
{
BYTE *o = (BYTE*)pObj;
gc_heap* hp = gc_heap::heap_of (o);
if ((o < hp->gc_low) || (o >= hp->gc_high))
{
return;
}
pObj = (Object*) hp->find_object(o, hp->gc_low);
}
#endif //INTERIOR_POINTERS
ScanRootsHelper(&pObj, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// This is called only if we've determined that either:
// a) The Profiling API wants to do a walk of the heap, and it has pinned the
// profiler in place (so it cannot be detached), and it's thus safe to call into the
// profiler, OR
// b) ETW infrastructure wants to do a walk of the heap either to log roots,
// objects, or both.
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots but not objects (or vice versa), the walk below performs only
// the parts that the active callers actually requested.
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
{
ProfilingScanContext SC(fProfilerPinned);
// **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
{
#ifdef MULTIPLE_HEAPS
int hn;
// Must emulate each GC thread number so we can hit each
// heap for enumerating the roots.
for (hn = 0; hn < gc_heap::n_heaps; hn++)
{
// Ask the vm to go over all of the roots for this specific
// heap.
gc_heap* hp = gc_heap::g_heaps [hn];
SC.thread_number = hn;
CNameSpace::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
hp->finalize_queue->GcScanRoots(&ScanRootsHelper, hn, &SC);
}
#else
// Ask the vm to go over all of the roots
CNameSpace::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
pGenGCHeap->finalize_queue->GcScanRoots(&ScanRootsHelper, 0, &SC);
#endif // MULTIPLE_HEAPS
// Handles are kept independent of wks/svr/concurrent builds
SC.dwEtwRootKind = kEtwGCRootKindHandle;
CNameSpace::GcScanHandlesForProfilerAndETW(max_generation, &SC);
// indicate that regular handle scanning is over, so we can flush the buffered roots
// to the profiler. (This is for profapi only. ETW will flush after the
// entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
#if defined (GC_PROFILING)
if (fProfilerPinned)
{
g_profControlBlock.pProfInterface->EndRootReferences2(&SC.pHeapId);
}
#endif // defined (GC_PROFILING)
}
// **** Scan dependent handles: only if the profiler supports it or ETW wants roots
if ((fProfilerPinned && CORProfilerTrackConditionalWeakTableElements()) ||
fShouldWalkHeapRootsForEtw)
{
// GcScanDependentHandlesForProfiler double-checks
// CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
CNameSpace::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
// indicate that dependent handle scanning is over, so we can flush the buffered roots
// to the profiler. (This is for profapi only. ETW will flush after the
// entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
#if defined (GC_PROFILING)
if (fProfilerPinned && CORProfilerTrackConditionalWeakTableElements())
{
g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(&SC.pHeapId);
}
#endif // defined (GC_PROFILING)
}
ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
// **** Walk objects on heap: only if profiling API wants them or ETW wants them.
if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
{
#ifdef MULTIPLE_HEAPS
int hn;
// Walk the heap and provide the objref to the profiler
for (hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp = gc_heap::g_heaps [hn];
hp->walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */);
}
#else
gc_heap::walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE);
#endif //MULTIPLE_HEAPS
}
// **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
// should be flushed into the ETW stream
if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
{
ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
}
}
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeap()
{
BOOL fWalkedHeapForProfiler = FALSE;
#ifdef FEATURE_EVENT_TRACE
if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
ETW::GCLog::WalkStaticsAndCOMForETW();
BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
BOOL fShouldWalkHeapRootsForEtw = FALSE;
BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE
#if defined (GC_PROFILING)
{
BEGIN_PIN_PROFILER(CORProfilerTrackGC());
GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
fWalkedHeapForProfiler = TRUE;
END_PIN_PROFILER();
}
#endif // defined (GC_PROFILING)
#ifdef FEATURE_EVENT_TRACE
// If the profiling API didn't want us to walk the heap but ETW does, then do the
// walk here
if (!fWalkedHeapForProfiler &&
(fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
{
GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
}
#endif // FEATURE_EVENT_TRACE
}
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
}
DWORD GCHeap::WaitUntilGCComplete(BOOL bConsiderGCStart)
{
if (bConsiderGCStart)
{
if (gc_heap::gc_started)
{
gc_heap::wait_for_gc_done();
}
}
DWORD dwWaitResult = NOERROR;
if (GcInProgress)
{
ASSERT( WaitForGCEvent->IsValid() );
#ifdef DETECT_DEADLOCK
// wait for GC to complete
BlockAgain:
dwWaitResult = WaitForGCEvent->Wait(DETECT_DEADLOCK_TIMEOUT, FALSE );
if (dwWaitResult == WAIT_TIMEOUT) {
// Even in retail, stop in the debugger if available. Ideally, the
// following would use DebugBreak, but debspew.h makes this a null
// macro in retail. Note that in debug, we don't use the debspew.h
// macros because these take a critical section that may have been
// taken by a suspended thread.
FreeBuildDebugBreak();
goto BlockAgain;
}
#else //DETECT_DEADLOCK
dwWaitResult = WaitForGCEvent->Wait(INFINITE, FALSE );
#endif //DETECT_DEADLOCK
}
return dwWaitResult;
}
void GCHeap::SetGCInProgress(BOOL fInProgress)
{
GcInProgress = fInProgress;
}
CLREvent * GCHeap::GetWaitForGCEvent()
{
return WaitForGCEvent;
}
void GCHeap::WaitUntilConcurrentGCComplete()
{
#ifdef BACKGROUND_GC
if (pGenGCHeap->settings.concurrent)
pGenGCHeap->background_gc_wait();
#endif //BACKGROUND_GC
}
BOOL GCHeap::IsConcurrentGCInProgress()
{
#ifdef BACKGROUND_GC
return pGenGCHeap->settings.concurrent;
#else
return FALSE;
#endif //BACKGROUND_GC
}
#ifdef FEATURE_EVENT_TRACE
void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_number, BYTE* object_address)
{
TypeHandle th = GetThread()->GetTHAllocContextObj();
if (th != 0)
{
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
th.GetName(strTypeName);
FireEtwGCAllocationTick_V3((ULONG)allocation_amount,
((gen_number == 0) ? ETW::GCLog::ETW_GC_INFO::AllocationSmall : ETW::GCLog::ETW_GC_INFO::AllocationLarge),
GetClrInstanceId(),
allocation_amount,
th.GetMethodTable(),
strTypeName.GetUnicode(),
heap_number,
object_address
);
}
}
void gc_heap::fire_etw_pin_object_event (BYTE* object, BYTE** ppObject)
{
Object* obj = (Object*)object;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
EX_TRY
{
FAULT_NOT_FATAL();
TypeHandle th = obj->GetGCSafeTypeHandleIfPossible();
if(th != NULL)
{
th.GetName(strTypeName);
}
FireEtwPinObjectAtGCTime(ppObject,
object,
obj->GetSize(),
strTypeName.GetUnicode(),
GetClrInstanceId());
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
}
#endif // FEATURE_EVENT_TRACE
DWORD gc_heap::user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms)
{
Thread* pCurThread = NULL;
BOOL mode = FALSE;
DWORD dwWaitResult = NOERROR;
if (!no_mode_change)
{
pCurThread = GetThread();
mode = pCurThread ? pCurThread->PreemptiveGCDisabled() : FALSE;
if (mode)
{
pCurThread->EnablePreemptiveGC();
}
}
dwWaitResult = event->Wait(time_out_ms, FALSE);
if (!no_mode_change && mode)
{
pCurThread->DisablePreemptiveGC();
}
return dwWaitResult;
}
#ifdef BACKGROUND_GC
// Wait for background gc to finish
DWORD gc_heap::background_gc_wait (alloc_wait_reason awr, int time_out_ms)
{
dprintf(2, ("Waiting end of background gc"));
assert (background_gc_done_event.IsValid());
fire_alloc_wait_event_begin (awr);
DWORD dwRet = user_thread_wait (&background_gc_done_event, FALSE, time_out_ms);
fire_alloc_wait_event_end (awr);
dprintf(2, ("Waiting end of background gc is done"));
return dwRet;
}
// Wait for background gc to finish sweeping large objects
void gc_heap::background_gc_wait_lh (alloc_wait_reason awr)
{
dprintf(2, ("Waiting end of background large sweep"));
assert (gc_lh_block_event.IsValid());
fire_alloc_wait_event_begin (awr);
user_thread_wait (&gc_lh_block_event, FALSE);
fire_alloc_wait_event_end (awr);
dprintf(2, ("Waiting end of background large sweep is done"));
}
#endif //BACKGROUND_GC
/******************************************************************************/
::GCHeap* CreateGCHeap() {
return new(nothrow) GCHeap(); // we return wks or svr
}
void GCHeap::TraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
heap_segment* seg = 0;
#ifdef MULTIPLE_HEAPS
// walk segments in each heap
for (int i = 0; i < gc_heap::n_heaps; i++)
{
gc_heap* h = gc_heap::g_heaps [i];
#else
{
gc_heap* h = pGenGCHeap;
#endif //MULTIPLE_HEAPS
for (seg = generation_start_segment (h->generation_of (max_generation)); seg != 0; seg = heap_segment_next(seg))
{
ETW::GCLog::ETW_GC_INFO Info;
Info.GCCreateSegment.Address = (size_t)heap_segment_mem(seg);
Info.GCCreateSegment.Size = (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg));
Info.GCCreateSegment.Type = (heap_segment_read_only_p (seg) ?
ETW::GCLog::ETW_GC_INFO::READ_ONLY_HEAP :
ETW::GCLog::ETW_GC_INFO::SMALL_OBJECT_HEAP);
FireEtwGCCreateSegment_V1(Info.GCCreateSegment.Address, Info.GCCreateSegment.Size, Info.GCCreateSegment.Type, GetClrInstanceId());
}
// large obj segments
for (seg = generation_start_segment (h->generation_of (max_generation+1)); seg != 0; seg = heap_segment_next(seg))
{
FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(seg),
(size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP,
GetClrInstanceId());
}
}
#endif // FEATURE_EVENT_TRACE
}
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context)
{
pGenGCHeap->descr_generations_to_profiler(fn, context);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#if defined(BACKGROUND_GC) && defined(FEATURE_REDHAWK)
// Helper used to wrap the start routine of background GC threads so we can do things like initialize the
// Redhawk thread state which requires running in the new thread's context.
DWORD WINAPI gc_heap::rh_bgc_thread_stub(void * pContext)
{
rh_bgc_thread_ctx * pStartContext = (rh_bgc_thread_ctx*)pContext;
// Initialize the Thread for this thread. The false being passed indicates that the thread store lock
// should not be acquired as part of this operation. This is necessary because this thread is created in
// the context of a garbage collection and the lock is already held by the GC.
ASSERT(GCHeap::GetGCHeap()->IsGCInProgress());
ThreadStore::AttachCurrentThread(false);
// Inform the GC which Thread* we are.
pStartContext->m_pRealContext->bgc_thread = GetThread();
// Run the real start procedure and capture its return code on exit.
return pStartContext->m_pRealStartRoutine(pStartContext->m_pRealContext);
}
#endif // BACKGROUND_GC && FEATURE_REDHAWK
#endif // !DACCESS_COMPILE
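One detail of UpdatePostGCCounters above that is easy to misread is the time-in-GC bookkeeping: g_TotalTimeInGC ends up holding the cycles spent inside this collection and _timeInGCBase the cycles elapsed since the previous collection ended, and the perf-counter consumer later derives "% time in GC" from their ratio. Both values are shifted right together until the base fits in a DWORD, which preserves that ratio. A hedged, standalone sketch with made-up numbers:

// Illustrative only; mirrors the scaling loop in UpdatePostGCCounters.
#include <cstdint>
#include <climits>
#include <cstdio>
int main()
{
    uint64_t timeInGC     = 0x120000000ULL;  // cycles spent inside this GC
    uint64_t timeInGCBase = 0x500000000ULL;  // cycles since the previous GC ended
    while (timeInGCBase > UINT_MAX)          // scale both down together so they
    {                                        // fit in the DWORD perf counters
        timeInGCBase >>= 8;
        timeInGC     >>= 8;
    }
    // The consumer reports timeInGC / timeInGCBase; here that is about 22.5%.
    std::printf("%% time in GC: %llu\n",
                (unsigned long long)(100 * timeInGC / timeInGCBase));
    return 0;
}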

35
src/Native/gc/gceesvr.cpp Normal file
View file

@ -0,0 +1,35 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#include "common.h"
#if defined(FEATURE_SVR_GC)
#include "gcenv.h"
#include "gc.h"
#include "gcscan.h"
#define SERVER_GC 1
namespace SVR {
#include "gcimpl.h"
#include "gcee.cpp"
}
#if defined(FEATURE_PAL) && !defined(DACCESS_COMPILE)
// Initializes the SVR DAC table entries
void DacGlobals::InitializeSVREntries(TADDR baseAddress)
{
#define DEFINE_DACVAR_SVR(id_type, size, id, var) id = PTR_TO_TADDR(&var) - baseAddress;
#include "dacvars.h"
}
#endif // FEATURE_PAL && !DACCESS_COMPILE
#endif // FEATURE_SVR_GC

23
src/Native/gc/gceewks.cpp Normal file
View file

@ -0,0 +1,23 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#include "gcscan.h"
#ifdef SERVER_GC
#undef SERVER_GC
#endif
namespace WKS {
#include "gcimpl.h"
#include "gcee.cpp"
}
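gceesvr.cpp and gceewks.cpp above show how one set of GC sources produces both flavors: the same implementation files (gcimpl.h, gcee.cpp) are textually included inside namespace SVR and namespace WKS, with SERVER_GC defined only for the server translation unit so the shared code compiles differently in each. A hedged, self-contained illustration of the pattern, using made-up names and a macro in place of a real #include:

#include <cstdio>
// Hypothetical stand-in for the shared implementation text that gceewks.cpp
// and gceesvr.cpp each pull into their own namespace.
#define GC_SHARED_IMPL \
    struct gc_heap { static int n_heaps; }; \
    int gc_heap::n_heaps = IS_SERVER_GC ? 4 : 1;

namespace WKS { constexpr bool IS_SERVER_GC = false; GC_SHARED_IMPL }  // workstation flavor
namespace SVR { constexpr bool IS_SERVER_GC = true;  GC_SHARED_IMPL }  // server flavor

int main()
{
    // Two independent copies of the "same" code, configured per flavor.
    std::printf("WKS heaps=%d, SVR heaps=%d\n",
                WKS::gc_heap::n_heaps, SVR::gc_heap::n_heaps);
    return 0;
}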

318
src/Native/gc/gcimpl.h Normal file
View file

@ -0,0 +1,318 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#ifndef GCIMPL_H_
#define GCIMPL_H_
#define CLREvent CLREventStatic
#ifdef SERVER_GC
#define MULTIPLE_HEAPS 1
#endif // SERVER_GC
#ifdef MULTIPLE_HEAPS
#define PER_HEAP
#else //MULTIPLE_HEAPS
#define PER_HEAP static
#endif // MULTIPLE_HEAPS
#define PER_HEAP_ISOLATED static
#if defined(WRITE_BARRIER_CHECK) && !defined (MULTIPLE_HEAPS)
void initGCShadow();
void deleteGCShadow();
void checkGCWriteBarrier();
#else
inline void initGCShadow() {}
inline void deleteGCShadow() {}
inline void checkGCWriteBarrier() {}
#endif
void GCProfileWalkHeap();
class GCHeap;
class gc_heap;
class CFinalize;
// TODO : it would be easier to make this an ORed value
enum gc_reason
{
reason_alloc_soh = 0,
reason_induced = 1,
reason_lowmemory = 2,
reason_empty = 3,
reason_alloc_loh = 4,
reason_oos_soh = 5,
reason_oos_loh = 6,
reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
reason_gcstress = 8, // this turns into reason_induced & gc_mechanisms.stress_induced = true
reason_lowmemory_blocking = 9,
reason_induced_compacting = 10,
reason_lowmemory_host = 11,
reason_max
};
class GCHeap : public ::GCHeap
{
protected:
#ifdef MULTIPLE_HEAPS
gc_heap* pGenGCHeap;
#else
#define pGenGCHeap ((gc_heap*)0)
#endif //MULTIPLE_HEAPS
friend class CFinalize;
friend class gc_heap;
friend struct ::alloc_context;
friend void EnterAllocLock();
friend void LeaveAllocLock();
friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, DWORD dwFlags);
friend void GCProfileWalkHeap();
public:
//In order to keep gc.cpp cleaner, ugly EE specific code is relegated to methods.
static void UpdatePreGCCounters();
static void UpdatePostGCCounters();
public:
GCHeap(){};
~GCHeap(){};
/* BaseGCHeap Methods*/
PER_HEAP_ISOLATED HRESULT Shutdown ();
size_t GetTotalBytesInUse ();
// Gets the amount of bytes objects currently occupy on the GC heap.
size_t GetCurrentObjSize();
size_t GetLastGCStartTime(int generation);
size_t GetLastGCDuration(int generation);
size_t GetNow();
void TraceGCSegments ();
void PublishObject(BYTE* obj);
BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
DWORD WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE);
void SetGCInProgress(BOOL fInProgress);
CLREvent * GetWaitForGCEvent();
HRESULT Initialize ();
//flags can be GC_ALLOC_CONTAINS_REF GC_ALLOC_FINALIZE
Object* Alloc (size_t size, DWORD flags);
#ifdef FEATURE_64BIT_ALIGNMENT
Object* AllocAlign8 (size_t size, DWORD flags);
Object* AllocAlign8 (alloc_context* acontext, size_t size, DWORD flags);
private:
Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, DWORD flags);
public:
#endif // FEATURE_64BIT_ALIGNMENT
Object* AllocLHeap (size_t size, DWORD flags);
Object* Alloc (alloc_context* acontext, size_t size, DWORD flags);
void FixAllocContext (alloc_context* acontext,
BOOL lockp, void* arg, void *heap);
Object* GetContainingObject(void *pInteriorPtr);
#ifdef MULTIPLE_HEAPS
static void AssignHeap (alloc_context* acontext);
static GCHeap* GetHeap (int);
#endif //MULTIPLE_HEAPS
int GetHomeHeapNumber ();
bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number);
int GetNumberOfHeaps ();
void HideAllocContext(alloc_context*);
void RevealAllocContext(alloc_context*);
static BOOL IsLargeObject(MethodTable *mt);
BOOL IsObjectInFixedHeap(Object *pObj);
HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode=collection_blocking);
////
// GC callback functions
// Check if an argument is promoted (ONLY CALL DURING
// THE PROMOTIONSGRANTED CALLBACK.)
BOOL IsPromoted (Object *object);
size_t GetPromotedBytes (int heap_index);
int CollectionCount (int generation, int get_bgc_fgc_count = 0);
// promote an object
PER_HEAP_ISOLATED void Promote (Object** object,
ScanContext* sc,
DWORD flags=0);
// Find the relocation address for an object
PER_HEAP_ISOLATED void Relocate (Object** object,
ScanContext* sc,
DWORD flags=0);
HRESULT Init (size_t heapSize);
//Register an object for finalization
bool RegisterForFinalization (int gen, Object* obj);
//Unregister an object for finalization
void SetFinalizationRun (Object* obj);
//returns the generation number of an object (not valid during relocation)
unsigned WhichGeneration (Object* object);
// returns TRUE if the object is ephemeral
BOOL IsEphemeral (Object* object);
BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE);
#ifdef VERIFY_HEAP
void ValidateObjectMember (Object *obj);
#endif //VERIFY_HEAP
PER_HEAP size_t ApproxTotalBytesInUse(BOOL small_heap_only = FALSE);
PER_HEAP size_t ApproxFreeBytes();
unsigned GetCondemnedGeneration();
int GetGcLatencyMode();
int SetGcLatencyMode(int newLatencyMode);
int GetLOHCompactionMode();
void SetLOHCompactionMode(int newLOHCompactionyMode);
BOOL RegisterForFullGCNotification(DWORD gen2Percentage,
DWORD lohPercentage);
BOOL CancelFullGCNotification();
int WaitForFullGCApproach(int millisecondsTimeout);
int WaitForFullGCComplete(int millisecondsTimeout);
int StartNoGCRegion(ULONGLONG totalSize, BOOL lohSizeKnown, ULONGLONG lohSize, BOOL disallowFullBlockingGC);
int EndNoGCRegion();
PER_HEAP_ISOLATED unsigned GetMaxGeneration();
unsigned GetGcCount();
Object* GetNextFinalizable() { return GetNextFinalizableObject(); };
size_t GetNumberOfFinalizable() { return GetNumberFinalizableObjects(); }
PER_HEAP_ISOLATED HRESULT GetGcCounters(int gen, gc_counters* counters);
size_t GetValidSegmentSize(BOOL large_seg = FALSE);
static size_t GetValidGen0MaxSize(size_t seg_size);
void SetReservedVMLimit (size_t vmlimit);
PER_HEAP_ISOLATED Object* GetNextFinalizableObject();
PER_HEAP_ISOLATED size_t GetNumberFinalizableObjects();
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
void SetFinalizeQueueForShutdown(BOOL fHasLock);
BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers);
BOOL ShouldRestartFinalizerWatchDog();
void SetCardsAfterBulkCopy( Object**, size_t);
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void WalkObject (Object* obj, walk_fn fn, void* context);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
public: // FIX
// Lock for finalization
PER_HEAP_ISOLATED
VOLATILE(LONG) m_GCFLock;
PER_HEAP_ISOLATED BOOL GcCollectClasses;
PER_HEAP_ISOLATED
VOLATILE(BOOL) GcInProgress; // used for syncing w/GC
PER_HEAP_ISOLATED VOLATILE(unsigned) GcCount;
PER_HEAP_ISOLATED unsigned GcCondemnedGeneration;
// calculated at the end of a GC.
PER_HEAP_ISOLATED size_t totalSurvivedSize;
// Use only for GC tracing.
PER_HEAP unsigned int GcDuration;
size_t GarbageCollectGeneration (unsigned int gen=0, gc_reason reason=reason_empty);
// Interface with gc_heap
size_t GarbageCollectTry (int generation, BOOL low_memory_p=FALSE, int mode=collection_blocking);
#ifdef FEATURE_BASICFREEZE
// frozen segment management functions
virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo);
#endif // FEATURE_BASICFREEZE
void WaitUntilConcurrentGCComplete (); // Use in managed threads
#ifndef DACCESS_COMPILE
HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout); // Use in native threads. TRUE if it succeeded, FALSE if it failed or timed out
#endif
BOOL IsConcurrentGCInProgress();
// Enable/disable concurrent GC
void TemporaryEnableConcurrentGC();
void TemporaryDisableConcurrentGC();
BOOL IsConcurrentGCEnabled();
PER_HEAP_ISOLATED CLREvent *WaitForGCEvent; // used for syncing w/GC
PER_HEAP_ISOLATED CFinalize* m_Finalize;
PER_HEAP_ISOLATED gc_heap* Getgc_heap();
private:
static bool SafeToRestartManagedThreads()
{
// Note: this routine should return true when the last barrier
// to threads returning to cooperative mode is down after gc.
// In other words, if the sequence in GCHeap::RestartEE changes,
// the condition here may have to change as well.
return g_TrapReturningThreads == 0;
}
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
public:
//return TRUE if GC actually happens, otherwise FALSE
BOOL StressHeap(alloc_context * acontext = 0);
protected:
// only used in BACKGROUND_GC, but the symbol is not defined yet...
PER_HEAP_ISOLATED int gc_stress_fgcs_in_bgc;
#if !defined(MULTIPLE_HEAPS)
// handles to hold the string objects that will force GC movement
enum { NUM_HEAP_STRESS_OBJS = 8 };
PER_HEAP OBJECTHANDLE m_StressObjs[NUM_HEAP_STRESS_OBJS];
PER_HEAP int m_CurStressObj;
#endif // !defined(MULTIPLE_HEAPS)
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef VERIFY_HEAP
public:
Object * NextObj (Object * object);
#ifdef FEATURE_BASICFREEZE
BOOL IsInFrozenSegment (Object * object);
#endif //FEATURE_BASICFREEZE
#endif //VERIFY_HEAP
};
#endif // GCIMPL_H_
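The PER_HEAP / PER_HEAP_ISOLATED macros defined at the top of this header are what let the shared gc_heap code be written once: in the workstation build (no MULTIPLE_HEAPS) every per-heap field collapses to a static and pGenGCHeap is the dummy ((gc_heap*)0), so expressions of the form hp->field still compile while touching the single static copy. A hedged illustration with a made-up field name; flip the SERVER_SKETCH define to see both expansions.

#include <cstdio>
// #define SERVER_SKETCH 1              // uncomment to emulate the server build
#ifdef SERVER_SKETCH
#define PER_HEAP                         // instance field: one copy per heap object
#else
#define PER_HEAP static                  // workstation: a single shared copy
#endif

struct gc_heap
{
    PER_HEAP size_t alloc_allocated;     // identical declaration text in both builds
    // Works for a real heap pointer (server) and for the dummy pointer
    // (workstation), because in the latter case the member is static.
    static size_t read_alloc(gc_heap* hp) { return hp->alloc_allocated; }
};
#ifndef SERVER_SKETCH
size_t gc_heap::alloc_allocated = 0;     // definition of the single static instance
#endif

int main()
{
#ifdef SERVER_SKETCH
    gc_heap heap = {};                   // server: operate on a real heap object
    gc_heap* hp = &heap;
#else
    gc_heap* hp = nullptr;               // workstation: dummy pointer, field is static
#endif
    std::printf("alloc_allocated = %zu\n", gc_heap::read_alloc(hp));
    return 0;
}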

4313
src/Native/gc/gcpriv.h Normal file

The diff for this file is not shown because it is too large.

365
src/Native/gc/gcrecord.h Normal file
View file

@ -0,0 +1,365 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*++
Module Name:
gcrecord.h
--*/
#ifndef __gc_record_h__
#define __gc_record_h__
#define max_generation 2
// We pack the dynamic tuning for deciding which gen to condemn in a DWORD.
// We assume that 2 bits are enough to represent the generation.
#define bits_generation 2
#define generation_mask (~(~0 << bits_generation))
//=======================note !!!===================================//
// If you add stuff to this enum, remember to update total_gen_reasons
// and record_condemn_gen_reasons below.
//=======================note !!!===================================//
// These are condemned reasons related to generations.
// Each reason takes up 2 bits as we have 3 generations.
// So we can store up to 16 reasons in this DWORD.
// They need processing before being used.
// See the set and the get method for details.
enum gc_condemn_reason_gen
{
gen_initial = 0, // indicates the initial gen to condemn.
gen_final_per_heap = 1, // indicates the final gen to condemn per heap.
gen_alloc_budget = 2, // indicates which gen's budget is exceeded.
gen_time_tuning = 3, // indicates the gen number that time based tuning decided.
gcrg_max = 4
};
// These are condemned reasons related to conditions we are in.
// For example, we are in very high memory load which is a condition.
// Each condition takes up a single bit indicating TRUE or FALSE.
// We can store 32 of these.
enum gc_condemn_reason_condition
{
gen_induced_fullgc_p = 0,
gen_expand_fullgc_p = 1,
gen_high_mem_p = 2,
gen_very_high_mem_p = 3,
gen_low_ephemeral_p = 4,
gen_low_card_p = 5,
gen_eph_high_frag_p = 6,
gen_max_high_frag_p = 7,
gen_max_high_frag_e_p = 8,
gen_max_high_frag_m_p = 9,
gen_max_high_frag_vm_p = 10,
gen_max_gen1 = 11,
gen_before_oom = 12,
gen_gen2_too_small = 13,
gen_induced_noforce_p = 14,
gen_before_bgc = 15,
gen_almost_max_alloc = 16,
gcrc_max = 17
};
#ifdef DT_LOG
static char* record_condemn_reasons_gen_header = "[cg]i|f|a|t|";
static char* record_condemn_reasons_condition_header = "[cc]i|e|h|v|l|l|e|m|m|m|m|g|o|s|n|b|a|";
static char char_gen_number[4] = {'0', '1', '2', '3'};
#endif //DT_LOG
class gen_to_condemn_tuning
{
DWORD condemn_reasons_gen;
DWORD condemn_reasons_condition;
#ifdef DT_LOG
char str_reasons_gen[64];
char str_reasons_condition[64];
#endif //DT_LOG
void init_str()
{
#ifdef DT_LOG
memset (str_reasons_gen, '|', sizeof (char) * 64);
str_reasons_gen[gcrg_max*2] = 0;
memset (str_reasons_condition, '|', sizeof (char) * 64);
str_reasons_condition[gcrc_max*2] = 0;
#endif //DT_LOG
}
public:
void init()
{
condemn_reasons_gen = 0;
condemn_reasons_condition = 0;
init_str();
}
void init (gen_to_condemn_tuning* reasons)
{
condemn_reasons_gen = reasons->condemn_reasons_gen;
condemn_reasons_condition = reasons->condemn_reasons_condition;
init_str();
}
void set_gen (gc_condemn_reason_gen condemn_gen_reason, DWORD value)
{
assert ((value & (~generation_mask)) == 0);
condemn_reasons_gen |= (value << (condemn_gen_reason * 2));
}
void set_condition (gc_condemn_reason_condition condemn_gen_reason)
{
condemn_reasons_condition |= (1 << condemn_gen_reason);
}
// This checks if condition_to_check is the only condition set.
BOOL is_only_condition (gc_condemn_reason_condition condition_to_check)
{
DWORD temp_conditions = 1 << condition_to_check;
return !(condemn_reasons_condition ^ temp_conditions);
}
DWORD get_gen (gc_condemn_reason_gen condemn_gen_reason)
{
DWORD value = ((condemn_reasons_gen >> (condemn_gen_reason * 2)) & generation_mask);
return value;
}
DWORD get_condition (gc_condemn_reason_condition condemn_gen_reason)
{
DWORD value = (condemn_reasons_condition & (1 << condemn_gen_reason));
return value;
}
DWORD get_reasons0()
{
return condemn_reasons_gen;
}
DWORD get_reasons1()
{
return condemn_reasons_condition;
}
#ifdef DT_LOG
char get_gen_char (DWORD value)
{
return char_gen_number[value];
}
char get_condition_char (DWORD value)
{
return (value ? 'Y' : 'N');
}
#endif //DT_LOG
void print (int heap_num);
};
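// --- Illustrative sketch (editorial addition, not part of the original file) ---
// Shows how the 2-bit-per-reason packing implemented above behaves: each
// gc_condemn_reason_gen slot occupies bits [2*reason, 2*reason+1] of the DWORD.
// Disabled with #if 0 so it never participates in the build.
#if 0
static void sketch_condemn_reason_packing()
{
    gen_to_condemn_tuning t;
    t.init ();
    t.set_gen (gen_initial, 1);          // bits 0-1  <- 01
    t.set_gen (gen_alloc_budget, 2);     // bits 4-5  <- 10
    assert (t.get_gen (gen_initial) == 1);
    assert (t.get_gen (gen_alloc_budget) == 2);
    assert (t.get_reasons0 () == 0x21);  // 10 00 01 packed low slot first
}
#endif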
// Right now these are all size_t's but if you add a type that requires
// padding you should add a pragma pack here since I am firing this as
// a struct in an ETW event.
struct gc_generation_data
{
// data recorded at the beginning of a GC
size_t size_before; // including fragmentation.
size_t free_list_space_before;
size_t free_obj_space_before;
// data recorded at the end of a GC
size_t size_after; // including fragmentation.
size_t free_list_space_after;
size_t free_obj_space_after;
size_t in;
size_t pinned_surv;
size_t npinned_surv;
size_t new_allocation;
void print (int heap_num, int gen_num);
};
struct maxgen_size_increase
{
size_t free_list_allocated;
size_t free_list_rejected;
size_t end_seg_allocated;
size_t condemned_allocated;
size_t pinned_allocated;
size_t pinned_allocated_advance;
DWORD running_free_list_efficiency;
};
// The following indicates various mechanisms and one value
// related to each one. Each value has its corresponding string
// representation, so if you change the enums, make sure you
// also add the corresponding string form.
// Note that if we are doing a gen1 GC, we won't
// really expand the heap if we are reusing, but
// we'll record the can_expand_into_p result here.
enum gc_heap_expand_mechanism
{
expand_reuse_normal,
expand_reuse_bestfit,
expand_new_seg_ep, // new seg with ephemeral promotion
expand_new_seg,
expand_no_memory // we can't get a new seg.
};
#ifdef DT_LOG
static char* str_heap_expand_mechanisms[] =
{
"reused seg with normal fit",
"reused seg with best fit",
"expand promoting eph",
"expand with a new seg",
"no memory for a new seg"
};
#endif //DT_LOG
enum gc_compact_reason
{
compact_low_ephemeral,
compact_high_frag,
compact_no_gaps,
compact_loh_forced
};
#ifdef DT_LOG
static char* str_compact_reasons[] =
{
"low on ephemeral space",
"high fragmentation",
"couldn't allocate gaps",
"user specified compact LOH"
};
#endif //DT_LOG
#ifdef DT_LOG
static char* str_concurrent_compact_reasons[] =
{
"high fragmentation",
"low on ephemeral space in concurrent marking"
};
#endif //DT_LOG
enum gc_mechanism_per_heap
{
gc_heap_expand,
gc_compact,
max_mechanism_per_heap
};
#ifdef DT_LOG
struct gc_mechanism_descr
{
char* name;
char** descr;
};
static gc_mechanism_descr gc_mechanisms_descr[max_mechanism_per_heap] =
{
{"expanded heap ", str_heap_expand_mechanisms},
{"compacted because of ", str_compact_reasons}
};
#endif //DT_LOG
int index_of_set_bit (size_t power2);
#define mechanism_mask (1 << (sizeof (DWORD) * 8 - 1))
// interesting per heap data we want to record for each GC.
class gc_history_per_heap
{
public:
gc_generation_data gen_data[max_generation+2];
maxgen_size_increase maxgen_size_info;
gen_to_condemn_tuning gen_to_condemn_reasons;
// The mechanisms data is compacted in the following way:
// the most significant bit indicates whether we did the operation,
// and the rest of the bits indicate the reason
// why we chose to do the operation. For example:
// if we did a heap expansion using best fit we'd have
// 0x80000002 for the gc_heap_expand mechanism.
// Only one value is possible for each mechanism - meaning the
// values are all exclusive
DWORD mechanisms[max_mechanism_per_heap];
DWORD heap_index;
size_t extra_gen0_committed;
void set_mechanism (gc_mechanism_per_heap mechanism_per_heap, DWORD value)
{
DWORD* mechanism = &mechanisms[mechanism_per_heap];
*mechanism |= mechanism_mask;
*mechanism |= (1 << value);
}
void clear_mechanism (gc_mechanism_per_heap mechanism_per_heap)
{
DWORD* mechanism = &mechanisms[mechanism_per_heap];
*mechanism = 0;
}
int get_mechanism (gc_mechanism_per_heap mechanism_per_heap)
{
DWORD mechanism = mechanisms[mechanism_per_heap];
if (mechanism & mechanism_mask)
{
int index = index_of_set_bit ((size_t)(mechanism & (~mechanism_mask)));
assert (index != -1);
return index;
}
return -1;
}
void print();
};
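// --- Illustrative sketch (editorial addition, not part of the original file) ---
// Shows the mechanism encoding described above, using the expand-with-best-fit
// example from the comment (0x80000002). Disabled with #if 0 so it never
// participates in the build.
#if 0
static void sketch_mechanism_encoding()
{
    gc_history_per_heap hist;
    hist.clear_mechanism (gc_heap_expand);
    hist.set_mechanism (gc_heap_expand, expand_reuse_bestfit);  // reason value 1
    // mechanisms[gc_heap_expand] is now 0x80000002:
    //   bit 31 (mechanism_mask) -> "we did expand the heap"
    //   bit 1                   -> reason == expand_reuse_bestfit
    assert (hist.get_mechanism (gc_heap_expand) == expand_reuse_bestfit);
    hist.clear_mechanism (gc_compact);
    assert (hist.get_mechanism (gc_compact) == -1);             // compaction not performed
}
#endif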
// we store up to 32 boolean settings.
enum gc_global_mechanism_p
{
global_concurrent = 0,
global_compaction,
global_promotion,
global_demotion,
global_card_bundles,
global_elevation,
max_global_mechanism
};
struct gc_history_global
{
// We may apply other factors (such as equalization or smoothing) after
// we have calculated the gen0 budget in desired_new_allocation, so we
// record the final budget here.
size_t final_youngest_desired;
DWORD num_heaps;
int condemned_generation;
int gen0_reduction_count;
gc_reason reason;
int pause_mode;
DWORD mem_pressure;
DWORD global_mechanims_p;
void set_mechanism_p (gc_global_mechanism_p mechanism)
{
global_mechanims_p |= (1 << mechanism);
}
BOOL get_mechanism_p (gc_global_mechanism_p mechanism)
{
return (global_mechanims_p & (1 << mechanism));
}
void print();
};
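// --- Illustrative sketch (editorial addition, not part of the original file) ---
// The global mechanisms above are plain one-bit booleans packed into
// global_mechanims_p. Disabled with #if 0 so it never participates in the build.
#if 0
static void sketch_global_mechanism_bits()
{
    gc_history_global g;
    g.global_mechanims_p = 0;
    g.set_mechanism_p (global_compaction);
    g.set_mechanism_p (global_promotion);
    // global_mechanims_p == 0x6 (bit 1 | bit 2)
    assert (g.get_mechanism_p (global_compaction));
    assert (!g.get_mechanism_p (global_concurrent));
}
#endif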
#endif //__gc_record_h__

376
src/Native/gc/gcscan.cpp Normal file
View file

@ -0,0 +1,376 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* GCSCAN.CPP
*
* GC Root Scanning
*
*
*/
#include "common.h"
#include "gcenv.h"
#include "gcscan.h"
#include "gc.h"
#include "objecthandle.h"
//#define CATCH_GC //catches exception during GC
#ifdef DACCESS_COMPILE
SVAL_IMPL_INIT(LONG, CNameSpace, m_GcStructuresInvalidCnt, 1);
#else //DACCESS_COMPILE
VOLATILE(LONG) CNameSpace::m_GcStructuresInvalidCnt = 1;
#endif //DACCESS_COMPILE
BOOL CNameSpace::GetGcRuntimeStructuresValid ()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
_ASSERTE ((LONG)m_GcStructuresInvalidCnt >= 0);
return (LONG)m_GcStructuresInvalidCnt == 0;
}
#ifdef DACCESS_COMPILE
void
CNameSpace::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
m_GcStructuresInvalidCnt.EnumMem();
}
#else
//
// Dependent handle promotion scan support
//
// This method is called first during the mark phase. Its job is to set up the context for further scanning
// (remembering the scan parameters the GC gives us and initializing some state variables we use to determine
// whether further scans will be required or not).
//
// This scan is not guaranteed to return complete results due to the GC context in which we are called. In
// particular it is possible, due to either a mark stack overflow or unsynchronized operation in server GC
// mode, that not all reachable objects will be reported as promoted yet. However, the operations we perform
// will still be correct and this scan allows us to spot a common optimization where no dependent handles are
// due for retirement in this particular GC. This is an important optimization to take advantage of since
// synchronizing the GC to calculate complete results is a costly operation.
void CNameSpace::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
// We allocate space for dependent handle scanning context during Ref_Initialize. Under server GC there
// are actually as many contexts as heaps (and CPUs). Ref_GetDependentHandleContext() retrieves the
// correct context for the current GC thread based on the ScanContext passed to us by the GC.
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
// Record GC callback parameters in the DH context so that the GC doesn't continually have to pass the
// same data to each call.
pDhContext->m_pfnPromoteFunction = fn;
pDhContext->m_iCondemned = condemned;
pDhContext->m_iMaxGen = max_gen;
pDhContext->m_pScanContext = sc;
// Look for dependent handles whose primary has been promoted but whose secondary has not. Promote the
// secondary in those cases. Additionally this scan sets the m_fUnpromotedPrimaries and m_fPromoted state
// flags in the DH context. The m_fUnpromotedPrimaries flag is the most interesting here: if this flag is
// false after the scan then it doesn't matter how many object promotions might currently be missing since
// there are no secondary objects that are currently unpromoted anyway. This is the (hopefully common)
// circumstance under which we don't have to perform any costly additional re-scans.
Ref_ScanDependentHandlesForPromotion(pDhContext);
}
// This method is called after GcDhInitialScan and before each subsequent scan (GcDhReScan below). It
// determines whether any handles are left that have unpromoted secondaries.
bool CNameSpace::GcDhUnpromotedHandlesExist(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
// Locate our dependent handle context based on the GC context.
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
return pDhContext->m_fUnpromotedPrimaries;
}
// Perform a re-scan of dependent handles, promoting secondaries associated with newly promoted primaries as
// above. We may still need to call this multiple times since promotion of a secondary late in the table could
// promote a primary earlier in the table. Also, GC graph promotions are not guaranteed to be complete by the
// time the promotion callback returns (the mark stack can overflow). As a result the GC might have to call
// this method in a loop. The scan records state that lets us know when to terminate (no further handles to
// be promoted or no promotions in the last scan). Returns true if at least one object was promoted as a
// result of the scan.
bool CNameSpace::GcDhReScan(ScanContext* sc)
{
// Locate our dependent handle context based on the GC context.
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
return Ref_ScanDependentHandlesForPromotion(pDhContext);
}
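// --- Illustrative sketch (editorial addition, not part of the original file) ---
// The shape of the loop a collector could drive with the three entry points
// above during the mark phase; the parameter names are placeholders. Disabled
// with #if 0 so it never participates in the build.
#if 0
static void sketch_dependent_handle_promotion_loop(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    CNameSpace::GcDhInitialScan (fn, condemned, max_gen, sc);
    // ... drain the regular mark stack / promotion work here ...
    while (CNameSpace::GcDhUnpromotedHandlesExist (sc))
    {
        if (!CNameSpace::GcDhReScan (sc))
            break;              // last rescan promoted nothing: fixed point reached
        // ... drain any promotion work queued by the rescan ...
    }
}
#endif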
/*
* Scan for dead weak pointers
*/
VOID CNameSpace::GcWeakPtrScan( promote_func* fn, int condemned, int max_gen, ScanContext* sc )
{
// Clear out weak pointers that are no longer live.
Ref_CheckReachable(condemned, max_gen, (LPARAM)sc);
// Clear any secondary objects whose primary object is now definitely dead.
Ref_ScanDependentHandlesForClearing(condemned, max_gen, sc, fn);
}
static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInfo, LPARAM lp1, LPARAM lp2)
{
LIMITED_METHOD_CONTRACT;
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
if (!GCHeap::GetGCHeap()->IsPromoted(*pRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
*pRef = NULL;
}
else
{
LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef)));
}
}
VOID CNameSpace::GcWeakPtrScanBySingleThread( int condemned, int max_gen, ScanContext* sc )
{
GCToEEInterface::SyncBlockCacheWeakPtrScan(&CheckPromoted, (LPARAM)sc, 0);
}
VOID CNameSpace::GcScanSizedRefs(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
Ref_ScanSizedRefHandles(condemned, max_gen, sc, fn);
}
VOID CNameSpace::GcShortWeakPtrScan(promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
Ref_CheckAlive(condemned, max_gen, (LPARAM)sc);
}
/*
* Scan all stack roots in this 'namespace'
*/
VOID CNameSpace::GcScanRoots(promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
#if defined ( _DEBUG) && defined (CATCH_GC)
//note that we can't use EX_TRY because the gc_thread isn't known
PAL_TRY
#endif // _DEBUG && CATCH_GC
{
STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
{
// In server GC, we should be competing for marking the statics
if (GCHeap::MarkShouldCompeteForStatics())
{
if (condemned == max_gen && sc->promotion)
{
GCToEEInterface::ScanStaticGCRefsOpportunistically(fn, sc);
}
}
Thread* pThread = NULL;
while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
{
STRESS_LOG2(LF_GC|LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
if (GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap(pThread->GetAllocContext(), sc->thread_number))
{
sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
GCToEEInterface::ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
}
STRESS_LOG2(LF_GC|LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
}
}
}
#if defined ( _DEBUG) && defined (CATCH_GC)
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
_ASSERTE (!"We got an exception during scan roots");
}
PAL_ENDTRY
#endif //_DEBUG
}
/*
* Scan all handle roots in this 'namespace'
*/
VOID CNameSpace::GcScanHandles (promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
#if defined ( _DEBUG) && defined (CATCH_GC)
//note that we can't use EX_TRY because the gc_thread isn't known
PAL_TRY
#endif // _DEBUG && CATCH_GC
{
STRESS_LOG1(LF_GC|LF_GCROOTS, LL_INFO10, "GcScanHandles (Promotion Phase = %d)\n", sc->promotion);
if (sc->promotion)
{
Ref_TracePinningRoots(condemned, max_gen, sc, fn);
Ref_TraceNormalRoots(condemned, max_gen, sc, fn);
}
else
{
Ref_UpdatePointers(condemned, max_gen, sc, fn);
Ref_UpdatePinnedPointers(condemned, max_gen, sc, fn);
Ref_ScanDependentHandlesForRelocation(condemned, max_gen, sc, fn);
}
}
#if defined ( _DEBUG) && defined (CATCH_GC)
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
_ASSERTE (!"We got an exception during scan roots");
}
PAL_ENDTRY
#endif //_DEBUG
}
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*
* Scan all handle roots in this 'namespace' for profiling
*/
VOID CNameSpace::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
#if defined ( _DEBUG) && defined (CATCH_GC)
//note that we can't use EX_TRY because the gc_thread isn't known
PAL_TRY
#endif // _DEBUG && CATCH_GC
{
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, Handles\n"));
Ref_ScanPointersForProfilerAndETW(max_gen, (LPARAM)sc);
}
#if defined ( _DEBUG) && defined (CATCH_GC)
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
_ASSERTE (!"We got an exception during scan roots for the profiler");
}
PAL_ENDTRY
#endif //_DEBUG
}
/*
* Scan dependent handles in this 'namespace' for profiling
*/
void CNameSpace::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n"));
Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void CNameSpace::GcRuntimeStructuresValid (BOOL bValid)
{
WRAPPER_NO_CONTRACT;
if (!bValid)
{
LONG result;
result = FastInterlockIncrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result > 0);
}
else
{
LONG result;
result = FastInterlockDecrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result >= 0);
}
}
void CNameSpace::GcDemote (int condemned, int max_gen, ScanContext* sc)
{
Ref_RejuvenateHandles (condemned, max_gen, (LPARAM)sc);
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCacheDemote(max_gen);
}
void CNameSpace::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
{
Ref_AgeHandles(condemned, max_gen, (LPARAM)sc);
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCachePromotionsGranted(max_gen);
}
void CNameSpace::GcFixAllocContexts (void* arg, void *heap)
{
LIMITED_METHOD_CONTRACT;
if (GCHeap::UseAllocationContexts())
{
Thread *thread = NULL;
while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
{
GCHeap::GetGCHeap()->FixAllocContext(thread->GetAllocContext(), FALSE, arg, heap);
}
}
}
void CNameSpace::GcEnumAllocContexts (enum_alloc_context_func* fn)
{
LIMITED_METHOD_CONTRACT;
if (GCHeap::UseAllocationContexts())
{
Thread *thread = NULL;
while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
{
(*fn) (thread->GetAllocContext());
}
}
}
size_t CNameSpace::AskForMoreReservedMemory (size_t old_size, size_t need_size)
{
LIMITED_METHOD_CONTRACT;
#if !defined(FEATURE_CORECLR) && !defined(FEATURE_REDHAWK)
// call the host....
IGCHostControl *pGCHostControl = CorHost::GetGCHostControl();
if (pGCHostControl)
{
size_t new_max_limit_size = need_size;
pGCHostControl->RequestVirtualMemLimit (old_size,
(SIZE_T*)&new_max_limit_size);
return new_max_limit_size;
}
#endif
return old_size + need_size;
}
void CNameSpace::VerifyHandleTable(int condemned, int max_gen, ScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
Ref_VerifyHandleTable(condemned, max_gen, sc);
}
#endif // !DACCESS_COMPILE

119
src/Native/gc/gcscan.h Normal file
View file

@ -0,0 +1,119 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* GCSCAN.H
*
* GC Root Scanning
*
*
*/
#ifndef _GCSCAN_H_
#define _GCSCAN_H_
#include "gc.h"
// Scanning dependent handles for promotion can become a complex operation due to cascaded dependencies and
// other issues (see the comments for GcDhInitialScan and friends in gcscan.cpp for further details). As a
// result we need to maintain a context between all the DH scanning methods called during a single mark phase.
// The structure below describes this context. We allocate one of these per GC heap at Ref_Initialize time and
// select between them based on the ScanContext passed to us by the GC during the mark phase.
struct DhContext
{
bool m_fUnpromotedPrimaries; // Did last scan find at least one non-null unpromoted primary?
bool m_fPromoted; // Did last scan promote at least one secondary?
promote_func *m_pfnPromoteFunction; // GC promote callback to be used for all secondary promotions
int m_iCondemned; // The condemned generation
int m_iMaxGen; // The maximum generation
ScanContext *m_pScanContext; // The GC's scan context for this phase
};
// <TODO>
// @TODO (JSW): For compatibility with the existing GC code we use CNamespace
// as the name of this class. I'm planning on changing it to
// something like GCDomain....
// </TODO>
typedef void enum_alloc_context_func(alloc_context*);
class CNameSpace
{
friend struct ::_DacGlobals;
public:
// Called on gc start
static void GcStartDoWork();
static void GcScanSizedRefs(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
// Regular stack Roots
static void GcScanRoots (promote_func* fn, int condemned, int max_gen, ScanContext* sc);
//
static void GcScanHandles (promote_func* fn, int condemned, int max_gen, ScanContext* sc);
static void GcRuntimeStructuresValid (BOOL bValid);
static BOOL GetGcRuntimeStructuresValid ();
#ifdef DACCESS_COMPILE
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif // DACCESS_COMPILE
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc);
static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// scan for dead weak pointers
static void GcWeakPtrScan (promote_func* fn, int condemned, int max_gen, ScanContext*sc );
static void GcWeakPtrScanBySingleThread (int condemned, int max_gen, ScanContext*sc );
// scan for dead short-lived weak pointers
static void GcShortWeakPtrScan (promote_func* fn, int condemned, int max_gen,
ScanContext* sc);
//
// Dependent handle promotion scan support
//
// Perform initial (incomplete) scan which will determine if there's any further work required.
static void GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
// Called between scans to ask if any handles with an unpromoted secondary existed at the end of the last
// scan.
static bool GcDhUnpromotedHandlesExist(ScanContext* sc);
// Rescan the handles for additional primaries that have been promoted since the last scan. Return true if
// any objects were promoted as a result.
static bool GcDhReScan(ScanContext* sc);
// post-promotions callback
static void GcPromotionsGranted (int condemned, int max_gen,
ScanContext* sc);
// post-promotions callback; some roots were demoted
static void GcDemote (int condemned, int max_gen, ScanContext* sc);
static void GcEnumAllocContexts (enum_alloc_context_func* fn);
static void GcFixAllocContexts (void* arg, void *heap);
static size_t AskForMoreReservedMemory (size_t old_size, size_t need_size);
static void VerifyHandleTable(int condemned, int max_gen, ScanContext* sc);
private:
#ifdef DACCESS_COMPILE
SVAL_DECL(LONG, m_GcStructuresInvalidCnt);
#else
static VOLATILE(LONG) m_GcStructuresInvalidCnt;
#endif //DACCESS_COMPILE
};
#endif // _GCSCAN_H_

25
src/Native/gc/gcsvr.cpp Normal file
View file

@ -0,0 +1,25 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#include "common.h"
#if defined(FEATURE_SVR_GC)
#include "gcenv.h"
#include "gc.h"
#include "gcscan.h"
#include "gcdesc.h"
#define SERVER_GC 1
namespace SVR {
#include "gcimpl.h"
#include "gc.cpp"
}
#endif // defined(FEATURE_SVR_GC)

24
src/Native/gc/gcwks.cpp Normal file
View file

@ -0,0 +1,24 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#include "gcscan.h"
#include "gcdesc.h"
#ifdef SERVER_GC
#undef SERVER_GC
#endif
namespace WKS {
#include "gcimpl.h"
#include "gc.cpp"
}
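// Editorial note (not part of the original file): gcsvr.cpp and gcwks.cpp
// compile the same gc.cpp/gcimpl.h sources twice -- once inside namespace SVR
// with SERVER_GC defined, and once inside namespace WKS with SERVER_GC
// undefined -- so a single binary can carry both the server and the
// workstation flavors of the collector.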

Diff not shown because of its large size.

254
src/Native/gc/handletable.h Normal file
View file

@ -0,0 +1,254 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* Generational GC handle manager. Entrypoint Header.
*
* Implements generic support for external handles into a GC heap.
*
*
*/
#ifndef _HANDLETABLE_H
#define _HANDLETABLE_H
/****************************************************************************
*
* FLAGS, CONSTANTS AND DATA TYPES
*
****************************************************************************/
#ifdef _DEBUG
#define DEBUG_DestroyedHandleValue ((_UNCHECKED_OBJECTREF)0x7)
#endif
/*
* handle flags used by HndCreateHandleTable
*/
#define HNDF_NORMAL (0x00)
#define HNDF_EXTRAINFO (0x01)
/*
* handle to handle table
*/
typedef DPTR(struct HandleTable) PTR_HandleTable;
typedef DPTR(PTR_HandleTable) PTR_PTR_HandleTable;
typedef PTR_HandleTable HHANDLETABLE;
typedef PTR_PTR_HandleTable PTR_HHANDLETABLE;
/*--------------------------------------------------------------------------*/
/****************************************************************************
*
* PUBLIC ROUTINES AND MACROS
*
****************************************************************************/
#ifndef DACCESS_COMPILE
/*
* handle manager init and shutdown routines
*/
HHANDLETABLE HndCreateHandleTable(const UINT *pTypeFlags, UINT uTypeCount, ADIndex uADIndex);
void HndDestroyHandleTable(HHANDLETABLE hTable);
#endif // !DACCESS_COMPILE
/*
* retrieve index stored in table at creation
*/
void HndSetHandleTableIndex(HHANDLETABLE hTable, UINT uTableIndex);
UINT HndGetHandleTableIndex(HHANDLETABLE hTable);
ADIndex HndGetHandleTableADIndex(HHANDLETABLE hTable);
ADIndex HndGetHandleADIndex(OBJECTHANDLE handle);
#ifndef DACCESS_COMPILE
/*
* individual handle allocation and deallocation
*/
OBJECTHANDLE HndCreateHandle(HHANDLETABLE hTable, UINT uType, OBJECTREF object, LPARAM lExtraInfo = 0);
void HndDestroyHandle(HHANDLETABLE hTable, UINT uType, OBJECTHANDLE handle);
void HndDestroyHandleOfUnknownType(HHANDLETABLE hTable, OBJECTHANDLE handle);
/*
* bulk handle allocation and deallocation
*/
UINT HndCreateHandles(HHANDLETABLE hTable, UINT uType, OBJECTHANDLE *pHandles, UINT uCount);
void HndDestroyHandles(HHANDLETABLE hTable, UINT uType, const OBJECTHANDLE *pHandles, UINT uCount);
/*
* owner data associated with handles
*/
void HndSetHandleExtraInfo(OBJECTHANDLE handle, UINT uType, LPARAM lExtraInfo);
#endif // !DACCESS_COMPILE
LPARAM HndGetHandleExtraInfo(OBJECTHANDLE handle);
/*
* get parent table of handle
*/
HHANDLETABLE HndGetHandleTable(OBJECTHANDLE handle);
/*
* write barrier
*/
void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF value);
/*
* logging an ETW event (for inlined methods)
*/
void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value);
/*
* Scanning callback.
*/
typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, LPARAM *pExtraInfo, LPARAM param1, LPARAM param2);
/*
* NON-GC handle enumeration
*/
void HndEnumHandles(HHANDLETABLE hTable, const UINT *puType, UINT uTypeCount,
HANDLESCANPROC pfnEnum, LPARAM lParam1, LPARAM lParam2, BOOL fAsync);
/*
* GC-time handle scanning
*/
#define HNDGCF_NORMAL (0x00000000) // normal scan
#define HNDGCF_AGE (0x00000001) // age handles while scanning
#define HNDGCF_ASYNC (0x00000002) // drop the table lock while scanning
#define HNDGCF_EXTRAINFO (0x00000004) // iterate per-handle data while scanning
void HndScanHandlesForGC(HHANDLETABLE hTable,
HANDLESCANPROC scanProc,
LPARAM param1,
LPARAM param2,
const UINT *types,
UINT typeCount,
UINT condemned,
UINT maxgen,
UINT flags);
void HndResetAgeMap(HHANDLETABLE hTable, const UINT *types, UINT typeCount, UINT condemned, UINT maxgen, UINT flags);
void HndVerifyTable(HHANDLETABLE hTable, const UINT *types, UINT typeCount, UINT condemned, UINT maxgen, UINT flags);
void HndNotifyGcCycleComplete(HHANDLETABLE hTable, UINT condemned, UINT maxgen);
/*
* Handle counting
*/
UINT HndCountHandles(HHANDLETABLE hTable);
UINT HndCountAllHandles(BOOL fUseLocks);
/*--------------------------------------------------------------------------*/
#if defined(USE_CHECKED_OBJECTREFS) && !defined(_NOVM)
#define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref) (*((_UNCHECKED_OBJECTREF*)&(objref)))
#define UNCHECKED_OBJECTREF_TO_OBJECTREF(obj) (OBJECTREF(obj))
#else
#define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref) (objref)
#define UNCHECKED_OBJECTREF_TO_OBJECTREF(obj) (obj)
#endif
#ifdef _DEBUG_IMPL
void ValidateAssignObjrefForHandle(OBJECTREF, ADIndex appDomainIndex);
void ValidateFetchObjrefForHandle(OBJECTREF, ADIndex appDomainIndex);
void ValidateAppDomainForHandle(OBJECTHANDLE handle);
#endif
/*
* handle assignment
*/
void HndAssignHandle(OBJECTHANDLE handle, OBJECTREF objref);
/*
* interlocked-exchange assignment
*/
void* HndInterlockedCompareExchangeHandle(OBJECTHANDLE handle, OBJECTREF objref, OBJECTREF oldObjref);
/*
* Note that HndFirstAssignHandle is similar to HndAssignHandle, except that it only
* succeeds if transitioning from NULL to non-NULL. In other words, if this handle
* is being initialized for the first time.
*/
BOOL HndFirstAssignHandle(OBJECTHANDLE handle, OBJECTREF objref);
/*
* inline handle dereferencing
*/
FORCEINLINE OBJECTREF HndFetchHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
// sanity
_ASSERTE(handle);
#ifdef _DEBUG_IMPL
_ASSERTE("Attempt to access destroyed handle." && *(_UNCHECKED_OBJECTREF *)handle != DEBUG_DestroyedHandleValue);
// Make sure the objref for handle is valid
ValidateFetchObjrefForHandle(ObjectToOBJECTREF(*(Object **)handle),
HndGetHandleTableADIndex(HndGetHandleTable(handle)));
#endif // _DEBUG_IMPL
// wrap the raw objectref and return it
return UNCHECKED_OBJECTREF_TO_OBJECTREF(*PTR_UNCHECKED_OBJECTREF(handle));
}
/*
* inline null testing (needed in certain cases where we're in the wrong GC mode)
*/
FORCEINLINE BOOL HndIsNull(OBJECTHANDLE handle)
{
LIMITED_METHOD_CONTRACT;
// sanity
_ASSERTE(handle);
return NULL == *(Object **)handle;
}
/*
* inline handle checking
*/
FORCEINLINE BOOL HndCheckForNullUnchecked(OBJECTHANDLE handle)
{
LIMITED_METHOD_CONTRACT;
return (handle == NULL || (*(_UNCHECKED_OBJECTREF *)handle) == NULL);
}
/*
*
* Checks handle value for null or special value used for free handles in cache.
*
*/
FORCEINLINE BOOL HndIsNullOrDestroyedHandle(_UNCHECKED_OBJECTREF value)
{
LIMITED_METHOD_CONTRACT;
#ifdef DEBUG_DestroyedHandleValue
if (value == DEBUG_DestroyedHandleValue)
return TRUE;
#endif
return (value == NULL);
}
/*--------------------------------------------------------------------------*/
#include "handletable.inl"
#endif //_HANDLETABLE_H
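// --- Illustrative sketch (editorial addition, not part of the original file) ---
// Typical lifecycle of a handle through the entry points declared above. The
// table flags, the type index, and the object references are placeholders.
// Disabled with #if 0 so it never participates in the build.
#if 0
static void sketch_handle_lifecycle(const UINT *pTypeFlags, UINT uTypeCount, ADIndex adIndex, OBJECTREF objref, OBJECTREF newObjref)
{
    HHANDLETABLE hTable = HndCreateHandleTable (pTypeFlags, uTypeCount, adIndex);
    OBJECTHANDLE handle = HndCreateHandle (hTable, 0 /* placeholder type index */, objref);
    OBJECTREF current = HndFetchHandle (handle);    // dereference the handle
    HndAssignHandle (handle, newObjref);            // store a new referent (fires the write barrier)
    HndDestroyHandle (hTable, 0 /* same type index */, handle);
    HndDestroyHandleTable (hTable);
}
#endif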

View file

@ -0,0 +1,121 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
//
#ifndef _HANDLETABLE_INL
#define _HANDLETABLE_INL
inline void HndAssignHandle(OBJECTHANDLE handle, OBJECTREF objref)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// sanity
_ASSERTE(handle);
#ifdef _DEBUG_IMPL
// handle should not be in unloaded domain
ValidateAppDomainForHandle(handle);
// Make sure the objref is valid before it is assigned to a handle
ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle)));
#endif
// unwrap the objectref we were given
_UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
HndLogSetEvent(handle, value);
// if we are doing a non-NULL pointer store then invoke the write-barrier
if (value)
HndWriteBarrier(handle, objref);
// store the pointer
*(_UNCHECKED_OBJECTREF *)handle = value;
}
inline void* HndInterlockedCompareExchangeHandle(OBJECTHANDLE handle, OBJECTREF objref, OBJECTREF oldObjref)
{
WRAPPER_NO_CONTRACT;
// sanity
_ASSERTE(handle);
#ifdef _DEBUG_IMPL
// handle should not be in unloaded domain
ValidateAppDomainForHandle(handle);
// Make sure the objref is valid before it is assigned to a handle
ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle)));
#endif
// unwrap the objectref we were given
_UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
_UNCHECKED_OBJECTREF oldValue = OBJECTREF_TO_UNCHECKED_OBJECTREF(oldObjref);
// if we are doing a non-NULL pointer store then invoke the write-barrier
if (value)
HndWriteBarrier(handle, objref);
// store the pointer
void* ret = FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle), value, oldValue);
if (ret == oldValue)
HndLogSetEvent(handle, value);
return ret;
}
inline BOOL HndFirstAssignHandle(OBJECTHANDLE handle, OBJECTREF objref)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// sanity
_ASSERTE(handle);
#ifdef _DEBUG_IMPL
// handle should not be in unloaded domain
ValidateAppDomainForHandle(handle);
// Make sure the objref is valid before it is assigned to a handle
ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle)));
#endif
// unwrap the objectref we were given
_UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
_UNCHECKED_OBJECTREF null = NULL;
// store the pointer if we are the first ones here
BOOL success = (NULL == FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle),
value,
null));
// if we successfully did a non-NULL pointer store then invoke the write-barrier
if (success)
{
if (value)
HndWriteBarrier(handle, objref);
HndLogSetEvent(handle, value);
}
// return our result
return success;
}
#endif // _HANDLETABLE_INL

View file

@ -0,0 +1,882 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* Generational GC handle manager. Handle Caching Routines.
*
* Implementation of handle table allocation cache.
*
*
*/
#include "common.h"
#include "gcenv.h"
#include "handletablepriv.h"
/****************************************************************************
*
* RANDOM HELPERS
*
****************************************************************************/
/*
* SpinUntil
*
* Spins on a variable until its state matches a desired state.
*
* This routine will assert if it spins for a very long time.
*
*/
void SpinUntil(void *pCond, BOOL fNonZero)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// if we have to sleep then we will keep track of a sleep period
DWORD dwThisSleepPeriod = 1; // first just give up our timeslice
DWORD dwNextSleepPeriod = 10; // next try a real delay
#ifdef _DEBUG
DWORD dwTotalSlept = 0;
DWORD dwNextComplain = 1000;
#endif //_DEBUG
// on MP machines, allow ourselves some spin time before sleeping
UINT uNonSleepSpins = 8 * (g_SystemInfo.dwNumberOfProcessors - 1);
// spin until the specified condition is met
while ((*(UINT_PTR *)pCond != 0) != (fNonZero != 0))
{
// have we exhausted the non-sleep spin count?
if (!uNonSleepSpins)
{
#ifdef _DEBUG
// yes, missed again - before sleeping, check our current sleep time
if (dwTotalSlept >= dwNextComplain)
{
//
// THIS SHOULD NOT NORMALLY HAPPEN
//
// The only time this assert can be ignored is if you have
// another thread intentionally suspended in a way that either
// directly or indirectly leaves a thread suspended in the
// handle table while the current thread (this assert) is
// running normally.
//
// Otherwise, this assert should be investigated as a bug.
//
_ASSERTE(FALSE);
// slow down the assert rate so people can investigate
dwNextComplain = 3 * dwNextComplain;
}
// now update our total sleep time
dwTotalSlept += dwThisSleepPeriod;
#endif //_DEBUG
// sleep for a little while
__SwitchToThread(dwThisSleepPeriod, CALLER_LIMITS_SPINNING);
// now update our sleep period
dwThisSleepPeriod = dwNextSleepPeriod;
// now increase the next sleep period if it is still small
if (dwNextSleepPeriod < 1000)
dwNextSleepPeriod += 10;
}
else
{
// nope - just spin again
YieldProcessor(); // indicate to the processor that we are spinning
uNonSleepSpins--;
}
}
}
/*
* ReadAndZeroCacheHandles
*
* Reads a set of handles from a bank in the handle cache, zeroing them as they are taken.
*
* This routine will assert if a requested handle is missing.
*
*/
OBJECTHANDLE *ReadAndZeroCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, UINT uCount)
{
LIMITED_METHOD_CONTRACT;
// set up to loop
OBJECTHANDLE *pLast = pDst + uCount;
// loop until we've copied all of them
while (pDst < pLast)
{
// this version assumes we have handles to read
_ASSERTE(*pSrc);
// copy the handle and zero it from the source
*pDst = *pSrc;
*pSrc = 0;
// set up for another handle
pDst++;
pSrc++;
}
// return the next unfilled slot after what we filled in
return pLast;
}
/*
* SyncReadAndZeroCacheHandles
*
* Reads a set of handles from a bank in the handle cache, zeroing them as they are taken.
*
* This routine will spin until all requested handles are obtained.
*
*/
OBJECTHANDLE *SyncReadAndZeroCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, UINT uCount)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// set up to loop
// we loop backwards since that is the order handles are added to the bank
// this is designed to reduce the chance that we will have to spin on a handle
OBJECTHANDLE *pBase = pDst;
pSrc += uCount;
pDst += uCount;
// remember the end of the array
OBJECTHANDLE *pLast = pDst;
// loop until we've copied all of them
while (pDst > pBase)
{
// advance to the next slot
pDst--;
pSrc--;
// this version spins if there is no handle to read
if (!*pSrc)
SpinUntil(pSrc, TRUE);
// copy the handle and zero it from the source
*pDst = *pSrc;
*pSrc = 0;
}
// return the next unfilled slot after what we filled in
return pLast;
}
/*
* WriteCacheHandles
*
* Writes a set of handles to a bank in the handle cache.
*
* This routine will assert if it is about to clobber an existing handle.
*
*/
void WriteCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, UINT uCount)
{
LIMITED_METHOD_CONTRACT;
// set up to loop
OBJECTHANDLE *pLimit = pSrc + uCount;
// loop until we've copied all of them
while (pSrc < pLimit)
{
// this version assumes we have space to store the handles
_ASSERTE(!*pDst);
// copy the handle
*pDst = *pSrc;
// set up for another handle
pDst++;
pSrc++;
}
}
/*
* SyncWriteCacheHandles
*
* Writes a set of handles to a bank in the handle cache.
*
* This routine will spin until lingering handles in the cache bank are gone.
*
*/
void SyncWriteCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, UINT uCount)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// set up to loop
// we loop backwards since that is the order handles are removed from the bank
// this is designed to reduce the chance that we will have to spin on a handle
OBJECTHANDLE *pBase = pSrc;
pSrc += uCount;
pDst += uCount;
// loop until we've copied all of them
while (pSrc > pBase)
{
// set up for another handle
pDst--;
pSrc--;
// this version spins if there is no handle to read
if (*pDst)
SpinUntil(pDst, FALSE);
// copy the handle
*pDst = *pSrc;
}
}
/*
* SyncTransferCacheHandles
*
* Transfers a set of handles from one bank of the handle cache to another,
* zeroing the source bank as the handles are removed.
*
* The routine will spin until all requested handles can be transferred.
*
* This routine is equivalent to SyncReadAndZeroCacheHandles + SyncWriteCacheHandles
*
*/
void SyncTransferCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, UINT uCount)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// set up to loop
// we loop backwards since that is the order handles are added to the bank
// this is designed to reduce the chance that we will have to spin on a handle
OBJECTHANDLE *pBase = pDst;
pSrc += uCount;
pDst += uCount;
// loop until we've copied all of them
while (pDst > pBase)
{
// advance to the next slot
pDst--;
pSrc--;
// this version spins if there is no handle to read or no place to write it
if (*pDst || !*pSrc)
{
SpinUntil(pSrc, TRUE);
SpinUntil(pDst, FALSE);
}
// copy the handle and zero it from the source
*pDst = *pSrc;
*pSrc = 0;
}
}
/*--------------------------------------------------------------------------*/
/****************************************************************************
*
* HANDLE CACHE
*
****************************************************************************/
/*
* TableFullRebalanceCache
*
* Rebalances a handle cache by transferring handles from the cache's
* free bank to its reserve bank. If the free bank does not provide
* enough handles to replenish the reserve bank, handles are allocated
* in bulk from the main handle table. If too many handles remain in
* the free bank, the extra handles are returned in bulk to the main
* handle table.
*
* This routine attempts to reduce fragmentation in the main handle
* table by sorting the handles according to table order, preferring to
* refill the reserve bank with lower handles while freeing higher ones.
* The sorting also allows the free routine to operate more efficiently,
* as it can optimize the case where handles near each other are freed.
*
*/
void TableFullRebalanceCache(HandleTable *pTable,
HandleTypeCache *pCache,
UINT uType,
LONG lMinReserveIndex,
LONG lMinFreeIndex,
OBJECTHANDLE *pExtraOutHandle,
OBJECTHANDLE extraInHandle)
{
LIMITED_METHOD_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// we need a temporary space to sort our free handles in
OBJECTHANDLE rgHandles[HANDLE_CACHE_TYPE_SIZE];
// set up a base handle pointer to keep track of where we are
OBJECTHANDLE *pHandleBase = rgHandles;
// do we have a spare incoming handle?
if (extraInHandle)
{
// remember the extra handle now
*pHandleBase = extraInHandle;
pHandleBase++;
}
// if there are handles in the reserve bank then gather them up
// (we don't need to wait on these since they are only put there by this
// function inside our own lock)
if (lMinReserveIndex > 0)
pHandleBase = ReadAndZeroCacheHandles(pHandleBase, pCache->rgReserveBank, (UINT)lMinReserveIndex);
else
lMinReserveIndex = 0;
// if there are handles in the free bank then gather them up
if (lMinFreeIndex < HANDLES_PER_CACHE_BANK)
{
// this may have underflowed
if (lMinFreeIndex < 0)
lMinFreeIndex = 0;
// here we need to wait for all pending freed handles to be written by other threads
pHandleBase = SyncReadAndZeroCacheHandles(pHandleBase,
pCache->rgFreeBank + lMinFreeIndex,
HANDLES_PER_CACHE_BANK - (UINT)lMinFreeIndex);
}
// compute the number of handles we have
UINT uHandleCount = (UINT) (pHandleBase - rgHandles);
// do we have enough handles for a balanced cache?
if (uHandleCount < REBALANCE_LOWATER_MARK)
{
// nope - allocate some more
UINT uAlloc = HANDLES_PER_CACHE_BANK - uHandleCount;
// if we have an extra outgoing handle then plan for that too
if (pExtraOutHandle)
uAlloc++;
{
// allocate the new handles - we intentionally don't check for success here
FAULT_NOT_FATAL();
uHandleCount += TableAllocBulkHandles(pTable, uType, pHandleBase, uAlloc);
}
}
// reset the base handle pointer
pHandleBase = rgHandles;
// by default the whole free bank is available
lMinFreeIndex = HANDLES_PER_CACHE_BANK;
// if we have handles left over then we need to do some more work
if (uHandleCount)
{
// do we have too many handles for a balanced cache?
if (uHandleCount > REBALANCE_HIWATER_MARK)
{
//
// sort the array by reverse handle order - this does two things:
// (1) combats handle fragmentation by preferring low-address handles to high ones
// (2) allows the free routine to run much more efficiently over the ones we free
//
QuickSort((UINT_PTR *)pHandleBase, 0, uHandleCount - 1, CompareHandlesByFreeOrder);
// yup, we need to free some - calculate how many
UINT uFree = uHandleCount - HANDLES_PER_CACHE_BANK;
// free the handles - they are already 'prepared' (eg zeroed and sorted)
TableFreeBulkPreparedHandles(pTable, uType, pHandleBase, uFree);
// update our array base and length
uHandleCount -= uFree;
pHandleBase += uFree;
}
// if we have an extra outgoing handle then fill it now
if (pExtraOutHandle)
{
// account for the handle we're giving away
uHandleCount--;
// now give it away
*pExtraOutHandle = pHandleBase[uHandleCount];
}
// if we have more than a reserve bank of handles then put some in the free bank
if (uHandleCount > HANDLES_PER_CACHE_BANK)
{
// compute the number of extra handles we need to save away
UINT uStore = uHandleCount - HANDLES_PER_CACHE_BANK;
// compute the index to start writing the handles to
lMinFreeIndex = HANDLES_PER_CACHE_BANK - uStore;
// store the handles
// (we don't need to wait on these since we already waited while reading them)
WriteCacheHandles(pCache->rgFreeBank + lMinFreeIndex, pHandleBase, uStore);
// update our array base and length
uHandleCount -= uStore;
pHandleBase += uStore;
}
}
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
FastInterlockExchange(&pCache->lFreeIndex, lMinFreeIndex);
// now if we have any handles left, store them in the reserve bank
if (uHandleCount)
{
// store the handles
// (here we need to wait for all pending allocated handles to be taken
// before we set up new ones in their places)
SyncWriteCacheHandles(pCache->rgReserveBank, pHandleBase, uHandleCount);
}
// compute the index to start serving handles from
lMinReserveIndex = (LONG)uHandleCount;
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
FastInterlockExchange(&pCache->lReserveIndex, lMinReserveIndex);
}
/*
* TableQuickRebalanceCache
*
* Rebalances a handle cache by transferring handles from the cache's free bank
* to its reserve bank. If the free bank does not provide enough handles to
* replenish the reserve bank or too many handles remain in the free bank, the
* routine just punts and calls TableFullRebalanceCache.
*
*/
void TableQuickRebalanceCache(HandleTable *pTable,
HandleTypeCache *pCache,
UINT uType,
LONG lMinReserveIndex,
LONG lMinFreeIndex,
OBJECTHANDLE *pExtraOutHandle,
OBJECTHANDLE extraInHandle)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// clamp the min free index to be non-negative
if (lMinFreeIndex < 0)
lMinFreeIndex = 0;
// clamp the min reserve index to be non-negative
if (lMinReserveIndex < 0)
lMinReserveIndex = 0;
// compute the number of slots in the free bank taken by handles
UINT uFreeAvail = HANDLES_PER_CACHE_BANK - (UINT)lMinFreeIndex;
// compute the number of handles we have to fiddle with
UINT uHandleCount = (UINT)lMinReserveIndex + uFreeAvail + (extraInHandle != 0);
// can we rebalance these handles in place?
if ((uHandleCount < REBALANCE_LOWATER_MARK) ||
(uHandleCount > REBALANCE_HIWATER_MARK))
{
// nope - perform a full rebalance of the handle cache
TableFullRebalanceCache(pTable, pCache, uType, lMinReserveIndex, lMinFreeIndex,
pExtraOutHandle, extraInHandle);
// all done
return;
}
// compute the number of empty slots in the reserve bank
UINT uEmptyReserve = HANDLES_PER_CACHE_BANK - lMinReserveIndex;
// we want to transfer as many handles as we can from the free bank
UINT uTransfer = uFreeAvail;
// but only as many as we have room to store in the reserve bank
if (uTransfer > uEmptyReserve)
uTransfer = uEmptyReserve;
// transfer the handles
SyncTransferCacheHandles(pCache->rgReserveBank + lMinReserveIndex,
pCache->rgFreeBank + lMinFreeIndex,
uTransfer);
// adjust the free and reserve indices to reflect the transfer
lMinFreeIndex += uTransfer;
lMinReserveIndex += uTransfer;
// do we have an extra incoming handle to store?
if (extraInHandle)
{
//
// Workaround: For code size reasons, we don't handle all cases here.
// We assume an extra IN handle means a cache overflow during a free.
//
// After the rebalance above, the reserve bank should be full, and
// there may be a few handles sitting in the free bank. The HIWATER
// check above guarantees that we have room to store the handle.
//
_ASSERTE(!pExtraOutHandle);
// store the handle in the next available free bank slot
pCache->rgFreeBank[--lMinFreeIndex] = extraInHandle;
}
else if (pExtraOutHandle) // do we have an extra outgoing handle to satisfy?
{
//
// For code size reasons, we don't handle all cases here.
// We assume an extra OUT handle means a cache underflow during an alloc.
//
// After the rebalance above, the free bank should be empty, and
// the reserve bank may not be fully populated. The LOWATER check above
// guarantees that the reserve bank has at least one handle we can steal.
//
// take the handle from the reserve bank and update the reserve index
*pExtraOutHandle = pCache->rgReserveBank[--lMinReserveIndex];
// zero the cache slot we chose
pCache->rgReserveBank[lMinReserveIndex] = NULL;
}
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
FastInterlockExchange(&pCache->lFreeIndex, lMinFreeIndex);
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
FastInterlockExchange(&pCache->lReserveIndex, lMinReserveIndex);
}
/*
* TableCacheMissOnAlloc
*
* Gets a single handle of the specified type from the handle table,
* making the assumption that the reserve cache for that type was
* recently emptied. This routine acquires the handle manager lock and
* attempts to get a handle from the reserve cache again. If this second
* get operation also fails, the handle is allocated by means of a cache
* rebalance.
*
*/
OBJECTHANDLE TableCacheMissOnAlloc(HandleTable *pTable, HandleTypeCache *pCache, UINT uType)
{
WRAPPER_NO_CONTRACT;
// assume we get no handle
OBJECTHANDLE handle = NULL;
// acquire the handle manager lock
CrstHolder ch(&pTable->Lock);
// try again to take a handle (somebody else may have rebalanced)
LONG lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
// are we still waiting for handles?
if (lReserveIndex < 0)
{
// yup, suspend free list usage...
LONG lFreeIndex = FastInterlockExchange(&pCache->lFreeIndex, 0L);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, &handle, NULL);
}
else
{
// somebody else rebalanced the cache for us - take the handle
handle = pCache->rgReserveBank[lReserveIndex];
// zero the handle slot
pCache->rgReserveBank[lReserveIndex] = 0;
}
// return the handle we got
return handle;
}
/*
* TableCacheMissOnFree
*
* Returns a single handle of the specified type to the handle table,
* making the assumption that the free cache for that type was recently
* filled. This routine acquires the handle manager lock and attempts
* to store the handle in the free cache again. If this second store
* operation also fails, the handle is freed by means of a cache
* rebalance.
*
*/
void TableCacheMissOnFree(HandleTable *pTable, HandleTypeCache *pCache, UINT uType, OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
/*
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
*/
// acquire the handle manager lock
CrstHolder ch(&pTable->Lock);
// try again to take a slot (somebody else may have rebalanced)
LONG lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
// are we still waiting for free slots?
if (lFreeIndex < 0)
{
// yup, suspend reserve list usage...
LONG lReserveIndex = FastInterlockExchange(&pCache->lReserveIndex, 0L);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, NULL, handle);
}
else
{
// somebody else rebalanced the cache for us - free the handle
pCache->rgFreeBank[lFreeIndex] = handle;
}
}
/*
* TableAllocSingleHandleFromCache
*
* Gets a single handle of the specified type from the handle table by
* trying to fetch it from the reserve cache for that handle type. If the
* reserve cache is empty, this routine calls TableCacheMissOnAlloc.
*
*/
OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, UINT uType)
{
WRAPPER_NO_CONTRACT;
// we use this in two places
OBJECTHANDLE handle;
// first try to get a handle from the quick cache
if (pTable->rgQuickCache[uType])
{
// try to grab the handle we saw
handle = FastInterlockExchangePointer(pTable->rgQuickCache + uType, (OBJECTHANDLE)NULL);
// if it worked then we're done
if (handle)
return handle;
}
// ok, get the main handle cache for this type
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a handle from the main cache
LONG lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
// did we underflow?
if (lReserveIndex < 0)
{
// yep - the cache is out of handles
return TableCacheMissOnAlloc(pTable, pCache, uType);
}
// get our handle
handle = pCache->rgReserveBank[lReserveIndex];
// zero the handle slot
pCache->rgReserveBank[lReserveIndex] = 0;
// sanity
_ASSERTE(handle);
// return our handle
return handle;
}
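// Editorial note (not part of the original file): the fast path above is a
// lock-free "ticket" into rgReserveBank. For example, with lReserveIndex == 2,
// two allocating threads each perform FastInterlockDecrement and observe the
// distinct indices 1 and 0, so they take distinct handles; a third thread
// observes -1, which is the cache-miss signal that routes it to
// TableCacheMissOnAlloc (and, under the table lock, to TableQuickRebalanceCache).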
/*
* TableFreeSingleHandleToCache
*
* Returns a single handle of the specified type to the handle table
* by trying to store it in the free cache for that handle type. If the
* free cache is full, this routine calls TableCacheMissOnFree.
*
*/
void TableFreeSingleHandleToCache(HandleTable *pTable, UINT uType, OBJECTHANDLE handle)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
CAN_TAKE_LOCK; // because of TableCacheMissOnFree
}
CONTRACTL_END;
#ifdef DEBUG_DestroyedHandleValue
*(_UNCHECKED_OBJECTREF *)handle = DEBUG_DestroyedHandleValue;
#else
// zero the handle's object pointer
*(_UNCHECKED_OBJECTREF *)handle = NULL;
#endif
// if this handle type has user data then clear it - AFTER the referent is cleared!
if (TypeHasUserData(pTable, uType))
HandleQuickSetUserData(handle, 0L);
// is there room in the quick cache?
if (!pTable->rgQuickCache[uType])
{
// yup - try to stuff our handle in the slot we saw
handle = FastInterlockExchangePointer(&pTable->rgQuickCache[uType], handle);
// if we didn't end up with another handle then we're done
if (!handle)
return;
}
// ok, get the main handle cache for this type
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a free slot from the main cache
LONG lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
// did we underflow?
if (lFreeIndex < 0)
{
// yep - we're out of free slots
TableCacheMissOnFree(pTable, pCache, uType, handle);
return;
}
// we got a slot - save the handle in the free bank
pCache->rgFreeBank[lFreeIndex] = handle;
}
/*
* TableAllocHandlesFromCache
*
* Allocates multiple handles of the specified type by repeatedly
* calling TableAllocSingleHandleFromCache.
*
*/
UINT TableAllocHandlesFromCache(HandleTable *pTable, UINT uType, OBJECTHANDLE *pHandleBase, UINT uCount)
{
WRAPPER_NO_CONTRACT;
// loop until we have satisfied all the handles we need to allocate
UINT uSatisfied = 0;
while (uSatisfied < uCount)
{
// get a handle from the cache
OBJECTHANDLE handle = TableAllocSingleHandleFromCache(pTable, uType);
// if we can't get any more then bail out
if (!handle)
break;
// store the handle in the caller's array
*pHandleBase = handle;
// on to the next one
uSatisfied++;
pHandleBase++;
}
// return the number of handles we allocated
return uSatisfied;
}
/*
* TableFreeHandlesToCache
*
* Frees multiple handles of the specified type by repeatedly
* calling TableFreeSingleHandleToCache.
*
*/
void TableFreeHandlesToCache(HandleTable *pTable, UINT uType, const OBJECTHANDLE *pHandleBase, UINT uCount)
{
WRAPPER_NO_CONTRACT;
// loop until we have freed all the handles
while (uCount)
{
// get the next handle to free
OBJECTHANDLE handle = *pHandleBase;
// advance our state
uCount--;
pHandleBase++;
// sanity
_ASSERTE(handle);
// return the handle to the cache
TableFreeSingleHandleToCache(pTable, uType, handle);
}
}
/*--------------------------------------------------------------------------*/

Diff not shown because of its large size.

Diff not shown because of its large size.

Diff not shown because of its large size.

Diff not shown because of its large size.

View file

@ -0,0 +1,682 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/*
* Wraps handle table to implement various handle types (Strong, Weak, etc.)
*
*
*/
#ifndef _OBJECTHANDLE_H
#define _OBJECTHANDLE_H
/*
* include handle manager declarations
*/
#include "handletable.h"
#ifdef FEATURE_COMINTEROP
#include <weakreference.h>
#endif // FEATURE_COMINTEROP
/*
* Convenience macros for accessing handles. StoreFirstObjectInHandle is like
* StoreObjectInHandle, except it only succeeds if transitioning from NULL to
* non-NULL. In other words, if this handle is being initialized for the first
* time.
*/
#define ObjectFromHandle(handle) HndFetchHandle(handle)
#define StoreObjectInHandle(handle, object) HndAssignHandle(handle, object)
#define InterlockedCompareExchangeObjectInHandle(handle, object, oldObj) HndInterlockedCompareExchangeHandle(handle, object, oldObj)
#define StoreFirstObjectInHandle(handle, object) HndFirstAssignHandle(handle, object)
#define ObjectHandleIsNull(handle) HndIsNull(handle)
#define IsHandleNullUnchecked(handle) HndCheckForNullUnchecked(handle)
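/*
 * Illustrative sketch (not part of the original header): the "initialize exactly once"
 * pattern the macros above enable. 'hCache' and 'AllocateNewObject' are hypothetical,
 * and this assumes HndFirstAssignHandle reports whether the store actually happened.
 *
 *   OBJECTREF obj = ObjectFromHandle(hCache);
 *   if (obj == NULL)
 *   {
 *       OBJECTREF fresh = AllocateNewObject();
 *       if (StoreFirstObjectInHandle(hCache, fresh))
 *           obj = fresh;                       // we published our object
 *       else
 *           obj = ObjectFromHandle(hCache);    // another thread published first
 *   }
 */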
/*
* HANDLES
*
* The default type of handle is a strong handle.
*
*/
#define HNDTYPE_DEFAULT HNDTYPE_STRONG
/*
* WEAK HANDLES
*
* Weak handles are handles that track an object as long as it is alive,
* but do not keep the object alive if there are no strong references to it.
*
* The default type of weak handle is 'long-lived' weak handle.
*
*/
#define HNDTYPE_WEAK_DEFAULT HNDTYPE_WEAK_LONG
/*
* SHORT-LIVED WEAK HANDLES
*
* Short-lived weak handles are weak handles that track an object until the
* first time it is detected to be unreachable. At this point, the handle is
* severed, even if the object will be visible from a pending finalization
* graph. This further implies that short weak handles do not track
* across object resurrections.
*
*/
#define HNDTYPE_WEAK_SHORT (0)
/*
* LONG-LIVED WEAK HANDLES
*
* Long-lived weak handles are weak handles that track an object until the
* object is actually reclaimed. Unlike short weak handles, long weak handles
* continue to track their referents through finalization and across any
* resurrections that may occur.
*
*/
#define HNDTYPE_WEAK_LONG (1)
/*
* STRONG HANDLES
*
* Strong handles are handles which function like a normal object reference.
* The existence of a strong handle for an object will cause the object to
* be promoted (remain alive) through a garbage collection cycle.
*
*/
#define HNDTYPE_STRONG (2)
/*
* PINNED HANDLES
*
* Pinned handles are strong handles which have the added property that they
* prevent an object from moving during a garbage collection cycle. This is
* useful when passing a pointer to object innards out of the runtime while GC
* may be enabled.
*
* NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
* OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
* OF HANDLE SHOULD BE USED SPARINGLY!
*/
#define HNDTYPE_PINNED (3)
/*
* VARIABLE HANDLES
*
* Variable handles are handles whose type can be changed dynamically. They
* are larger than other types of handles, and are scanned a little more often,
* but are useful when the handle owner needs an efficient way to change the
* strength of a handle on the fly.
*
*/
#define HNDTYPE_VARIABLE (4)
#ifdef FEATURE_COMINTEROP
/*
* REFCOUNTED HANDLES
*
* Refcounted handles are handles that behave as strong handles while the
* refcount on them is greater than 0 and behave as weak handles otherwise.
*
* N.B. These are currently NOT general purpose.
* The implementation is tied to COM Interop.
*
*/
#define HNDTYPE_REFCOUNTED (5)
#endif // FEATURE_COMINTEROP
/*
* DEPENDENT HANDLES
*
* Dependent handles associate a primary object with a secondary object that needs to share the
* primary object's lifetime. The secondary object does not cause the primary object to be
* referenced, but as long as the primary object is alive, the secondary object is kept alive as well.
*
* They are currently used by EnC (Edit and Continue) to add new field members to existing instances:
* the primary object is the original instance and the secondary object represents the added field.
*
* They are also used to implement the ConditionalWeakTable class in mscorlib.dll. They are exposed to
* managed code in the BCL through the DependentHandle class.
*
*
*/
#define HNDTYPE_DEPENDENT (6)
/*
* PINNED HANDLES for asynchronous operation
*
* Pinned handles are strong handles which have the added property that they
* prevent an object from moving during a garbage collection cycle. This is
* useful when passing a pointer to object innards out of the runtime while GC
* may be enabled.
*
* NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
* OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
* OF HANDLE SHOULD BE USED SPARINGLY!
*/
#define HNDTYPE_ASYNCPINNED (7)
/*
* SIZEDREF HANDLES
*
* SizedRef handles are strong handles. Each handle has a piece of user data associated
* with it that stores the size of the object the handle refers to. These handles are
* scanned as strong roots during each GC, but the size is recalculated only during
* full GCs.
*
*/
#define HNDTYPE_SIZEDREF (8)
#ifdef FEATURE_COMINTEROP
/*
* WINRT WEAK HANDLES
*
* WinRT weak reference handles hold two different types of weak handles to any
* RCW with an underlying COM object that implements IWeakReferenceSource. The
* object reference itself is a short weak handle to the RCW. In addition an
* IWeakReference* to the underlying COM object is stored, allowing the handle
* to create a new RCW if the existing RCW is collected. This ensures that any
* code holding onto a WinRT weak reference can always access an RCW to the
* underlying COM object as long as it has not been released by all of its strong
* references.
*/
#define HNDTYPE_WEAK_WINRT (9)
#endif // FEATURE_COMINTEROP
typedef DPTR(struct HandleTableMap) PTR_HandleTableMap;
typedef DPTR(struct HandleTableBucket) PTR_HandleTableBucket;
typedef DPTR(PTR_HandleTableBucket) PTR_PTR_HandleTableBucket;
struct HandleTableMap
{
PTR_PTR_HandleTableBucket pBuckets;
PTR_HandleTableMap pNext;
DWORD dwMaxIndex;
};
GVAL_DECL(HandleTableMap, g_HandleTableMap);
#define INITIAL_HANDLE_TABLE_ARRAY_SIZE 10
// Struct containing g_SystemInfo.dwNumberOfProcessors HHANDLETABLEs and the current table index.
// Using several tables instead of a single HHANDLETABLE allows on-the-fly balancing while adding handles on multiprocessor machines.
struct HandleTableBucket
{
PTR_HHANDLETABLE pTable;
UINT HandleTableIndex;
bool Contains(OBJECTHANDLE handle);
};
/*
* Type mask definitions for HNDTYPE_VARIABLE handles.
*/
#define VHT_WEAK_SHORT (0x00000100) // avoid using low byte so we don't overlap normal types
#define VHT_WEAK_LONG (0x00000200) // avoid using low byte so we don't overlap normal types
#define VHT_STRONG (0x00000400) // avoid using low byte so we don't overlap normal types
#define VHT_PINNED (0x00000800) // avoid using low byte so we don't overlap normal types
#define IS_VALID_VHT_VALUE(flag) ((flag == VHT_WEAK_SHORT) || \
(flag == VHT_WEAK_LONG) || \
(flag == VHT_STRONG) || \
(flag == VHT_PINNED))
#ifndef DACCESS_COMPILE
/*
* Convenience macros and prototypes for the various handle types we define
*/
inline OBJECTHANDLE CreateTypedHandle(HHANDLETABLE table, OBJECTREF object, int type)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, type, object);
}
inline void DestroyTypedHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
}
inline OBJECTHANDLE CreateHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_DEFAULT, object);
}
inline void DestroyHandle(OBJECTHANDLE handle)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
SO_TOLERANT;
}
CONTRACTL_END;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
}
inline OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle) {
WRAPPER_NO_CONTRACT;
// Create a new STRONG handle in the same table as an existing handle.
return HndCreateHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, ObjectFromHandle(handle));
}
inline OBJECTHANDLE CreateWeakHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_WEAK_DEFAULT, object);
}
inline void DestroyWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
}
inline OBJECTHANDLE CreateShortWeakHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_WEAK_SHORT, object);
}
inline void DestroyShortWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
}
inline OBJECTHANDLE CreateLongWeakHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_WEAK_LONG, object);
}
inline void DestroyLongWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
}
#ifndef FEATURE_REDHAWK
typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyLongWeakHandle> LongWeakHandleHolder;
#endif
inline OBJECTHANDLE CreateStrongHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_STRONG, object);
}
inline void DestroyStrongHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
}
inline OBJECTHANDLE CreatePinningHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_PINNED, object);
}
inline void DestroyPinningHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
}
#ifndef FEATURE_REDHAWK
typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyPinningHandle, NULL> PinningHandleHolder;
#endif
inline OBJECTHANDLE CreateAsyncPinningHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_ASYNCPINNED, object);
}
inline void DestroyAsyncPinningHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_ASYNCPINNED, handle);
}
#ifndef FEATURE_REDHAWK
typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyAsyncPinningHandle, NULL> AsyncPinningHandleHolder;
#endif
inline OBJECTHANDLE CreateSizedRefHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_SIZEDREF, object, (LPARAM)0);
}
void DestroySizedRefHandle(OBJECTHANDLE handle);
#ifndef FEATURE_REDHAWK
typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroySizedRefHandle, NULL> SizeRefHandleHolder;
#endif
#ifdef FEATURE_COMINTEROP
inline OBJECTHANDLE CreateRefcountedHandle(HHANDLETABLE table, OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(table, HNDTYPE_REFCOUNTED, object);
}
inline void DestroyRefcountedHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
}
inline OBJECTHANDLE CreateWinRTWeakHandle(HHANDLETABLE table, OBJECTREF object, IWeakReference* pWinRTWeakReference)
{
WRAPPER_NO_CONTRACT;
_ASSERTE(pWinRTWeakReference != NULL);
return HndCreateHandle(table, HNDTYPE_WEAK_WINRT, object, reinterpret_cast<LPARAM>(pWinRTWeakReference));
}
void DestroyWinRTWeakHandle(OBJECTHANDLE handle);
#endif // FEATURE_COMINTEROP
#endif // !DACCESS_COMPILE
OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle);
#ifndef DACCESS_COMPILE
OBJECTHANDLE CreateDependentHandle(HHANDLETABLE table, OBJECTREF primary, OBJECTREF secondary);
void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF secondary);
inline void DestroyDependentHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEPENDENT, handle);
}
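/*
 * Illustrative sketch (not part of the original header): attaching auxiliary state to an
 * object without extending its lifetime, in the spirit of ConditionalWeakTable.
 * 'hTable', 'primaryObj' and 'extraStateObj' are hypothetical.
 *
 *   OBJECTHANDLE hDep = CreateDependentHandle(hTable, primaryObj, extraStateObj);
 *   // ... extraStateObj stays alive exactly as long as primaryObj is reachable ...
 *   OBJECTREF secondary = GetDependentHandleSecondary(hDep);
 *   DestroyDependentHandle(hDep);
 */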
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE
OBJECTHANDLE CreateVariableHandle(HHANDLETABLE hTable, OBJECTREF object, UINT type);
void UpdateVariableHandleType(OBJECTHANDLE handle, UINT type);
inline void DestroyVariableHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_VARIABLE, handle);
}
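/*
 * Illustrative sketch (not part of the original header): a variable handle can change its
 * strength on the fly using the VHT_* masks defined above. 'hTable' and 'obj' are hypothetical.
 *
 *   OBJECTHANDLE hVar = CreateVariableHandle(hTable, obj, VHT_WEAK_LONG);
 *   ...
 *   UpdateVariableHandleType(hVar, VHT_STRONG);   // promote to a strong reference
 *   ...
 *   DestroyVariableHandle(hVar);
 */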
void GCHandleValidatePinnedObject(OBJECTREF obj);
/*
* Holder for OBJECTHANDLE
*/
#ifndef FEATURE_REDHAWK
typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyHandle > OHWrapper;
class OBJECTHANDLEHolder : public OHWrapper
{
public:
FORCEINLINE OBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : OHWrapper(p)
{
LIMITED_METHOD_CONTRACT;
}
FORCEINLINE void operator=(OBJECTHANDLE p)
{
WRAPPER_NO_CONTRACT;
OHWrapper::operator=(p);
}
};
#endif
#ifdef FEATURE_COMINTEROP
typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyRefcountedHandle> RefCountedOHWrapper;
class RCOBJECTHANDLEHolder : public RefCountedOHWrapper
{
public:
FORCEINLINE RCOBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : RefCountedOHWrapper(p)
{
LIMITED_METHOD_CONTRACT;
}
FORCEINLINE void operator=(OBJECTHANDLE p)
{
WRAPPER_NO_CONTRACT;
RefCountedOHWrapper::operator=(p);
}
};
#endif // FEATURE_COMINTEROP
/*
* Convenience prototypes for using the global handles
*/
int GetCurrentThreadHomeHeapNumber();
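// The helpers below all allocate from the first bucket (g_HandleTableMap.pBuckets[0]) and pick
// the per-heap table that corresponds to the calling thread's home heap, which balances handle
// creation across tables on multiprocessor machines (see HandleTableBucket above).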
inline OBJECTHANDLE CreateGlobalTypedHandle(OBJECTREF object, int type)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], type, object);
}
inline void DestroyGlobalTypedHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
}
inline OBJECTHANDLE CreateGlobalHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_DEFAULT, object);
}
inline void DestroyGlobalHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
}
inline OBJECTHANDLE CreateGlobalWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_DEFAULT, object);
}
inline void DestroyGlobalWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
}
inline OBJECTHANDLE CreateGlobalShortWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_SHORT, object);
}
inline void DestroyGlobalShortWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
}
#ifndef FEATURE_REDHAWK
typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalShortWeakHandle> GlobalShortWeakHandleHolder;
#endif
inline OBJECTHANDLE CreateGlobalLongWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_LONG, object);
}
inline void DestroyGlobalLongWeakHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
}
inline OBJECTHANDLE CreateGlobalStrongHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_STRONG, object);
}
inline void DestroyGlobalStrongHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
}
#ifndef FEATURE_REDHAWK
typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalStrongHandle> GlobalStrongHandleHolder;
#endif
inline OBJECTHANDLE CreateGlobalPinningHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_PINNED, object);
}
inline void DestroyGlobalPinningHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
}
#ifdef FEATURE_COMINTEROP
inline OBJECTHANDLE CreateGlobalRefcountedHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_REFCOUNTED, object);
}
inline void DestroyGlobalRefcountedHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
}
#endif // FEATURE_COMINTEROP
inline void ResetOBJECTHANDLE(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
StoreObjectInHandle(handle, NULL);
}
#ifndef FEATURE_REDHAWK
typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,ResetOBJECTHANDLE> ObjectInHandleHolder;
#endif
/*
* Table maintenance routines
*/
bool Ref_Initialize();
void Ref_Shutdown();
HandleTableBucket *Ref_CreateHandleTableBucket(ADIndex uADIndex);
BOOL Ref_HandleAsyncPinHandles();
void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget);
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket);
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket);
BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
/*
* GC-time scanning entrypoints
*/
struct ScanContext;
struct DhContext;
struct ProfilingScanContext;
void Ref_BeginSynchronousGC (UINT uCondemnedGeneration, UINT uMaxGeneration);
void Ref_EndSynchronousGC (UINT uCondemnedGeneration, UINT uMaxGeneration);
typedef void Ref_promote_func(class Object**, ScanContext*, DWORD);
void Ref_TraceRefCountHandles(HANDLESCANPROC callback, LPARAM lParam1, LPARAM lParam2);
void Ref_TracePinningRoots(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_TraceNormalRoots(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_UpdatePointers(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_UpdatePinnedPointers(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
DhContext *Ref_GetDependentHandleContext(ScanContext* sc);
bool Ref_ScanDependentHandlesForPromotion(DhContext *pDhContext);
void Ref_ScanDependentHandlesForClearing(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_ScanDependentHandlesForRelocation(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_ScanSizedRefHandles(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_CheckReachable (UINT uCondemnedGeneration, UINT uMaxGeneration, LPARAM lp1);
void Ref_CheckAlive (UINT uCondemnedGeneration, UINT uMaxGeneration, LPARAM lp1);
void Ref_ScanPointersForProfilerAndETW(UINT uMaxGeneration, LPARAM lp1);
void Ref_ScanDependentHandlesForProfilerAndETW(UINT uMaxGeneration, ProfilingScanContext * SC);
void Ref_AgeHandles (UINT uCondemnedGeneration, UINT uMaxGeneration, LPARAM lp1);
void Ref_RejuvenateHandles(UINT uCondemnedGeneration, UINT uMaxGeneration, LPARAM lp1);
void Ref_VerifyHandleTable(UINT condemned, UINT maxgen, ScanContext* sc);
#endif // DACCESS_COMPILE
#endif //_OBJECTHANDLE_H

View file

@ -0,0 +1,32 @@
project(clrgcsample)
include_directories(..)
include_directories(../env)
set(SOURCES
GCSample.cpp
)
add_executable(gcsample
${SOURCES}
)
if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-D_TARGET_AMD64_=1)
add_definitions(-D_WIN64=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
add_definitions(-D_TARGET_X86_=1)
add_definitions(-D_WIN32=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
add_definitions(-D_TARGET_ARM_=1)
add_definitions(-D_WIN32=1)
elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
add_definitions(-D_TARGET_ARM64_=1)
add_definitions(-D_WIN64=1)
else()
clr_unknown_arch()
endif()
target_link_libraries(gcsample
clrgc
)

View file

@ -0,0 +1,240 @@
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
//
// GCSample.cpp
//
//
// This sample demonstrates:
//
// * How to initialize the GC without the rest of CoreCLR
// * How to create type layout information in the format that the GC expects
// * How to implement a fast object allocator and write barrier
// * How to allocate objects and work with GC handles
//
// An important part of the sample is the GC environment (gcenv.*) that provides the methods the GC uses to interact
// with the OS and the execution engine.
//
// The methods for interacting with the OS should be no surprise - memory block allocation, synchronization primitives, etc.
//
// The important methods that the execution engine needs to provide to the GC are:
//
// * Thread suspend/resume:
// static void SuspendEE(SUSPEND_REASON reason);
// static void RestartEE(bool bFinishedGC); //resume threads.
//
// * Enumeration of threads that are running managed code:
// static Thread * GetThreadList(Thread * pThread);
//
// * Scanning of stack roots of given thread:
// static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc);
//
// The sample has a trivial implementation of these methods. It is single-threaded, and there are no stack roots to
// be reported. There are a number of other callbacks that the GC makes to optionally allow the execution engine to do its
// own bookkeeping.
//
// For now, the sample GC environment has some cruft in it to decouple the GC from Windows and the rest of CoreCLR.
// It is something we would like to clean up.
//
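//
// For orientation only (a hedged sketch, not part of the original sample): with a single
// registered thread, the execution engine callbacks listed above can be close to no-ops:
//
//   void SuspendEE(SUSPEND_REASON)   { /* no other thread is running managed code */ }
//   void RestartEE(bool)             { /* nothing to resume */ }
//   Thread * GetThreadList(Thread * pThread)
//                                    { return (pThread == NULL) ? GetThread() : NULL; }
//   void ScanStackRoots(Thread *, promote_func *, ScanContext *)
//                                    { /* the sample reports no stack roots */ }
//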
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#include "objecthandle.h"
#include "gcdesc.h"
//
// The fast paths for object allocation and write barriers are performance critical. They are often
// hand-written in assembly code, etc.
//
Object * AllocateObject(MethodTable * pMT)
{
alloc_context * acontext = GetThread()->GetAllocContext();
Object * pObject;
size_t size = pMT->GetBaseSize();
BYTE* result = acontext->alloc_ptr;
BYTE* advance = result + size;
if (advance <= acontext->alloc_limit)
{
acontext->alloc_ptr = advance;
pObject = (Object *)result;
}
else
{
pObject = GCHeap::GetGCHeap()->Alloc(acontext, size, 0);
if (pObject == NULL)
return NULL;
}
pObject->SetMethodTable(pMT);
return pObject;
}
#if defined(_WIN64)
// Card byte shift is different on 64bit.
#define card_byte_shift 11
#else
#define card_byte_shift 10
#endif
#define card_byte(addr) (((size_t)(addr)) >> card_byte_shift)
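// Each card byte covers 2^card_byte_shift bytes of heap (1 KB on 32-bit, 2 KB on 64-bit).
// Setting a card byte records that the covered region may contain a pointer into the
// ephemeral generations, so the GC will revisit it during the next ephemeral collection.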
inline void ErectWriteBarrier(Object ** dst, Object * ref)
{
// if the dst is outside of the heap (unboxed value classes) then we
// simply exit
if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
return;
if((BYTE*)ref >= g_ephemeral_low && (BYTE*)ref < g_ephemeral_high)
{
// volatile is used here to prevent fetch of g_card_table from being reordered
// with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
BYTE* pCardByte = (BYTE *)*(volatile BYTE **)(&g_card_table) + card_byte((BYTE *)dst);
if(*pCardByte != 0xFF)
*pCardByte = 0xFF;
}
}
void WriteBarrier(Object ** dst, Object * ref)
{
*dst = ref;
ErectWriteBarrier(dst, ref);
}
int main(int argc, char* argv[])
{
//
// Initialize system info
//
InitializeSystemInfo();
//
// Initialize the free object methodtable. The GC uses a special array-like methodtable as a placeholder
// for collected free space.
//
static MethodTable freeObjectMT;
freeObjectMT.InitializeFreeObject();
g_pFreeObjectMethodTable = &freeObjectMT;
//
// Initialize handle table
//
if (!Ref_Initialize())
return -1;
//
// Initialize GC heap
//
GCHeap *pGCHeap = GCHeap::CreateGCHeap();
if (!pGCHeap)
return -1;
if (FAILED(pGCHeap->Initialize()))
return -1;
//
// Initialize current thread
//
ThreadStore::AttachCurrentThread(false);
//
// Create a MethodTable with a GCDesc
//
class My : Object {
public:
Object * m_pOther1;
int dummy_inbetween;
Object * m_pOther2;
};
static struct My_MethodTable
{
// GCDesc
CGCDescSeries m_series[2];
size_t m_numSeries;
// The actual methodtable
MethodTable m_MT;
}
My_MethodTable;
// 'My' contains the MethodTable*
size_t baseSize = sizeof(My);
// GC expects the size of ObjHeader (extra void*) to be included in the size.
baseSize = baseSize + sizeof(ObjHeader);
// Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE.
My_MethodTable.m_MT.m_baseSize = max(baseSize, MIN_OBJECT_SIZE);
My_MethodTable.m_MT.m_componentSize = 0; // Array component size
My_MethodTable.m_MT.m_flags = MTFlag_ContainsPointers;
My_MethodTable.m_numSeries = 2;
// The GC walks the series backwards. It expects the offsets to be sorted in descending order.
My_MethodTable.m_series[0].SetSeriesOffset(offsetof(My, m_pOther2));
My_MethodTable.m_series[0].SetSeriesCount(1);
My_MethodTable.m_series[0].seriessize -= My_MethodTable.m_MT.m_baseSize;
My_MethodTable.m_series[1].SetSeriesOffset(offsetof(My, m_pOther1));
My_MethodTable.m_series[1].SetSeriesCount(1);
My_MethodTable.m_series[1].seriessize -= My_MethodTable.m_MT.m_baseSize;
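// Note (descriptive, not in the original sample): SetSeriesCount stores the series length in bytes
// (count * sizeof(void*)); the GCDesc encoding expects that length biased by the object's size,
// which the GC adds back while scanning. The two subtractions above apply that bias.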
MethodTable * pMyMethodTable = &My_MethodTable.m_MT;
// Allocate an instance of the My object
Object * pObj = AllocateObject(pMyMethodTable);
if (pObj == NULL)
return -1;
// Create strong handle and store the object into it
OBJECTHANDLE oh = CreateGlobalHandle(pObj);
if (oh == NULL)
return -1;
for (int i = 0; i < 1000000; i++)
{
Object * pBefore = ((My *)ObjectFromHandle(oh))->m_pOther1;
// Allocate more instances of the same object
Object * p = AllocateObject(pMyMethodTable);
if (p == NULL)
return -1;
Object * pAfter = ((My *)ObjectFromHandle(oh))->m_pOther1;
// Uncomment this assert to see how a GC triggered inside AllocateObject moves objects around
// assert(pBefore == pAfter);
// Store the newly allocated object into a field using WriteBarrier
WriteBarrier(&(((My *)ObjectFromHandle(oh))->m_pOther1), p);
}
// Create weak handle that points to our object
OBJECTHANDLE ohWeak = CreateGlobalWeakHandle(ObjectFromHandle(oh));
if (ohWeak == NULL)
return -1;
// Destroy the strong handle so that nothing will be keeping our object alive
DestroyGlobalHandle(oh);
// Explicitly trigger full GC
pGCHeap->GarbageCollect();
// Verify that the weak handle got cleared by the GC
assert(ObjectFromHandle(ohWeak) == NULL);
printf("Done\n");
return 0;
}

View file

@ -0,0 +1,105 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{58D6B7AE-0A12-49F0-BCF7-200ED8BA445A}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>GCSample</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<PrecompiledHeaderFile>common.h</PrecompiledHeaderFile>
<AdditionalIncludeDirectories>.;..;..\env</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>Use</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>.;..;..\env</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="common.h" />
<ClInclude Include="gcenv.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="GCSample.cpp" />
<ClCompile Include="..\gccommon.cpp" />
<ClCompile Include="..\gceewks.cpp" />
<ClCompile Include="..\gcscan.cpp" />
<ClCompile Include="..\gcwks.cpp" />
<ClCompile Include="..\handletable.cpp" />
<ClCompile Include="..\handletablecache.cpp" />
<ClCompile Include="..\handletablecore.cpp" />
<ClCompile Include="..\handletablescan.cpp" />
<ClCompile Include="..\objecthandle.cpp" />
<ClCompile Include="..\env\gcenv.cpp" />
<ClCompile Include="..\env\common.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View file

@ -0,0 +1,63 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="common.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="gcenv.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="common.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="GCSample.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\objecthandle.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\handletable.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\handletablecache.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\handletablescan.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\handletablecore.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\gcwks.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\gcscan.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="gcenv.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\gceewks.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\gccommon.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>