Trim non-monitor files; change build definitions; clean references

Juan Sebastian Hoyos Ayala 2021-02-15 01:26:02 -08:00
Parent 8e92f22eb2
Commit ca00141008
1716 changed files with 363 additions and 450102 deletions

View file

@@ -1,3 +1,3 @@
@echo off
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\build.ps1""" -restore %*"
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\common\build.ps1""" -restore -build %*"
exit /b %ErrorLevel%

View file

@@ -1,663 +0,0 @@
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
# Verify minimum required version
cmake_minimum_required(VERSION 2.8.12)
if(CMAKE_VERSION VERSION_EQUAL 3.0 OR CMAKE_VERSION VERSION_GREATER 3.0)
cmake_policy(SET CMP0042 NEW)
endif()
# Set the project name
project(diagnostics)
# Include cmake functions
include(functions.cmake)
if (WIN32)
message(STATUS "VS_PLATFORM_TOOLSET is ${CMAKE_VS_PLATFORM_TOOLSET}")
message(STATUS "VS_PLATFORM_NAME is ${CMAKE_VS_PLATFORM_NAME}")
endif (WIN32)
set(ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
# Where the version source file for xplat is generated
set(VERSION_FILE_PATH "${CMAKE_BINARY_DIR}/version.cpp")
# Where _version.h for Windows is generated
if (WIN32)
include_directories("${CMAKE_BINARY_DIR}")
endif (WIN32)
set(CORECLR_SET_RPATH ON)
if(CORECLR_SET_RPATH)
# Enable @rpath support for shared libraries.
set(MACOSX_RPATH ON)
endif(CORECLR_SET_RPATH)
OPTION(CLR_CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF)
OPTION(CLR_CMAKE_WARNINGS_ARE_ERRORS "Warnings are errors" ON)
# Ensure other tools are present
if (WIN32)
if(CLR_CMAKE_HOST_ARCH STREQUAL arm)
# Confirm that Windows SDK is present
if(NOT DEFINED CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION OR CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION STREQUAL "" )
message(FATAL_ERROR "Windows SDK is required for the Arm32 build.")
else()
message("Using Windows SDK version ${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}")
endif()
# Explicitly specify the assembler to be used for Arm32 compile
if($ENV{__VSVersion} STREQUAL "vs2015")
file(TO_CMAKE_PATH "$ENV{VCINSTALLDIR}\\bin\\x86_arm\\armasm.exe" CMAKE_ASM_COMPILER)
else()
file(TO_CMAKE_PATH "$ENV{VCToolsInstallDir}\\bin\\HostX86\\arm\\armasm.exe" CMAKE_ASM_COMPILER)
endif()
set(CMAKE_ASM_MASM_COMPILER ${CMAKE_ASM_COMPILER})
message("CMAKE_ASM_MASM_COMPILER explicitly set to: ${CMAKE_ASM_MASM_COMPILER}")
# Enable generic assembly compilation to avoid CMake generating VS proj files that explicitly
# use ml[64].exe as the assembler.
enable_language(ASM)
else()
enable_language(ASM_MASM)
endif()
# Ensure that MC is present
find_program(MC mc)
if (MC STREQUAL "MC-NOTFOUND")
message(FATAL_ERROR "MC not found")
endif()
if (CLR_CMAKE_HOST_ARCH STREQUAL arm64)
# CMAKE_CXX_COMPILER will default to the compiler installed with
# Visual Studio. Overwrite it with the compiler on the path.
# TODO, remove when cmake generator supports Arm64 as a target.
find_program(PATH_CXX_COMPILER cl)
set(CMAKE_CXX_COMPILER ${PATH_CXX_COMPILER})
message("Overwriting the CMAKE_CXX_COMPILER.")
message(CMAKE_CXX_COMPILER found:${CMAKE_CXX_COMPILER})
endif()
else (WIN32)
enable_language(ASM)
# Ensure that awk is present
find_program(AWK awk)
if (AWK STREQUAL "AWK-NOTFOUND")
message(FATAL_ERROR "AWK not found")
endif()
# Try to locate the paxctl tool. Failure to find it is not fatal,
# but the generated executables won't work on a system where PAX is set
# to prevent applications from creating executable memory mappings.
find_program(PAXCTL paxctl)
if (CMAKE_SYSTEM_NAME STREQUAL Darwin)
# Ensure that dsymutil and strip are present
find_program(DSYMUTIL dsymutil)
if (DSYMUTIL STREQUAL "DSYMUTIL-NOTFOUND")
message(FATAL_ERROR "dsymutil not found")
endif()
find_program(STRIP strip)
if (STRIP STREQUAL "STRIP-NOTFOUND")
message(FATAL_ERROR "strip not found")
endif()
else (CMAKE_SYSTEM_NAME STREQUAL Darwin)
# Ensure that objcopy is present
if (CLR_UNIX_CROSS_BUILD AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD)
if (CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l OR CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm OR CMAKE_SYSTEM_PROCESSOR STREQUAL mips64)
find_program(OBJCOPY ${TOOLCHAIN}-objcopy)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686)
find_program(OBJCOPY objcopy)
else()
clr_unknown_arch()
endif()
else()
find_program(OBJCOPY objcopy)
endif()
if (OBJCOPY STREQUAL "OBJCOPY-NOTFOUND")
message(FATAL_ERROR "objcopy not found")
endif()
endif (CMAKE_SYSTEM_NAME STREQUAL Darwin)
endif(WIN32)
#----------------------------------------
# Detect and set platform variable names
# - for non-windows build platform & architecture is detected using inbuilt CMAKE variables and cross target component configure
# - for windows we use the passed in parameter to CMAKE to determine build arch
#----------------------------------------
if(CMAKE_SYSTEM_NAME STREQUAL Linux)
set(CLR_CMAKE_PLATFORM_UNIX 1)
if(CLR_CROSS_COMPONENTS_BUILD)
# CMAKE_HOST_SYSTEM_PROCESSOR returns the value of `uname -p` on host.
if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL amd64)
if(CLR_CMAKE_TARGET_ARCH STREQUAL "arm")
set(CLR_CMAKE_PLATFORM_UNIX_X86 1)
else()
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
endif()
elseif(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL i686)
set(CLR_CMAKE_PLATFORM_UNIX_X86 1)
else()
clr_unknown_arch()
endif()
else()
# CMAKE_SYSTEM_PROCESSOR returns the value of `uname -p` on target.
# For the AMD/Intel 64bit architecture two different strings are common.
# Linux and Darwin identify it as "x86_64" while FreeBSD and NetBSD use the
# "amd64" string. Accept either of the two here.
if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64)
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l)
set(CLR_CMAKE_PLATFORM_UNIX_ARM 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm)
set(CLR_CMAKE_PLATFORM_UNIX_ARM 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
set(CLR_CMAKE_PLATFORM_UNIX_ARM64 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL mips64)
set(CLR_CMAKE_PLATFORM_UNIX_MIPS64 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL i686)
set(CLR_CMAKE_PLATFORM_UNIX_X86 1)
else()
clr_unknown_arch()
endif()
endif()
set(CLR_CMAKE_PLATFORM_LINUX 1)
# Detect Linux ID
if(DEFINED CLR_CMAKE_LINUX_ID)
if(CLR_CMAKE_LINUX_ID STREQUAL ubuntu)
set(CLR_CMAKE_TARGET_UBUNTU_LINUX 1)
elseif(CLR_CMAKE_LINUX_ID STREQUAL tizen)
set(CLR_CMAKE_TARGET_TIZEN_LINUX 1)
elseif(CLR_CMAKE_LINUX_ID STREQUAL alpine)
set(CLR_CMAKE_PLATFORM_ALPINE_LINUX 1)
endif()
if(CLR_CMAKE_LINUX_ID STREQUAL ubuntu)
set(CLR_CMAKE_PLATFORM_UBUNTU_LINUX 1)
endif()
endif(DEFINED CLR_CMAKE_LINUX_ID)
endif(CMAKE_SYSTEM_NAME STREQUAL Linux)
if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
set(CLR_CMAKE_PLATFORM_UNIX 1)
if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
set(CLR_CMAKE_PLATFORM_UNIX_ARM64 1)
else()
message(FATAL_ERROR "CMAKE_OSX_ARCHITECTURES:'${CMAKE_OSX_ARCHITECTURES}'")
clr_unknown_arch()
endif()
set(CLR_CMAKE_PLATFORM_DARWIN 1)
if(CMAKE_VERSION VERSION_LESS "3.4.0")
set(CMAKE_ASM_COMPILE_OBJECT "${CMAKE_C_COMPILER} <FLAGS> <DEFINES> -o <OBJECT> -c <SOURCE>")
else()
set(CMAKE_ASM_COMPILE_OBJECT "${CMAKE_C_COMPILER} <FLAGS> <DEFINES> <INCLUDES> -o <OBJECT> -c <SOURCE>")
endif(CMAKE_VERSION VERSION_LESS "3.4.0")
endif(CMAKE_SYSTEM_NAME STREQUAL Darwin)
if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
set(CLR_CMAKE_PLATFORM_UNIX 1)
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
set(CLR_CMAKE_PLATFORM_FREEBSD 1)
endif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
if(CMAKE_SYSTEM_NAME STREQUAL OpenBSD)
set(CLR_CMAKE_PLATFORM_UNIX 1)
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
set(CLR_CMAKE_PLATFORM_OPENBSD 1)
endif(CMAKE_SYSTEM_NAME STREQUAL OpenBSD)
if(CMAKE_SYSTEM_NAME STREQUAL NetBSD)
set(CLR_CMAKE_PLATFORM_UNIX 1)
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
set(CLR_CMAKE_PLATFORM_NETBSD 1)
endif(CMAKE_SYSTEM_NAME STREQUAL NetBSD)
if(CMAKE_SYSTEM_NAME STREQUAL SunOS)
set(CLR_CMAKE_PLATFORM_UNIX 1)
EXECUTE_PROCESS(
COMMAND isainfo -n
OUTPUT_VARIABLE SUNOS_NATIVE_INSTRUCTION_SET
)
if(SUNOS_NATIVE_INSTRUCTION_SET MATCHES "amd64")
set(CLR_CMAKE_PLATFORM_UNIX_AMD64 1)
set(CMAKE_SYSTEM_PROCESSOR "amd64")
else()
clr_unknown_arch()
endif()
set(CLR_CMAKE_PLATFORM_SUNOS 1)
endif(CMAKE_SYSTEM_NAME STREQUAL SunOS)
#--------------------------------------------
# This repo builds two sets of binaries
# 1. binaries which execute on target arch machine
# - for such binaries host architecture & target architecture are same
# - eg. coreclr.dll
# 2. binaries which execute on host machine but target another architecture
# - host architecture is different from target architecture
# - eg. crossgen.exe - runs on x64 machine and generates nis targeting arm64
# - for complete list of such binaries refer to file crosscomponents.cmake
#-------------------------------------------------------------
# Set HOST architecture variables
if(CLR_CMAKE_PLATFORM_UNIX_ARM)
set(CLR_CMAKE_PLATFORM_ARCH_ARM 1)
set(CLR_CMAKE_HOST_ARCH "arm")
elseif(CLR_CMAKE_PLATFORM_UNIX_ARM64)
set(CLR_CMAKE_PLATFORM_ARCH_ARM64 1)
set(CLR_CMAKE_HOST_ARCH "arm64")
elseif(CLR_CMAKE_PLATFORM_UNIX_AMD64)
set(CLR_CMAKE_PLATFORM_ARCH_AMD64 1)
set(CLR_CMAKE_HOST_ARCH "x64")
elseif(CLR_CMAKE_PLATFORM_UNIX_X86)
set(CLR_CMAKE_PLATFORM_ARCH_I386 1)
set(CLR_CMAKE_HOST_ARCH "x86")
elseif(CLR_CMAKE_PLATFORM_UNIX_MIPS64)
set(CLR_CMAKE_PLATFORM_ARCH_MIPS64 1)
set(CLR_CMAKE_HOST_ARCH "mips64")
elseif(WIN32)
# CLR_CMAKE_HOST_ARCH is passed in as param to cmake
if (CLR_CMAKE_HOST_ARCH STREQUAL x64)
set(CLR_CMAKE_PLATFORM_ARCH_AMD64 1)
elseif(CLR_CMAKE_HOST_ARCH STREQUAL x86)
set(CLR_CMAKE_PLATFORM_ARCH_I386 1)
elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm)
set(CLR_CMAKE_PLATFORM_ARCH_ARM 1)
elseif(CLR_CMAKE_HOST_ARCH STREQUAL arm64)
set(CLR_CMAKE_PLATFORM_ARCH_ARM64 1)
else()
clr_unknown_arch()
endif()
endif()
# Set TARGET architecture variables
# Target arch will be a cmake param (optional) for both windows as well as non-windows build
# if target arch is not specified then host & target are same
if(NOT DEFINED CLR_CMAKE_TARGET_ARCH OR CLR_CMAKE_TARGET_ARCH STREQUAL "" )
set(CLR_CMAKE_TARGET_ARCH ${CLR_CMAKE_HOST_ARCH})
endif()
# Set target architecture variables
if (CLR_CMAKE_TARGET_ARCH STREQUAL x64)
set(CLR_CMAKE_TARGET_ARCH_AMD64 1)
elseif(CLR_CMAKE_TARGET_ARCH STREQUAL x86)
set(CLR_CMAKE_TARGET_ARCH_I386 1)
elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm64)
set(CLR_CMAKE_TARGET_ARCH_ARM64 1)
elseif(CLR_CMAKE_TARGET_ARCH STREQUAL arm)
set(CLR_CMAKE_TARGET_ARCH_ARM 1)
elseif(CLR_CMAKE_TARGET_ARCH STREQUAL mips64)
set(CLR_CMAKE_TARGET_ARCH_MIPS64 1)
else()
clr_unknown_arch()
endif()
# check if host & target arch combination are valid
if(NOT(CLR_CMAKE_TARGET_ARCH STREQUAL CLR_CMAKE_HOST_ARCH))
if(NOT((CLR_CMAKE_PLATFORM_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM64) OR (CLR_CMAKE_PLATFORM_ARCH_I386 AND CLR_CMAKE_TARGET_ARCH_ARM)))
message(FATAL_ERROR "Invalid host and target arch combination")
endif()
endif()
#-----------------------------------------------------
# Initialize Cmake compiler flags and other variables
#-----------------------------------------------------
if (CMAKE_CONFIGURATION_TYPES) # multi-configuration generator?
set(CMAKE_CONFIGURATION_TYPES "Debug;Checked;Release;RelWithDebInfo" CACHE STRING "" FORCE)
endif (CMAKE_CONFIGURATION_TYPES)
set(CMAKE_C_FLAGS_CHECKED ${CLR_C_FLAGS_CHECKED_INIT} CACHE STRING "Flags used by the compiler during checked builds.")
set(CMAKE_CXX_FLAGS_CHECKED ${CLR_CXX_FLAGS_CHECKED_INIT} CACHE STRING "Flags used by the compiler during checked builds.")
set(CMAKE_EXE_LINKER_FLAGS_CHECKED "")
set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "")
set(CMAKE_CXX_STANDARD_LIBRARIES "") # do not link against standard win32 libs i.e. kernel32, uuid, user32, etc.
if (WIN32)
# For multi-configuration toolset (as Visual Studio)
# set the different configuration defines.
foreach (Config DEBUG CHECKED RELEASE RELWITHDEBINFO)
foreach (Definition IN LISTS CLR_DEFINES_${Config}_INIT)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:${Definition}>)
endforeach (Definition)
endforeach (Config)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /GUARD:CF")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /GUARD:CF")
# Linker flags
#
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /MANIFEST:NO") #Do not create Side-by-Side Assembly Manifest
if (CLR_CMAKE_PLATFORM_ARCH_ARM)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,6.02") #windows subsystem - arm minimum is 6.02
elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,6.03") #windows subsystem - arm64 minimum is 6.03
else ()
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,6.01") #windows subsystem
endif ()
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE") # can handle addresses larger than 2 gigabytes
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /NXCOMPAT") #Compatible with Data Execution Prevention
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DYNAMICBASE") #Use address space layout randomization
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /PDBCOMPRESS") #shrink pdb size
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /IGNORE:4197,4013,4254,4070,4221")
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /IGNORE:4221")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG /PDBCOMPRESS")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1572864")
# Temporarily disable incremental link due to incremental linking CFG bug crashing crossgen.
# See https://github.com/dotnet/coreclr/issues/12592
# This has been fixed in VS 2017 Update 5 but we're keeping this around until everyone is off
# the versions that have the bug. The bug manifests itself as a bad crash.
set(NO_INCREMENTAL_LINKER_FLAGS "/INCREMENTAL:NO")
# Debug build specific flags
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "/NOVCFEATURE ${NO_INCREMENTAL_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${NO_INCREMENTAL_LINKER_FLAGS}")
# Checked build specific flags
set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "${CMAKE_SHARED_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF /NOVCFEATURE ${NO_INCREMENTAL_LINKER_FLAGS}")
set(CMAKE_STATIC_LINKER_FLAGS_CHECKED "${CMAKE_STATIC_LINKER_FLAGS_CHECKED}")
set(CMAKE_EXE_LINKER_FLAGS_CHECKED "${CMAKE_EXE_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF ${NO_INCREMENTAL_LINKER_FLAGS}")
# Release build specific flags
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF ${NO_INCREMENTAL_LINKER_FLAGS}")
set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF ${NO_INCREMENTAL_LINKER_FLAGS}")
# ReleaseWithDebugInfo build specific flags
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF ${NO_INCREMENTAL_LINKER_FLAGS}")
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF ${NO_INCREMENTAL_LINKER_FLAGS}")
# Temporary until cmake has VS generators for arm64
if(CLR_CMAKE_PLATFORM_ARCH_ARM64)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /machine:arm64")
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /machine:arm64")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /machine:arm64")
endif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
# Force uCRT to be dynamically linked for Release build
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /NODEFAULTLIB:libucrt.lib /DEFAULTLIB:ucrt.lib")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /NODEFAULTLIB:libucrt.lib /DEFAULTLIB:ucrt.lib")
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /NODEFAULTLIB:libucrt.lib /DEFAULTLIB:ucrt.lib")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /NODEFAULTLIB:libucrt.lib /DEFAULTLIB:ucrt.lib")
elseif (CLR_CMAKE_PLATFORM_UNIX)
# Set the values to display when interactively configuring CMAKE_BUILD_TYPE
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "DEBUG;CHECKED;RELEASE;RELWITHDEBINFO")
# Use uppercase CMAKE_BUILD_TYPE for the string comparisons below
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
# For single-configuration toolset
# set the different configuration defines.
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
# First DEBUG
set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_DEBUG_INIT})
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
# Then CHECKED
set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_CHECKED_INIT})
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
# Then RELEASE
set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_RELEASE_INIT})
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELWITHDEBINFO)
# And then RELWITHDEBINFO
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_RELWITHDEBINFO_INIT})
else ()
message(FATAL_ERROR "Unknown build type! Set CMAKE_BUILD_TYPE to DEBUG, CHECKED, RELEASE, or RELWITHDEBINFO!")
endif ()
# set the CLANG sanitizer flags for debug build
if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
# obtain settings from running enablesanitizers.sh
string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS)
string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS)
if ((${__ASAN_POS} GREATER -1) OR (${__UBSAN_POS} GREATER -1))
set(CLR_SANITIZE_CXX_FLAGS "${CLR_SANITIZE_CXX_FLAGS} -fsanitize-blacklist=${CMAKE_CURRENT_SOURCE_DIR}/sanitizerblacklist.txt -fsanitize=")
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS} -fsanitize=")
if (${__ASAN_POS} GREATER -1)
set(CLR_SANITIZE_CXX_FLAGS "${CLR_SANITIZE_CXX_FLAGS}address,")
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,")
add_definitions(-DHAS_ASAN)
message("Address Sanitizer (asan) enabled")
endif ()
if (${__UBSAN_POS} GREATER -1)
# all sanitizer flags are enabled except alignment (due to heavy use of the __unaligned modifier)
set(CLR_SANITIZE_CXX_FLAGS "${CLR_SANITIZE_CXX_FLAGS}bool,bounds,enum,float-cast-overflow,float-divide-by-zero,function,integer,nonnull-attribute,null,object-size,return,returns-nonnull-attribute,shift,unreachable,vla-bound,vptr")
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}undefined")
message("Undefined Behavior Sanitizer (ubsan) enabled")
endif ()
# -fdata-sections -ffunction-sections: each function has own section instead of one per .o file (needed for --gc-sections)
# -fPIC: enable Position Independent Code normally just for shared libraries but required when linking with address sanitizer
# -O1: optimization level used instead of -O0 to avoid compile error "invalid operand for inline asm constraint"
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CLR_SANITIZE_CXX_FLAGS} -fdata-sections -ffunction-sections -fPIC -O1")
set(CMAKE_CXX_FLAGS_CHECKED "${CMAKE_CXX_FLAGS_CHECKED} ${CLR_SANITIZE_CXX_FLAGS} -fdata-sections -ffunction-sections -fPIC -O1")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} ${CLR_SANITIZE_LINK_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS_CHECKED "${CMAKE_EXE_LINKER_FLAGS_CHECKED} ${CLR_SANITIZE_LINK_FLAGS}")
# -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking)
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "${CMAKE_SHARED_LINKER_FLAGS_CHECKED} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
endif ()
endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
endif(WIN32)
# CLR_ADDITIONAL_LINKER_FLAGS - used for passing additional arguments to linker
# CLR_ADDITIONAL_COMPILER_OPTIONS - used for passing additional arguments to compiler
if(CLR_CMAKE_PLATFORM_UNIX)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLR_ADDITIONAL_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLR_ADDITIONAL_LINKER_FLAGS}" )
add_compile_options(${CLR_ADDITIONAL_COMPILER_OPTIONS})
endif(CLR_CMAKE_PLATFORM_UNIX)
if(CLR_CMAKE_PLATFORM_LINUX)
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -Wa,--noexecstack")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--build-id=sha1")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
endif(CLR_CMAKE_PLATFORM_LINUX)
#------------------------------------
# Definitions (for platform)
#-----------------------------------
if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-D_AMD64_)
add_definitions(-D_WIN64)
add_definitions(-DAMD64)
add_definitions(-DBIT64=1)
elseif (CLR_CMAKE_PLATFORM_ARCH_I386)
add_definitions(-D_X86_)
elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
add_definitions(-D_ARM_)
add_definitions(-DARM)
elseif (CLR_CMAKE_PLATFORM_ARCH_ARM64)
add_definitions(-D_ARM64_)
add_definitions(-DARM64)
add_definitions(-D_WIN64)
add_definitions(-DBIT64=1)
elseif (CLR_CMAKE_PLATFORM_ARCH_MIPS64)
add_definitions(-D_MIPS64_)
add_definitions(-DMIPS64)
add_definitions(-D_WIN64)
add_definitions(-DBIT64=1)
else ()
clr_unknown_arch()
endif ()
if (CLR_CMAKE_PLATFORM_UNIX)
if(CLR_CMAKE_PLATFORM_LINUX)
if(CLR_CMAKE_PLATFORM_UNIX_AMD64)
message("Detected Linux x86_64")
add_definitions(-DLINUX64)
elseif(CLR_CMAKE_PLATFORM_UNIX_ARM)
message("Detected Linux ARM")
add_definitions(-DLINUX32)
elseif(CLR_CMAKE_PLATFORM_UNIX_ARM64)
message("Detected Linux ARM64")
add_definitions(-DLINUX64)
elseif(CLR_CMAKE_PLATFORM_UNIX_X86)
message("Detected Linux i686")
add_definitions(-DLINUX32)
elseif(CLR_CMAKE_PLATFORM_UNIX_MIPS64)
message("Detected Linux MIPS64")
add_definitions(-DLINUX64)
else()
clr_unknown_arch()
endif()
endif(CLR_CMAKE_PLATFORM_LINUX)
endif(CLR_CMAKE_PLATFORM_UNIX)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DPLATFORM_UNIX=1)
if(CLR_CMAKE_PLATFORM_DARWIN)
message("Detected OSX x86_64")
endif(CLR_CMAKE_PLATFORM_DARWIN)
if(CLR_CMAKE_PLATFORM_FREEBSD)
message("Detected FreeBSD amd64")
endif(CLR_CMAKE_PLATFORM_FREEBSD)
if(CLR_CMAKE_PLATFORM_NETBSD)
message("Detected NetBSD amd64")
endif(CLR_CMAKE_PLATFORM_NETBSD)
endif(CLR_CMAKE_PLATFORM_UNIX)
if (WIN32)
# Define the CRT lib references that link into Desktop imports
set(STATIC_MT_CRT_LIB "libcmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib")
set(STATIC_MT_VCRT_LIB "libvcruntime$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib")
set(STATIC_MT_CPP_LIB "libcpmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib")
endif(WIN32)
# Architecture specific files folder name
if (CLR_CMAKE_TARGET_ARCH_AMD64)
set(ARCH_SOURCES_DIR amd64)
elseif (CLR_CMAKE_TARGET_ARCH_ARM64)
set(ARCH_SOURCES_DIR arm64)
elseif (CLR_CMAKE_TARGET_ARCH_ARM)
set(ARCH_SOURCES_DIR arm)
elseif (CLR_CMAKE_TARGET_ARCH_I386)
set(ARCH_SOURCES_DIR i386)
elseif (CLR_CMAKE_TARGET_ARCH_MIPS64)
set(ARCH_SOURCES_DIR mips64)
else ()
clr_unknown_arch()
endif ()
if (CLR_CMAKE_TARGET_ARCH_AMD64)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DDBG_TARGET_AMD64_UNIX)
endif()
add_definitions(-D_TARGET_64BIT_=1)
add_definitions(-D_TARGET_AMD64_=1)
add_definitions(-DDBG_TARGET_64BIT=1)
add_definitions(-DDBG_TARGET_AMD64=1)
add_definitions(-DDBG_TARGET_WIN64=1)
elseif (CLR_CMAKE_TARGET_ARCH_ARM64)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DDBG_TARGET_ARM64_UNIX)
endif()
add_definitions(-D_TARGET_ARM64_=1)
add_definitions(-D_TARGET_64BIT_=1)
add_definitions(-DDBG_TARGET_64BIT=1)
add_definitions(-DDBG_TARGET_ARM64=1)
add_definitions(-DDBG_TARGET_WIN64=1)
add_definitions(-DFEATURE_MULTIREG_RETURN)
elseif (CLR_CMAKE_TARGET_ARCH_ARM)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DDBG_TARGET_ARM_UNIX)
elseif (WIN32 AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD)
# Set this to ensure we can use Arm SDK for Desktop binary linkage when doing native (Arm32) build
add_definitions(-D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE=1)
add_definitions(-D_ARM_WORKAROUND_)
endif (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-D_TARGET_ARM_=1)
add_definitions(-DDBG_TARGET_32BIT=1)
add_definitions(-DDBG_TARGET_ARM=1)
elseif (CLR_CMAKE_TARGET_ARCH_I386)
add_definitions(-D_TARGET_X86_=1)
add_definitions(-DDBG_TARGET_32BIT=1)
add_definitions(-DDBG_TARGET_X86=1)
elseif (CLR_CMAKE_TARGET_ARCH_MIPS64)
add_definitions(-DDBG_TARGET_MIPS64_UNIX)
add_definitions(-D_TARGET_MIPS64_=1)
add_definitions(-D_TARGET_64BIT_=1)
add_definitions(-DDBG_TARGET_64BIT=1)
add_definitions(-DDBG_TARGET_MIPS64=1)
add_definitions(-DDBG_TARGET_WIN64=1)
add_definitions(-DFEATURE_MULTIREG_RETURN)
else ()
clr_unknown_arch()
endif (CLR_CMAKE_TARGET_ARCH_AMD64)
if(WIN32)
add_definitions(-DWIN32)
add_definitions(-D_WIN32)
add_definitions(-DWINVER=0x0602)
add_definitions(-D_WIN32_WINNT=0x0602)
add_definitions(-DWIN32_LEAN_AND_MEAN=1)
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
endif(WIN32)
#--------------------------------------
# FEATURE Defines
#--------------------------------------
add_definitions(-DFEATURE_CORESYSTEM)
if(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_PAL)
add_definitions(-DFEATURE_PAL_ANSI)
endif(CLR_CMAKE_PLATFORM_UNIX)
if(WIN32)
add_definitions(-DFEATURE_COMINTEROP)
endif(WIN32)
if(NOT CMAKE_SYSTEM_NAME STREQUAL NetBSD)
add_definitions(-DFEATURE_HIJACK)
endif(NOT CMAKE_SYSTEM_NAME STREQUAL NetBSD)
if(FEATURE_EVENT_TRACE)
add_definitions(-DFEATURE_EVENT_TRACE=1)
add_definitions(-DFEATURE_PERFTRACING=1)
endif(FEATURE_EVENT_TRACE)
if(CLR_CMAKE_PLATFORM_UNIX_AMD64)
add_definitions(-DFEATURE_MULTIREG_RETURN)
endif (CLR_CMAKE_PLATFORM_UNIX_AMD64)
if(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
add_definitions(-DUNIX_AMD64_ABI)
endif(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
#--------------------------------------
# Compile Options
#--------------------------------------
include(compileoptions.cmake)
#-----------------------------------------
# Native Projects
#-----------------------------------------
add_subdirectory(src)
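The deleted root CMakeLists.txt above registers a non-standard "Checked" configuration with multi-config generators and attaches per-configuration preprocessor defines through generator expressions (see the CMAKE_CONFIGURATION_TYPES and set_property blocks). A minimal standalone sketch of that pattern, with illustrative project and variable names rather than names from this repo, might look like this:

```cmake
# Minimal sketch (not part of this commit; project and variable names are
# illustrative) of the custom-configuration pattern used above.
cmake_minimum_required(VERSION 3.12)
project(checked_config_demo CXX)

if (CMAKE_CONFIGURATION_TYPES) # multi-config generators such as Visual Studio
  set(CMAKE_CONFIGURATION_TYPES "Debug;Checked;Release;RelWithDebInfo" CACHE STRING "" FORCE)
endif ()

# Checked builds start from the Debug compiler and linker flags.
set(CMAKE_CXX_FLAGS_CHECKED           "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING "Flags used during checked builds.")
set(CMAKE_EXE_LINKER_FLAGS_CHECKED    "" CACHE STRING "")
set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "" CACHE STRING "")

# Per-configuration defines; the $<CONFIG:...> test is evaluated at build
# time, so one directory-level property serves every configuration.
set(DEMO_DEFINES_DEBUG   _DEBUG DEBUG)
set(DEMO_DEFINES_CHECKED _DEBUG CHECKED)
set(DEMO_DEFINES_RELEASE NDEBUG)
foreach (Config DEBUG CHECKED RELEASE)
  foreach (Definition IN LISTS DEMO_DEFINES_${Config})
    set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:${Definition}>)
  endforeach ()
endforeach ()

add_executable(demo main.cpp) # assumes a trivial main.cpp next to this file
```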

View file

@@ -6,15 +6,9 @@
<packageSources>
<clear />
<!-- Feeds used in Maestro/Arcade publishing -->
<add key="dotnet6" value="https://dnceng.pkgs.visualstudio.com/public/_packaging/dotnet6/nuget/v3/index.json" />
<add key="dotnet6-transport" value="https://dnceng.pkgs.visualstudio.com/public/_packaging/dotnet6-transport/nuget/v3/index.json" />
<add key="dotnet5" value="https://dnceng.pkgs.visualstudio.com/public/_packaging/dotnet5/nuget/v3/index.json" />
<add key="dotnet5-transport" value="https://dnceng.pkgs.visualstudio.com/public/_packaging/dotnet5-transport/nuget/v3/index.json" />
<add key="dotnet-tools" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json" />
<add key="dotnet-eng" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-eng/nuget/v3/index.json" />
<add key="dotnet-diagnostics-tests" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-diagnostics-tests/nuget/v3/index.json" />
<!-- Legacy feeds -->
<add key="dotnet-core" value="https://dotnetfeed.blob.core.windows.net/dotnet-core/index.json" />
<!-- Standard feeds -->
<add key="dotnet-public" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json" />
</packageSources>

View file

@@ -1,60 +1,34 @@
.NET Core Diagnostics Repo
==========================
# .NET Core Diagnostics Repo
This repository contains the source code for various .NET Core runtime diagnostic tools. It currently contains SOS, the managed portion of SOS, the lldb SOS plugin and various global diagnostic tools. The goals of this repo are to build SOS and the lldb SOS plugin for the portable (glibc based) Linux platform (Centos 7) and the platforms not supported by the portable (musl based) build (Centos 6, Alpine, and macOS) and to test across various indexes in a very large matrix: OSs/distros (Centos 6/7, Ubuntu, Alpine, Fedora, Debian, RHEL 7.2), architectures (x64, x86, arm, arm64), lldb versions (3.9 to 9.0) and .NET Core versions (2.1, 3.1, 5.0.x).
This repository contains the source code for dotnet-monitor, a diagnostic tool
Another goal is to make it easier to obtain a version of lldb (currently 3.9) with scripts and documentation for platforms/distros like Centos, Alpine, Fedora, etc. that by default provide really old versions.
This repo will also allow out of band development of new SOS and lldb plugin features like symbol server support for the .NET Core runtime and solve the source build problem having SOS.NETCore (managed portion of SOS) in the runtime repo.
See the [GitHub Release tab](https://github.com/dotnet/diagnostics/releases) for notes on SOS and diagnostic tools releases.
--------------------------
## Building the Repository
The build depends on Git, CMake, Python and of course a C++ compiler. Once these prerequisites are installed
the build is simply a matter of invoking the 'build' script (`build.cmd` or `build.sh`) at the base of the
repository.
See [building instructions](documentation/building.md/windows-instructions.md) in our documentation directory.
The details of installing the components differ depending on the operating system. See the following
pages based on your OS. There is no cross-building across OS (only for ARM, which is built on x64).
You have to be on the particular platform to build that platform.
## Reporting security issues and security bugs
To install the platform's prerequisites and build:
Security issues and bugs should be reported privately, via email, to the Microsoft Security Response Center (MSRC) <secure@microsoft.com>. You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Further information, including the MSRC PGP key, can be found in the [Security TechCenter](https://www.microsoft.com/msrc/faqs-report-an-issue).
* [Windows Instructions](documentation/building/windows-instructions.md)
* [Linux Instructions](documentation/building/linux-instructions.md)
* [MacOS Instructions](documentation/building/osx-instructions.md)
* [FreeBSD Instructions](documentation/building/freebsd-instructions.md)
* [NetBSD Instructions](documentation/building/netbsd-instructions.md)
* [Testing on private runtime builds](documentation/privatebuildtesting.md)
## SOS and Other Diagnostic Tools
* [SOS](documentation/sos.md) - About the SOS debugger extension.
* [dotnet-dump](documentation/dotnet-dump-instructions.md) - Dump collection and analysis utility.
* [dotnet-gcdump](documentation/dotnet-gcdump-instructions.md) - Heap analysis tool that collects gcdumps of live .NET processes.
* [dotnet-trace](documentation/dotnet-trace-instructions.md) - Enable the collection of events for a running .NET Core Application to a local trace file.
* [dotnet-counters](documentation/dotnet-counters-instructions.md) - Monitor performance counters of a .NET Core application in real time.
Also see info about related [Microsoft .NET Core and ASP.NET Core Bug Bounty Program](https://www.microsoft.com/msrc/bounty-dot-net-core).
## Useful Links
* [FAQ](documentation/FAQ.md) - Frequently asked questions.
* [The LLDB Debugger](http://lldb.llvm.org/index.html) - More information about lldb.
* [SOS](https://msdn.microsoft.com/en-us/library/bb190764(v=vs.110).aspx) - More information about SOS.
* [Debugging CoreCLR](https://github.com/dotnet/runtime/blob/master/docs/workflow/debugging/coreclr/debugging.md) - Instructions for debugging .NET Core and the CoreCLR runtime.
* [dotnet/runtime](https://github.com/dotnet/runtime) - Source for the .NET Core runtime.
* [Official Build Instructions](documentation/building/official-build-instructions.md) - Internal official build instructions.
See [Introducing dotnet-monitor](https://devblogs.microsoft.com/dotnet/introducing-dotnet-monitor/)
[//]: # (Begin current test results)
## .NET Foundation
## Build Status
.NET Monitor is a [.NET Foundation](https://www.dotnetfoundation.org/projects) project.
[![Build Status](https://dnceng.visualstudio.com/public/_apis/build/status/dotnet/diagnostics/diagnostics-public-ci?branchName=master)](https://dnceng.visualstudio.com/public/_build/latest?definitionId=72&branchName=master)
There are many .NET related projects on GitHub.
[//]: # (End current test results)
- [.NET home repo](https://github.com/Microsoft/dotnet) - links to 100s of .NET projects, from Microsoft and the community.
- [ASP.NET Core home](https://docs.microsoft.com/aspnet/core/?view=aspnetcore-3.1) - the best place to start learning about ASP.NET Core.
This project has adopted the code of conduct defined by the [Contributor Covenant](http://contributor-covenant.org/) to clarify expected behavior in our community. For more information, see the [.NET Foundation Code of Conduct](http://www.dotnetfoundation.org/code-of-conduct).
General .NET OSS discussions: [.NET Foundation forums](https://forums.dotnetfoundation.org)
## License
The diagnostics repository is licensed under the [MIT license](LICENSE.TXT).
.NET Monitor is licensed under the [MIT](LICENSE.TXT) license.

View file

@@ -1,3 +1,3 @@
@echo off
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\build.ps1""" -restore -skipmanaged -skipnative %*"
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\common\build.ps1""" -restore %*"
exit /b %ErrorLevel%

View file

@@ -1,3 +1,3 @@
@echo off
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\build.ps1""" -test -skipmanaged -skipnative %*"
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\common\build.ps1""" -test %*"
exit /b %ErrorLevel%

View file

@@ -13,4 +13,4 @@ while [[ -h $source ]]; do
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
"$scriptroot/eng/build.sh" --restore $@
"$scriptroot/eng/common/build.sh" --restore -build $@

View file

@@ -1,156 +0,0 @@
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
if (CLR_CMAKE_PLATFORM_UNIX)
# Disable frame pointer optimizations so profilers can get better call stacks
add_compile_options(-fno-omit-frame-pointer)
# The -fms-extensions enable the stuff like __if_exists, __declspec(uuid()), etc.
add_compile_options(-fms-extensions )
#-fms-compatibility Enable full Microsoft Visual C++ compatibility
#-fms-extensions Accept some non-standard constructs supported by the Microsoft compiler
# Make signed arithmetic overflow of addition, subtraction, and multiplication wrap around
# using twos-complement representation (this is normally undefined according to the C++ spec).
add_compile_options(-fwrapv)
if(CLR_CMAKE_PLATFORM_DARWIN)
# We cannot enable "stack-protector-strong" on OS X due to a bug in clang compiler (current version 7.0.2)
add_compile_options(-fstack-protector)
if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
add_compile_options(-arch x86_64)
elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
add_compile_options(-arch arm64)
else()
clr_unknown_arch()
endif()
else()
add_compile_options(-fstack-protector-strong)
endif(CLR_CMAKE_PLATFORM_DARWIN)
add_definitions(-DDISABLE_CONTRACTS)
# The -ferror-limit is helpful during porting; it makes sure the compiler doesn't stop
# after hitting just about 20 errors.
add_compile_options(-ferror-limit=4096)
if (CLR_CMAKE_WARNINGS_ARE_ERRORS)
# All warnings that are not explicitly disabled are reported as errors
add_compile_options(-Werror)
endif(CLR_CMAKE_WARNINGS_ARE_ERRORS)
# Disabled warnings
add_compile_options(-Wno-unused-private-field)
add_compile_options(-Wno-unused-variable)
# Explicit constructor calls are not supported by clang (this->ClassName::ClassName())
add_compile_options(-Wno-microsoft)
# This warning is caused by comparing 'this' to NULL
add_compile_options(-Wno-tautological-compare)
# There are constants of type BOOL used in a condition. But BOOL is defined as int
# and so the compiler thinks that there is a mistake.
add_compile_options(-Wno-constant-logical-operand)
# We use pshpack1/2/4/8.h and poppack.h headers to set and restore packing. However
# clang 6.0 complains when the packing change lifetime is not contained within
# a header file.
add_compile_options(-Wno-pragma-pack)
add_compile_options(-Wno-unknown-warning-option)
#These seem to indicate real issues
add_compile_options(-Wno-invalid-offsetof)
# The following warning indicates that an attribute __attribute__((__ms_struct__)) was applied
# to a struct or a class that has virtual members or a base class. In that case, clang
# may not generate the same object layout as MSVC.
add_compile_options(-Wno-incompatible-ms-struct)
# Some architectures (e.g., ARM) assume char type is unsigned while CoreCLR assumes char is signed
# as x64 does. It has been causing issues in ARM (https://github.com/dotnet/coreclr/issues/4746)
add_compile_options(-fsigned-char)
endif(CLR_CMAKE_PLATFORM_UNIX)
if(CLR_CMAKE_PLATFORM_UNIX_ARM)
# Because we don't use CMAKE_C_COMPILER/CMAKE_CXX_COMPILER to use clang
# we have to set the triple by adding a compiler argument
add_compile_options(-mthumb)
add_compile_options(-mfpu=vfpv3)
add_compile_options(-march=armv7-a)
if(ARM_SOFTFP)
add_definitions(-DARM_SOFTFP)
add_compile_options(-mfloat-abi=softfp)
endif(ARM_SOFTFP)
endif(CLR_CMAKE_PLATFORM_UNIX_ARM)
if (WIN32)
# Compile options for targeting windows
# The following options are set by the razzle build
add_compile_options(/TP) # compile all files as C++
add_compile_options(/d2Zi+) # make optimized builds debugging easier
add_compile_options(/nologo) # Suppress Startup Banner
add_compile_options(/W3) # set warning level to 3
add_compile_options(/WX) # treat warnings as errors
add_compile_options(/Oi) # enable intrinsics
add_compile_options(/Oy-) # disable suppressing of the creation of frame pointers on the call stack for quicker function calls
add_compile_options(/U_MT) # undefine the predefined _MT macro
add_compile_options(/GF) # enable read-only string pooling
add_compile_options(/Gm-) # disable minimal rebuild
add_compile_options(/EHa) # enable C++ EH (w/ SEH exceptions)
add_compile_options(/Zp8) # pack structs on 8-byte boundary
add_compile_options(/Gy) # separate functions for linker
add_compile_options(/Zc:wchar_t-) # C++ language conformance: wchar_t is NOT the native type, but a typedef
add_compile_options(/Zc:forScope) # C++ language conformance: enforce Standard C++ for scoping rules
add_compile_options(/GR-) # disable C++ RTTI
add_compile_options(/FC) # use full pathnames in diagnostics
add_compile_options(/MP) # Build with Multiple Processes (number of processes equal to the number of processors)
add_compile_options(/GS) # Buffer Security Check
add_compile_options(/Zm200) # Specify Precompiled Header Memory Allocation Limit of 150MB
add_compile_options(/wd4960 /wd4961 /wd4603 /wd4627 /wd4838 /wd4456 /wd4457 /wd4458 /wd4459 /wd4091 /we4640)
add_compile_options(/Zi) # enable debugging information
add_compile_options(/ZH:SHA_256) # use SHA256 for generating hashes of compiler processed source files.
add_compile_options(/source-charset:utf-8) # Force MSVC to compile source as UTF-8.
if (CLR_CMAKE_PLATFORM_ARCH_I386)
add_compile_options(/Gz)
endif (CLR_CMAKE_PLATFORM_ARCH_I386)
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>:/GL>)
add_compile_options($<$<OR:$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>,$<CONFIG:Checked>>:/O1>)
if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
# The generator expression in the following command means that the /homeparams option is added only for debug builds
add_compile_options($<$<CONFIG:Debug>:/homeparams>) # Force parameters passed in registers to be written to the stack
endif (CLR_CMAKE_PLATFORM_ARCH_AMD64)
# enable control-flow-guard support for native components for non-Arm64 builds
add_compile_options(/guard:cf)
# Statically linked CRT (libcmt[d].lib, libvcruntime[d].lib and libucrt[d].lib) by default. This is done to avoid
# linking in VCRUNTIME140.DLL for a simplified xcopy experience by reducing the dependency on VC REDIST.
#
# For Release builds, we shall dynamically link into uCRT [ucrtbase.dll] (which is pushed down as a Windows Update on downlevel OS) but
# won't do the same for debug/checked builds since ucrtbased.dll is not redistributable and Debug/Checked builds are not
# production-time scenarios.
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>:/MT>)
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:/MTd>)
set(CMAKE_ASM_MASM_FLAGS "${CMAKE_ASM_MASM_FLAGS} /ZH:SHA_256")
endif (WIN32)
if(CLR_CMAKE_ENABLE_CODE_COVERAGE)
if(CLR_CMAKE_PLATFORM_UNIX)
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
if(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
message( WARNING "Code coverage results with an optimised (non-Debug) build may be misleading" )
endif(NOT UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
add_compile_options(-fprofile-arcs)
add_compile_options(-ftest-coverage)
set(CLANG_COVERAGE_LINK_FLAGS "--coverage")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLANG_COVERAGE_LINK_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLANG_COVERAGE_LINK_FLAGS}")
else()
message(FATAL_ERROR "Code coverage builds not supported on current platform")
endif(CLR_CMAKE_PLATFORM_UNIX)
endif(CLR_CMAKE_ENABLE_CODE_COVERAGE)
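The trailing block of the deleted compileoptions.cmake gates the gcov-style coverage flags behind a cache option (CLR_CMAKE_ENABLE_CODE_COVERAGE). A compact, self-contained sketch of that option-gated pattern, using illustrative option and project names rather than names from the commit, could be:

```cmake
# Compact sketch (illustrative names, not from the commit) of the option-gated
# flag pattern used by the coverage block above: a cache option, typically set
# with -DDEMO_ENABLE_COVERAGE=ON on the cmake command line, switches extra
# compile and link flags on for the whole directory.
cmake_minimum_required(VERSION 3.13)
project(coverage_option_demo CXX)

option(DEMO_ENABLE_COVERAGE "Enable gcov-style code coverage" OFF)

if (DEMO_ENABLE_COVERAGE)
  if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
    message(WARNING "Coverage results from optimized builds may be misleading")
  endif ()
  add_compile_options(-fprofile-arcs -ftest-coverage)
  add_link_options(--coverage) # add_link_options() requires CMake 3.13+
endif ()

add_executable(demo main.cpp) # assumes a trivial main.cpp next to this file
```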

View file

@@ -1,27 +0,0 @@
# Contains the crossgen build specific definitions. Included by the leaf crossgen cmake files.
add_definitions(
-DCROSSGEN_COMPILE
-DCROSS_COMPILE
-DFEATURE_NATIVE_IMAGE_GENERATION
-DSELF_NO_HOST)
remove_definitions(
-DFEATURE_CODE_VERSIONING
-DEnC_SUPPORTED
-DFEATURE_EVENT_TRACE=1
-DFEATURE_LOADER_OPTIMIZATION
-DFEATURE_MULTICOREJIT
-DFEATURE_PERFMAP
-DFEATURE_REJIT
-DFEATURE_TIERED_COMPILATION
-DFEATURE_VERSIONING_LOG
)
if(FEATURE_READYTORUN)
add_definitions(-DFEATURE_READYTORUN_COMPILER)
endif(FEATURE_READYTORUN)
if(CLR_CMAKE_PLATFORM_LINUX)
add_definitions(-DFEATURE_PERFMAP)
endif(CLR_CMAKE_PLATFORM_LINUX)
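The header comment of this deleted file notes it is meant to be pulled into the leaf crossgen CMake files rather than used on its own. A hypothetical consumer (paths, variables, and target names below are assumptions, not taken from the repo) would include it before declaring its target so the add_definitions()/remove_definitions() calls apply to that directory:

```cmake
# Hypothetical leaf CMakeLists.txt; CLR_REPO_ROOT, crossgen_demo, and
# jit_static are placeholders, not names from the repository.
include(${CLR_REPO_ROOT}/crossgen.cmake) # brings in the CROSSGEN_COMPILE etc. defines

add_executable(crossgen_demo main.cpp)
target_link_libraries(crossgen_demo PRIVATE jit_static)
```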

View file

@@ -1,735 +0,0 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.29019.234
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestDebuggee", "src\SOS\lldbplugin.tests\TestDebuggee\TestDebuggee.csproj", "{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "SOS", "SOS", "{41638A4C-0DAF-47ED-A774-ECBBAC0315D7}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{19FAB78C-3351-4911-8F0C-8C6056401740}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Debuggees", "Debuggees", "{C3072949-6D24-451B-A308-2F3621F858B0}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "WebApp3", "src\SOS\SOS.UnitTests\Debuggees\WebApp3\WebApp3.csproj", "{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "WebApp", "src\SOS\SOS.UnitTests\Debuggees\WebApp\WebApp.csproj", "{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SimpleThrow", "src\SOS\SOS.UnitTests\Debuggees\SimpleThrow\SimpleThrow.csproj", "{179EF543-E30A-4428-ABA0-2E2621860173}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DivZero", "src\SOS\SOS.UnitTests\Debuggees\DivZero\DivZero.csproj", "{447AC053-2E0A-4119-BD11-30A4A8E3F765}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GCWhere", "src\SOS\SOS.UnitTests\Debuggees\GCWhere\GCWhere.csproj", "{664F46A9-3C99-489B-AAB9-4CD3A430C425}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NestedExceptionTest", "src\SOS\SOS.UnitTests\Debuggees\NestedExceptionTest\NestedExceptionTest.csproj", "{0CB805C8-0B76-4B1D-8AAF-48535B180448}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Overflow", "src\SOS\SOS.UnitTests\Debuggees\Overflow\Overflow.csproj", "{20251748-AA7B-45BE-ADAA-C9375F5CC80F}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ReflectionTest", "src\SOS\SOS.UnitTests\Debuggees\ReflectionTest\ReflectionTest.csproj", "{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TaskNestedException", "src\SOS\SOS.UnitTests\Debuggees\TaskNestedException\TaskNestedException\TaskNestedException.csproj", "{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "RandomUserLibrary", "src\SOS\SOS.UnitTests\Debuggees\TaskNestedException\RandomUserLibrary\RandomUserLibrary.csproj", "{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SymbolTestApp", "src\SOS\SOS.UnitTests\Debuggees\SymbolTestApp\SymbolTestApp\SymbolTestApp.csproj", "{112FE2A7-3FD2-4496-8A14-171898AD5CF5}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SymbolTestDll", "src\SOS\SOS.UnitTests\Debuggees\SymbolTestApp\SymbolTestDll\SymbolTestDll.csproj", "{8C27904A-47C0-44C7-B191-88FF34580CBE}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LineNums", "src\SOS\SOS.UnitTests\Debuggees\LineNums\LineNums.csproj", "{84881FB8-37E1-4D9B-B27E-9831C30DCC04}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GCPOH", "src\SOS\SOS.UnitTests\Debuggees\GCPOH\GCPOH.csproj", "{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DotnetDumpCommands", "src\SOS\SOS.UnitTests\Debuggees\DotnetDumpCommands\DotnetDumpCommands.csproj", "{F9A69812-DC52-428D-9DB1-8B831A8FF776}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Checked|Any CPU = Checked|Any CPU
Checked|ARM = Checked|ARM
Checked|ARM64 = Checked|ARM64
Checked|x64 = Checked|x64
Checked|x86 = Checked|x86
Debug|Any CPU = Debug|Any CPU
Debug|ARM = Debug|ARM
Debug|ARM64 = Debug|ARM64
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|Any CPU = Release|Any CPU
Release|ARM = Release|ARM
Release|ARM64 = Release|ARM64
Release|x64 = Release|x64
Release|x86 = Release|x86
RelWithDebInfo|Any CPU = RelWithDebInfo|Any CPU
RelWithDebInfo|ARM = RelWithDebInfo|ARM
RelWithDebInfo|ARM64 = RelWithDebInfo|ARM64
RelWithDebInfo|x64 = RelWithDebInfo|x64
RelWithDebInfo|x86 = RelWithDebInfo|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|Any CPU.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|Any CPU.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|ARM.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|ARM.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|ARM64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|ARM64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|x64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|x64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|x86.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Checked|x86.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|ARM.ActiveCfg = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|ARM.Build.0 = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|ARM64.Build.0 = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|x64.ActiveCfg = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|x64.Build.0 = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|x86.ActiveCfg = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Debug|x86.Build.0 = Debug|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|Any CPU.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|ARM.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|ARM.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|ARM64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|ARM64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|x64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|x64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|x86.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.Release|x86.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|Any CPU.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|ARM.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|ARM.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|ARM64.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|x64.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|x64.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|x86.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Checked|x86.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|Any CPU.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|ARM.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|ARM.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|ARM64.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|x64.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|x64.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|x86.ActiveCfg = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Debug|x86.Build.0 = Debug|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|Any CPU.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|Any CPU.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|ARM.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|ARM.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|ARM64.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|ARM64.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|x64.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|x64.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|x86.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.Release|x86.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|Any CPU.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|ARM.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|ARM.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|ARM64.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|x64.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|x64.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|x86.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Checked|x86.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|ARM.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|ARM.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|ARM64.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|x64.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|x64.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|x86.ActiveCfg = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Debug|x86.Build.0 = Debug|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|Any CPU.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|ARM.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|ARM.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|ARM64.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|ARM64.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|x64.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|x64.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|x86.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.Release|x86.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|Any CPU.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|ARM.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|ARM.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|ARM64.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|x64.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|x64.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|x86.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Checked|x86.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|Any CPU.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|ARM.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|ARM.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|ARM64.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|x64.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|x64.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|x86.ActiveCfg = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Debug|x86.Build.0 = Debug|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|Any CPU.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|Any CPU.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|ARM.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|ARM.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|ARM64.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|ARM64.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|x64.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|x64.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|x86.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.Release|x86.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{179EF543-E30A-4428-ABA0-2E2621860173}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|Any CPU.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|ARM.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|ARM.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|ARM64.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|x64.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|x64.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|x86.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Checked|x86.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|Any CPU.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|ARM.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|ARM.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|ARM64.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|x64.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|x64.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|x86.ActiveCfg = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Debug|x86.Build.0 = Debug|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|Any CPU.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|Any CPU.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|ARM.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|ARM.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|ARM64.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|ARM64.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|x64.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|x64.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|x86.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.Release|x86.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{447AC053-2E0A-4119-BD11-30A4A8E3F765}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|Any CPU.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|ARM.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|ARM.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|ARM64.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|x64.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|x64.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|x86.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Checked|x86.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|Any CPU.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|ARM.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|ARM.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|ARM64.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|x64.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|x64.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|x86.ActiveCfg = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Debug|x86.Build.0 = Debug|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|Any CPU.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|Any CPU.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|ARM.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|ARM.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|ARM64.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|ARM64.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|x64.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|x64.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|x86.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.Release|x86.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{664F46A9-3C99-489B-AAB9-4CD3A430C425}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|Any CPU.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|ARM.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|ARM.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|ARM64.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|x64.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|x64.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|x86.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Checked|x86.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|ARM.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|ARM.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|ARM64.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|x64.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|x64.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|x86.ActiveCfg = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Debug|x86.Build.0 = Debug|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|Any CPU.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|ARM.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|ARM.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|ARM64.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|ARM64.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|x64.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|x64.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|x86.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.Release|x86.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{0CB805C8-0B76-4B1D-8AAF-48535B180448}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|Any CPU.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|ARM.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|ARM.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|ARM64.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|x64.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|x64.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|x86.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Checked|x86.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|ARM.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|ARM.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|ARM64.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|x64.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|x64.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|x86.ActiveCfg = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Debug|x86.Build.0 = Debug|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|Any CPU.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|ARM.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|ARM.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|ARM64.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|ARM64.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|x64.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|x64.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|x86.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.Release|x86.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{20251748-AA7B-45BE-ADAA-C9375F5CC80F}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|Any CPU.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|ARM.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|ARM.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|ARM64.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|x64.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|x64.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|x86.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Checked|x86.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|Any CPU.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|ARM.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|ARM.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|ARM64.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|x64.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|x64.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|x86.ActiveCfg = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Debug|x86.Build.0 = Debug|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|Any CPU.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|Any CPU.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|ARM.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|ARM.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|ARM64.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|ARM64.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|x64.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|x64.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|x86.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.Release|x86.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|Any CPU.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|ARM.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|ARM.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|ARM64.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|x64.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|x64.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|x86.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Checked|x86.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|Any CPU.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|ARM.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|ARM.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|ARM64.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|x64.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|x64.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|x86.ActiveCfg = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Debug|x86.Build.0 = Debug|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|Any CPU.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|Any CPU.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|ARM.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|ARM.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|ARM64.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|ARM64.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|x64.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|x64.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|x86.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.Release|x86.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|Any CPU.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|ARM.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|ARM.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|ARM64.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|x64.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|x64.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|x86.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Checked|x86.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|ARM.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|ARM.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|ARM64.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|x64.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|x64.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|x86.ActiveCfg = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Debug|x86.Build.0 = Debug|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|Any CPU.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|ARM.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|ARM.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|ARM64.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|ARM64.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|x64.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|x64.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|x86.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.Release|x86.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|Any CPU.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|ARM.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|ARM.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|ARM64.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|x64.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|x64.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|x86.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Checked|x86.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|Any CPU.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|ARM.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|ARM.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|ARM64.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|x64.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|x64.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|x86.ActiveCfg = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Debug|x86.Build.0 = Debug|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|Any CPU.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|Any CPU.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|ARM.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|ARM.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|ARM64.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|ARM64.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|x64.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|x64.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|x86.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.Release|x86.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{112FE2A7-3FD2-4496-8A14-171898AD5CF5}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|Any CPU.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|ARM.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|ARM.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|ARM64.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|x64.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|x64.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|x86.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Checked|x86.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|ARM.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|ARM.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|ARM64.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|x64.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|x64.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|x86.ActiveCfg = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Debug|x86.Build.0 = Debug|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|Any CPU.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|ARM.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|ARM.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|ARM64.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|ARM64.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|x64.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|x64.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|x86.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.Release|x86.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{8C27904A-47C0-44C7-B191-88FF34580CBE}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|Any CPU.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|ARM.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|ARM.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|ARM64.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|x64.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|x64.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|x86.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Checked|x86.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|Any CPU.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|ARM.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|ARM.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|ARM64.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|x64.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|x64.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|x86.ActiveCfg = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Debug|x86.Build.0 = Debug|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|Any CPU.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|Any CPU.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|ARM.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|ARM.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|ARM64.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|ARM64.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|x64.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|x64.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|x86.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.Release|x86.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{84881FB8-37E1-4D9B-B27E-9831C30DCC04}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|Any CPU.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|ARM.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|ARM.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|ARM64.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|x64.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|x64.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|x86.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Checked|x86.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|ARM.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|ARM.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|ARM64.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|x64.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|x64.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|x86.ActiveCfg = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Debug|x86.Build.0 = Debug|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|Any CPU.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|ARM.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|ARM.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|ARM64.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|ARM64.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|x64.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|x64.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|x86.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.Release|x86.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|Any CPU.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|ARM.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|ARM.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|ARM64.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|x64.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|x64.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|x86.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Checked|x86.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|ARM.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|ARM.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|ARM64.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|x64.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|x64.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|x86.ActiveCfg = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Debug|x86.Build.0 = Debug|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|Any CPU.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|ARM.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|ARM.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|ARM64.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|ARM64.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|x64.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|x64.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|x86.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.Release|x86.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{F9A69812-DC52-428D-9DB1-8B831A8FF776}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{6C43BE85-F8C3-4D76-8050-F25CE953A7FD} = {C3072949-6D24-451B-A308-2F3621F858B0}
{41638A4C-0DAF-47ED-A774-ECBBAC0315D7} = {19FAB78C-3351-4911-8F0C-8C6056401740}
{C3072949-6D24-451B-A308-2F3621F858B0} = {41638A4C-0DAF-47ED-A774-ECBBAC0315D7}
{252E5845-8D4C-4306-9D8F-ED2E2F7005F6} = {C3072949-6D24-451B-A308-2F3621F858B0}
{E7FEA82E-0E16-4868-B122-4B0BC0014E7F} = {C3072949-6D24-451B-A308-2F3621F858B0}
{179EF543-E30A-4428-ABA0-2E2621860173} = {C3072949-6D24-451B-A308-2F3621F858B0}
{447AC053-2E0A-4119-BD11-30A4A8E3F765} = {C3072949-6D24-451B-A308-2F3621F858B0}
{664F46A9-3C99-489B-AAB9-4CD3A430C425} = {C3072949-6D24-451B-A308-2F3621F858B0}
{0CB805C8-0B76-4B1D-8AAF-48535B180448} = {C3072949-6D24-451B-A308-2F3621F858B0}
{20251748-AA7B-45BE-ADAA-C9375F5CC80F} = {C3072949-6D24-451B-A308-2F3621F858B0}
{DDDA69DF-2C4C-477A-B6C9-B4FE73C6E288} = {C3072949-6D24-451B-A308-2F3621F858B0}
{73EA5188-1E4F-42D8-B63E-F1B878A4EB63} = {C3072949-6D24-451B-A308-2F3621F858B0}
{B50D14DB-8EE5-47BD-B412-62FA5C693CC7} = {C3072949-6D24-451B-A308-2F3621F858B0}
{112FE2A7-3FD2-4496-8A14-171898AD5CF5} = {C3072949-6D24-451B-A308-2F3621F858B0}
{8C27904A-47C0-44C7-B191-88FF34580CBE} = {C3072949-6D24-451B-A308-2F3621F858B0}
{84881FB8-37E1-4D9B-B27E-9831C30DCC04} = {C3072949-6D24-451B-A308-2F3621F858B0}
{0A34CA51-8B8C-41A1-BE24-AB2C574EA144} = {C3072949-6D24-451B-A308-2F3621F858B0}
{F9A69812-DC52-428D-9DB1-8B831A8FF776} = {C3072949-6D24-451B-A308-2F3621F858B0}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {46465737-C938-44FC-BE1A-4CE139EBB5E0}
EndGlobalSection
EndGlobal

File diff not shown because of its large size.

View file

@@ -1,505 +0,0 @@
parameters:
- name: runtimeFeed
displayName: Feed for runtime installation
type: string
default: default
values:
- default
- custom
- msrc-feed
- dotnetclimsrc-feed
- name: runtimeFeedToken
displayName: Base 64 SAS Token for runtime installation
type: string
default: default
values:
- default
- custom
- msrc-feed-sas-token-base64
- dotnetclimsrc-sas-token-base64
trigger: none
pr:
autoCancel: true
branches:
include:
- master
- release/*
paths:
exclude:
- documentation/*
- THIRD-PARTY-NOTICES.TXT
- LICENSE.TXT
variables:
- name: _TeamName
value: DotNetCore
- name: _InternalBuildArgs
value: ''
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- name: _SignType
value: real
# DotNet-Diagnostics-SDL-Params provides Tsa* variables for SDL checks.
- group: DotNet-Diagnostics-SDL-Params
- name: _InternalBuildArgs
value: /p:DotNetSignType=$(_SignType)
/p:TeamName=$(_TeamName)
/p:DotNetPublishUsingPipelines=true
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
- group: DotNet-MSRC-Storage
# Custom feed and token
- ${{ if eq(parameters.runtimeFeed, 'custom') }}:
- name: RuntimeFeedUrl
value: $(DotnetRuntimeDownloadFeed)
- ${{ if eq(parameters.runtimeFeedToken, 'custom') }}:
- name: RuntimeFeedBase64SasToken
value: $(DotnetRuntimeDownloadBase64SasToken)
# MSRC dotnet feed. Usually on orchestrated 2.1 releases.
- ${{ if eq(parameters.runtimeFeed, 'msrc-feed') }}:
- name: RuntimeFeedUrl
value: https://dotnetfeedmsrc.blob.core.windows.net
- ${{ if eq(parameters.runtimeFeedToken, 'msrc-feed-sas-token-base64') }}:
- name: RuntimeFeedBase64SasToken
value: $(dotnetfeedmsrc-read-sas-token-base64)
# dotnetclimsrc contains 3.1+
- ${{ if eq(parameters.runtimeFeed, 'dotnetclimsrc-feed') }}:
- name: RuntimeFeedUrl
value: https://dotnetclimsrc.blob.core.windows.net/dotnet
- ${{ if eq(parameters.runtimeFeedToken, 'dotnetclimsrc-sas-token-base64') }}:
- name: RuntimeFeedBase64SasToken
value: $(dotnetclimsrc-read-sas-token-base64)
stages:
- stage: build
displayName: Build and Test Diagnostics
jobs:
############################
# #
# Build legs #
# #
############################
- template: /eng/build.yml
parameters:
name: Windows
osGroup: Windows_NT
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
Build_Release:
_BuildConfig: Release
_BuildArch: x64
_PublishArtifacts: bin
Build_Release_x86:
_BuildConfig: Release
_BuildArch: x86
_PublishArtifacts: bin/Windows_NT.x86.Release
${{ if ne(variables['System.TeamProject'], 'public') }}:
Build_Release_arm:
_BuildOnly: true
_BuildConfig: Release
_BuildArch: arm
_PublishArtifacts: bin/Windows_NT.arm.Release
Build_Release_arm64:
_BuildOnly: true
_BuildConfig: Release
_BuildArch: arm64
_PublishArtifacts: bin/Windows_NT.arm64.Release
- template: /eng/build.yml
parameters:
name: CentOS_7
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7-3e800f1-20190501005343
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
_PublishArtifacts: bin/Linux.x64.Debug
Build_Release:
_BuildConfig: Release
_BuildArch: x64
_PublishArtifacts: bin/Linux.x64.Release
- template: /eng/build.yml
parameters:
name: Alpine3_6
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.6-WithNode-f4d3fe3-20181220200247
strategy:
matrix:
Build_Release:
_BuildConfig: Release
_BuildArch: x64
_PublishArtifacts: bin/Linux.x64.Release
${{ if in(variables['Build.Reason'], 'PullRequest') }}:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: MacOS
osGroup: MacOS
strategy:
matrix:
Build_Release:
_BuildConfig: Release
_BuildArch: x64
_PublishArtifacts: bin/OSX.x64.Release
${{ if in(variables['Build.Reason'], 'PullRequest') }}:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: MacOS_cross
osGroup: MacOS_cross
strategy:
matrix:
Build_Release:
_BuildConfig: Release
_BuildArch: arm64
_PublishArtifacts: bin/OSX.arm64.Release
${{ if in(variables['Build.Reason'], 'PullRequest') }}:
Build_Debug:
_BuildConfig: Debug
_BuildArch: arm64
- ${{ if ne(variables['System.TeamProject'], 'public') }}:
- template: /eng/build.yml
parameters:
name: Linux_cross
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-14.04-cross-1735d26-20190521133857
crossrootfsDir: '/crossrootfs/arm'
strategy:
matrix:
Build_Release:
_BuildOnly: true
_BuildConfig: Release
_BuildArch: arm
_PublishArtifacts: bin/Linux.arm.Release
- template: /eng/build.yml
parameters:
name: Linux_cross64
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-a3ae44b-20180315221921
crossrootfsDir: '/crossrootfs/arm64'
strategy:
matrix:
Build_Release:
_BuildOnly: true
_BuildConfig: Release
_BuildArch: arm64
_PublishArtifacts: bin/Linux.arm64.Release
- template: /eng/build.yml
parameters:
name: Alpine3_6_cross64
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-alpine-406629a-20200127195039
crossrootfsDir: '/crossrootfs/arm64'
strategy:
matrix:
Build_Release:
_BuildOnly: true
_BuildConfig: Release
_BuildArch: arm64
_PublishArtifacts: bin/Linux.arm64.Release
############################
# #
# Test only legs #
# #
############################
- template: /eng/build.yml
parameters:
name: Debian_Stretch
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:debian-stretch-3e800f1-20190521154431
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: Fedora_28
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-28-09ca40b-20190508143249
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: Fedora_29
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-29-09ca40b-20190508143249
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: OpenSuse_42_1
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:opensuse-42.1-c103199-20180628122439
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: OpenSuse_42_3
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:opensuse-42.3-3e800f1-20190501005344
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: Ubuntu_16_04
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-09ca40b-20190520220842
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
- template: /eng/build.yml
parameters:
name: Ubuntu_18_04
osGroup: Linux
dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-3e800f1-20190508143252
dependsOn: CentOS_7
testOnly: true
strategy:
matrix:
Build_Debug:
_BuildConfig: Debug
_BuildArch: x64
# Download, sign, package and publish
- ${{ if notin(variables['Build.Reason'], 'PullRequest') }}:
- template: /eng/common/templates/job/job.yml
parameters:
name: Sign_Package_Publish
displayName: Sign, Package, and Generate BAR Manifests
dependsOn:
- Windows
- CentOS_7
- Alpine3_6
- MacOS
- MacOS_cross
- Linux_cross
- Linux_cross64
- Alpine3_6_cross64
condition: succeeded()
pool:
name: NetCoreInternal-Pool
queue: BuildPool.Windows.10.Amd64.VS2017
enablePublishUsingPipelines: true
enableMicrobuild: true
artifacts:
publish:
logs:
name: Logs_Packaging_Signing
steps:
# Windows x64 download. Everything under "bin" is published for the Windows x64 build.
- task: DownloadPipelineArtifact@2
displayName: Download Windows x64 and Managed Artifacts
inputs:
artifactName: Windows_x64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin'
condition: succeeded()
# Windows x86 download
- task: DownloadPipelineArtifact@2
displayName: Download Windows x86 Artifacts
inputs:
artifactName: Windows_x86_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Windows_NT.x86.Release'
condition: succeeded()
# Windows arm download
- task: DownloadPipelineArtifact@2
displayName: Download Windows Arm Artifacts
inputs:
artifactName: Windows_arm_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Windows_NT.arm.Release'
condition: succeeded()
# Windows arm64 download
- task: DownloadPipelineArtifact@2
displayName: Download Windows Arm64 Artifacts
inputs:
artifactName: Windows_arm64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Windows_NT.arm64.Release'
condition: succeeded()
# Linux x64 download
- task: DownloadPipelineArtifact@2
displayName: Download Linux Artifacts
inputs:
artifactName: CentOS_7_x64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux.x64.Release'
condition: succeeded()
# Linux MUSL x64 download
- task: DownloadPipelineArtifact@2
displayName: Download Linux Musl Artifacts
inputs:
artifactName: Alpine3_6_x64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux-musl.x64.Release'
condition: succeeded()
# Linux arm download
- task: DownloadPipelineArtifact@2
displayName: Download Linux Arm Artifacts
inputs:
artifactName: Linux_cross_arm_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux.arm.Release'
condition: succeeded()
# Linux arm64 download
- task: DownloadPipelineArtifact@2
displayName: Download Linux Arm64 Artifacts
inputs:
artifactName: Linux_cross64_arm64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux.arm64.Release'
condition: succeeded()
# Linux MUSL arm64 download
- task: DownloadPipelineArtifact@2
displayName: Download Linux Musl arm64 Artifacts
inputs:
artifactName: Alpine3_6_cross64_arm64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux-musl.arm64.Release'
condition: succeeded()
# MacOS download
- task: DownloadPipelineArtifact@2
displayName: Download MacOS Artifacts
inputs:
artifactName: MacOS_x64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/OSX.x64.Release'
condition: succeeded()
# MacOS arm64 download
- task: DownloadPipelineArtifact@2
displayName: Download MacOS arm64 Artifacts
inputs:
artifactName: MacOS_cross_arm64_Release
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/OSX.arm64.Release'
condition: succeeded()
# Create nuget packages, sign binaries and publish to blob feed
- script: $(Build.SourcesDirectory)\eng\ci-prepare-artifacts.cmd $(_InternalBuildArgs)
displayName: Package, Sign, and Publish
continueOnError: false
condition: succeeded()
# Publish package and log build artifacts
- task: PublishBuildArtifacts@1
displayName: Publish Package Artifacts
inputs:
publishLocation: Container
pathtoPublish: '$(Build.SourcesDirectory)/artifacts/packages'
artifactName: Packages
continueOnError: true
condition: always()
- task: PublishBuildArtifacts@1
displayName: Publish Bundled Tools
inputs:
publishLocation: Container
pathtoPublish: '$(Build.SourcesDirectory)/artifacts/bundledtools'
artifactName: BundledTools
continueOnError: true
condition: always()
- template: /eng/common/templates/job/publish-build-assets.yml
parameters:
configuration: Release
dependsOn: Sign_Package_Publish
publishUsingPipelines: true
pool:
name: NetCoreInternal-Pool
queue: buildpool.windows.10.amd64.vs2017
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- template: /eng/common/templates/post-build/post-build.yml
parameters:
# This is to enable SDL runs as part of the Post-Build Validation Stage,
# as well as NuGet, SourceLink, and signing validation.
# The variables get imported from group dotnet-diagnostics-sdl-params
publishingInfraVersion: 3
enableSourceLinkValidation: true
enableSigningValidation: false
enableSymbolValidation: false
enableNugetValidation: true
symbolPublishingAdditionalParameters: '/p:PublishSpecialClrFiles=false'
publishInstallersAndChecksums: true
SDLValidationParameters:
enable: true
continueOnError: true
params: ' -SourceToolsList @("policheck","credscan")
-TsaInstanceURL $(_TsaInstanceURL)
-TsaProjectName $(_TsaProjectName)
-TsaNotificationEmail $(_TsaNotificationEmail)
-TsaCodebaseAdmin $(_TsaCodebaseAdmin)
-TsaBugAreaPath $(_TsaBugAreaPath)
-TsaIterationPath $(_TsaIterationPath)
-TsaRepositoryName "diagnostics"
-TsaCodebaseName "diagnostics"
-TsaPublish $True'
artifactNames:
- 'Packages'

View file

@@ -1,54 +0,0 @@
Frequently Asked Questions
==========================
* If SOS or dotnet-dump analyze commands display "UNKNOWN" for type or function names, your core dump may not contain all the managed state. Dumps created with gdb or gcore have this problem. Linux system-generated core dumps need the process's `coredump_filter` to be set to at least 0x3f (see the collection sketch after this list). See [core](http://man7.org/linux/man-pages/man5/core.5.html) for more information.
* If dump collection (`dotnet-dump collect` or `createdump`) doesn't work in a docker container, try adding the SYS\_PTRACE capability with --cap-add=SYS\_PTRACE or --privileged.
* If dump analysis (`dotnet-dump analyze`) on Microsoft .NET Core SDK Linux docker images fails with an `Unhandled exception: System.DllNotFoundException: Unable to load shared library 'libdl.so' or one of its dependencies` exception, try installing the "libc6-dev" package.
* If dump collection (`dotnet-dump collect`) fails with a message ending like `Permission denied /tmp/dotnet-diagnostic-19668-22628141-socket error`, you don't have access to that socket. Verify the target process is owned by the user trying to create the dump, or run the dump collection command with `sudo`. If you use `sudo` to collect the dump, make sure the dump file output path (set via the --output option) is accessible to the target process/user. The default dump path is the current directory, which may not be accessible to the target process's user.
* If dump collection (`dotnet-dump collect`) fails with `Core dump generation FAILED 0x80004005`, look for error message output on the target process's console (not the console executing the dotnet-dump collect). This error may be caused by writing the core dump to a protected, inaccessible or non-existent location. To get more information about the core dump generation, add the `--diag` option to the dotnet-dump collect command and look for the diagnostic logging on the target process's console.
* If you receive the following error message executing a SOS command under `lldb` or `dotnet-dump analyze`, SOS cannot find the DAC module (`libmscordaccore.so` or `libmscordaccore.dylib`) in the same directory as the runtime (libcoreclr.so or libcoreclr.dylib) module.
```
(lldb) clrstack
Failed to load data access module, 0x80131c64
Can not load or initialize libmscordaccore.so. The target runtime may not be initialized.
ClrStack failed
```
or
```
Failed to load data access module, 0x80131c4f
You can run the debugger command 'setclrpath ' to control the load path of libmscordaccore.so.
If that succeeds, the SOS command should work on retry.
For more information see https://go.microsoft.com/fwlink/?linkid=2135652
```
First try enabling the symbol downloading with `setsymbolserver -ms`. This is already enabled for `dotnet-dump analyze` and if SOS for lldb was installed with `dotnet-sos install`.
If that doesn't work, try using the `setclrpath <directory>` command with a directory that contains the matching version of the DAC module. This is useful for private runtimes or debug builds that haven't been published to our symbol servers.
If this is a dump, the problem could also be that the dump is missing some memory required by SOS. Try generating a "full" dump (the default with `dotnet-dump collect` without a `--type` option) or set the crash dump generation (createdump) environment variable `COMPlus_DbgMiniDumpType=4`. For more details on crash dump generation see [here](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dumps#collecting-dumps-on-crash).
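A sketch of enabling full crash dumps for a target app before launching it (the app name is a placeholder; `COMPlus_DbgEnableMiniDump=1` turns crash dump generation on):
```
export COMPlus_DbgEnableMiniDump=1
export COMPlus_DbgMiniDumpType=4
dotnet MyApp.dll
```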
* If you receive this error message executing a SOS command:
```
Failed to find runtime module (libcoreclr.so), 0x80004005
Extension commands need it in order to have something to do.
ClrStack failed
```
The following could be the problem:
* The process or core dump hasn't loaded the .NET Core runtime yet.
* The coredump was loaded under lldb without specifying the host (i.e. `dotnet`). `target modules list` doesn't display `libcoreclr.so` or `libcoreclr.dylib`. Start lldb with the host as the target program and the core file, for example `lldb --core coredump /usr/share/dotnet/dotnet`. In case you don't have the host available, `dotnet-symbol` will be able to download it.
* If a coredump was loaded under lldb, a host was specified, and `target modules list` displays the runtime module but you still get that message, lldb needs the correct version of libcoreclr.so/dylib next to the coredump. You can use `dotnet-symbol --modules <coredump>` to download the needed binaries.
* If you receive one of these error messages executing a SOS command running on Windows:
```
SOS does not support the current target architecture 0x0000014c
```
or
```
SOS does not support the current target architecture 'arm32' (0x01c4). A 32 bit target may require a 32 bit debugger or vice versa. In general, try to use the same bitness for the debugger and target process.
```
You may need a different bitness of the Windows (windbg/cdb) debugger or dotnet-dump. If you are running an x64 (64 bit) version, try an x86 (32 bit) version. The easiest way to get an x86 version of dotnet-dump is to install the "single-file" version [here](https://aka.ms/dotnet-dump/win-x86). For more information on single-file tools see [here](https://github.com/dotnet/diagnostics/blob/master/documentation/single-file-tools.md#single-file-diagnostic-tools).

documentation/building.md Normal file

@ -0,0 +1,25 @@
# Clone, build and test the repo
------------------------------
To clone, build and test the repo on Windows:
```cmd
cd %USERPROFILE%
git clone https://github.com/dotnet/dotnet-monitor
cd dotnet-monitor
build.cmd
test.cmd
```
On Linux and macOS:
```bash
cd $HOME
git clone https://github.com/dotnet/dotnet-monitor
cd dotnet-monitor
./build.sh
./test.sh
```
If you prefer to use *Visual Studio*, *Visual Studio Code*, or *Visual Studio for Mac*, you can open the `dotnet-monitor` solution at the root of the repo.


@ -1,79 +0,0 @@
Cross Compilation for Android on Linux
======================================
Through cross compilation, on Linux it is possible to build for arm64 Android.
Requirements
------------
You'll need to generate a toolchain and a sysroot for Android. There's a script which takes care of the required steps.
Generating the rootfs
---------------------
To generate the rootfs, run the following command in the `diagnostics` folder:
```
cross/init-android-rootfs.sh
```
This will download the NDK and any packages required to compile Android on your system. It's over 1 GB of data, so it may take a while.
Cross compiling
---------------
Once the rootfs has been generated, it will be possible to cross compile the repo.
When cross compiling, you need to set both the `CONFIG_DIR` and `ROOTFS_DIR` variables.
To compile for arm64, run:
```
CONFIG_DIR=`realpath cross/android/arm64` ROOTFS_DIR=`realpath cross/android-rootfs/toolchain/arm64/sysroot` ./build.sh cross arm64 skipgenerateversion skipmscorlib cmakeargs -DENABLE_LLDBPLUGIN=0
```
The resulting binaries will be found in `bin/Product/Linux.BuildArch.BuildType/`
Debugging on Android
--------------------
You can debug on Android using a remote lldb server which you run on your Android device.
First, push the lldb server to Android:
```
adb push cross/android/lldb/2.2/android/arm64-v8a/lldb-server /data/local/tmp/
```
Then, launch the lldb server on the Android device. Open a shell using `adb shell` and run:
```
adb shell
cd /data/local/tmp
./lldb-server platform --listen *:1234
```
After that, you'll need to forward port 1234 from your Android device to your PC:
```
adb forward tcp:1234 tcp:1234
```
Finally, install lldb on your PC and connect to the debug server running on your Android device:
```
lldb-3.9
(lldb) platform select remote-android
Platform: remote-android
Connected: no
(lldb) platform connect connect://localhost:1234
Platform: remote-android
Triple: aarch64-*-linux-android
OS Version: 23.0.0 (3.10.84-perf-gf38969a)
Kernel: #1 SMP PREEMPT Fri Sep 16 11:29:29 2016
Hostname: localhost
Connected: yes
WorkingDir: /data/local/tmp
(lldb) target create coreclr/pal/tests/palsuite/file_io/CopyFileA/test4/paltest_copyfilea_test4
(lldb) env LD_LIBRARY_PATH=/data/local/tmp/coreclr/lib
(lldb) run
```


@ -1,31 +0,0 @@
FreeBSD Prerequisites
===================
These instructions will lead you through preparing to build the diagnostics repo on FreeBSD. We'll start by showing how to set up your environment from scratch.
Environment
===========
These instructions are written assuming FreeBSD 10.1-RELEASE, since that's the release the team uses.
These instructions assume you use the binary package tool `pkg` (analog to `apt-get` or `yum` on Linux) to install the environment. Compiling the dependencies from source using the ports tree might work too, but is untested.
Minimum RAM required to build is 1GB. The build is known to fail on 512 MB VMs ([Issue 536](https://github.com/dotnet/coreclr/issues/536)).
Toolchain Setup
---------------
Install the following packages for the toolchain:
- bash
- cmake
- llvm39 (includes LLVM 3.9, Clang 3.9 and LLDB 3.9)
- gettext
- ninja (optional)
- python27
To install the packages you need:
```sh
sudo pkg install bash cmake llvm39 gettext python27
```


@ -1,210 +0,0 @@
Linux Prerequisites
===================
These instructions will lead you through preparing to build and test the diagnostics repo on Linux. We'll start by showing how to set up your environment from scratch. In some cases, a version of lldb/llvm that works best on the distro will have to be built and installed.
Toolchain Setup
---------------
The following instructions will install the required packages. This only needs to be done once per machine. These instructions assume that you already have "sudo" installed. It is also recommended to create a GitHub fork of the diagnostics repo and clone that instead of https://github.com/dotnet/diagnostics.git directly.
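For example, a sketch of cloning a fork (`<your-github-account>` is a placeholder for your account name):
```sh
git clone https://github.com/<your-github-account>/diagnostics.git
```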
To build or cross build for ARM on Windows or Linux see the instructions [here](https://github.com/dotnet/runtime/blob/master/docs/workflow/building/coreclr/cross-building.md#generating-the-rootfs) in the runtime repo. You will need to clone the runtime [repo](https://github.com/dotnet/runtime.git) and build the appropriate "rootfs" for arm or arm64 using these instructions. You only need to do this once.
#### Ubuntu 14.04 ####
In order to get clang-3.9, llvm-3.9 and lldb-3.9, we need to add additional package sources (see [http://llvm.org/apt/](http://llvm.org/apt/) for the other Ubuntu versions not listed here):
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Then install the required packages:
sudo apt-get install cmake clang-3.9 gdb gettext git libicu-dev lldb-3.9 liblldb-3.9-dev libunwind8 llvm-3.9 make python python-lldb-3.9 tar zip
The lldb 3.9 package needs a lib file symbolic link fixed:
cd /usr/lib/llvm-3.9/lib
sudo ln -s ../../x86_64-linux-gnu/liblldb-3.9.so.1 liblldb-3.9.so.1
See the section below on how to clone, build and test the repo.
#### Ubuntu 16.04 ####
Add the additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/xenial/ llvm-toolchain-xenial main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/xenial/ llvm-toolchain-xenial-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Then install the required packages:
sudo apt-get install cmake clang-3.9 gdb gettext git libicu-dev lldb-3.9 liblldb-3.9-dev libunwind8 llvm-3.9 make python python-lldb-3.9 tar zip
The lldb 3.9 package needs a lib file symbolic link fixed:
cd /usr/lib/llvm-3.9/lib
sudo ln -s ../../x86_64-linux-gnu/liblldb-3.9.so.1 liblldb-3.9.so.1
See the section below on how to clone, build and test the repo.
#### Ubuntu 17.10 ####
Add the additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/artful/ llvm-toolchain-artful main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/artful/ llvm-toolchain-artful-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Then install the required packages:
sudo apt-get install cmake clang-3.9 gdb gettext git libicu-dev lldb-3.9 liblldb-3.9-dev libunwind8 llvm-3.9 make python python-lldb-3.9 tar zip
#### Ubuntu 18.04 ####
Install the required packages:
sudo apt-get update
sudo apt-get install cmake clang-3.9 curl gdb gettext git libicu-dev libssl1.0-dev lldb-3.9 liblldb-3.9-dev libunwind8 llvm-3.9 make python-lldb-3.9 tar wget zip
See the section below on how to clone, build and test the repo.
#### Ubuntu 20.04 ####
Install the required packages:
sudo apt update
sudo apt install cmake clang curl gdb gettext git libicu-dev lldb liblldb-dev libunwind8 llvm make python python-lldb tar wget zip
See the section below on how to clone, build and test the repo.
#### Alpine 3.8/3.9 ####
Install the required packages:
sudo apk add autoconf bash clang clang-dev cmake coreutils curl gcc gettext-dev git icu-dev krb5-dev libunwind-dev llvm make openssl openssl-dev python which
#### CentOS 6 ####
[TBD]
#### CentOS 7 ####
llvm, clang and lldb 3.9 will have to be built for this distro.
First the prerequisites:
sudo yum install centos-release-SCL epel-release
sudo yum install cmake cmake3 gcc gcc-c++ gdb git libicu libunwind make python27 tar wget which zip
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/centos7/build-install-lldb.sh).
WARNING: this script installs llvm and lldb and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/centos7/build-install-lldb.sh
This will take some time to complete, but after it is finished all the required components will be built and installed. To build and test the repo:
cd diagnostics
./build.sh
./test.sh
#### Debian 8.2/8.7 ####
In order to get lldb-5.0 (3.9 doesn't seem to work that well on 8.x), we need to add additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-5.0 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Then install the packages you need:
sudo apt-get install cmake clang-5.0 gdb gettext git libicu-dev liblldb-5.0-dev libunwind8 lldb-5.0 llvm make python-lldb-5.0 tar wget zip
See the section below on how to clone, build and test the repo. Add "--clang5.0" to the ./build.sh command.
#### Debian 9 (Stretch) ####
sudo apt-get install cmake clang-3.9 gdb gettext git libicu-dev liblldb-3.9-dev libunwind8 lldb-3.9 llvm make python-lldb-3.9 tar wget zip
See the section below on how to clone, build and test the repo.
#### Fedora 24 ####
sudo dnf install clang cmake findutils gdb git libicu libunwind make python tar wget which zip
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/fedora24/build-install-lldb.sh).
WARNING: this script installs llvm and lldb and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/fedora24/build-install-lldb.sh
This will take some time to complete, but after it is finished all the required components will be built and installed. To build and test the repo:
cd diagnostics
./build.sh
./test.sh
#### Fedora 27, 28, 29 ####
sudo dnf install clang cmake compat-openssl10 findutils gdb git libicu libunwind lldb-devel llvm-devel make python python2-lldb tar wget which zip
See the section below on how to clone, build and test the repo.
#### OpenSuse 42.1, 42.3 ####
sudo zypper install cmake gcc-c++ gdb git hostname libicu libunwind lldb-devel llvm-clang llvm-devel make python python-xml tar wget which zip
ln -s /usr/bin/clang++ /usr/bin/clang++-3.5
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/opensuse/build-install-lldb.sh).
WARNING: this script installs llvm and lldb and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/opensuse/build-install-lldb.sh
This will take some time to complete, but after it is finished all the required components will be built and installed. To build and test the repo:
cd diagnostics
./build.sh
./test.sh
#### RHEL 7.5 ####
[TBD]
#### SLES ####
[TBD]
Set the maximum number of file-handles
--------------------------------------
To ensure that your system can allocate enough file-handles for the build, run `sysctl fs.file-max`. If it is less than 100000, add `fs.file-max = 100000` to `/etc/sysctl.conf`, and then run `sudo sysctl -p`.
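A sketch of the same steps as commands (the limit value is the one from the paragraph above):
```sh
sysctl fs.file-max
echo "fs.file-max = 100000" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
```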
Clone, build and test the repo
------------------------------
You now have all the required components. To clone, build and test the repo:
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
cd diagnostics
./build.sh
./test.sh


@ -1,55 +0,0 @@
NetBSD Prerequisites
===================
These instructions will lead you through preparing to build the diagnostics repo on NetBSD. We'll start by showing how to set up your environment from scratch.
Environment
===========
These instructions are written on NetBSD 7.x on the amd64 platform, since that's the release the team uses.
Older releases aren't supported because building requires the modern LLVM stack (Clang and LLDB) that is developed against the NetBSD-7.x branch.
Pull Requests are welcome to address other ports (like i386 or evbarm) as long as they don't break the ability to use NetBSD/amd64.
Minimum RAM required to build is 1GB.
The pkgsrc framework is required to build .NET projects on NetBSD. Minimal pkgsrc version required is 2016Q1.
pkgsrc setup
------------
Fetch pkgsrc and install to the system. By default it's done in the /usr directory as root:
```
ftp -o- ftp://ftp.netbsd.org/pub/pkgsrc/stable/pkgsrc.tar.gz | tar -zxpf- -C /usr
```
The .NET projects are tracked in pkgsrc-wip.
In order to use pkgsrc-wip, git must be installed:
```
cd /usr/pkgsrc/devel/git-base && make install
```
To access resources over SSL link, mozilla-rootcerts must be installed:
```
cd /usr/pkgsrc/security/mozilla-rootcerts && make install
```
And follow the MESSAGE commands to finish the installation.
Installing pkgsrc-wip
---------------------
Type the following command to fetch the pkgsrc-wip sources:
```sh
cd /usr/pkgsrc
git clone --depth 1 git://wip.pkgsrc.org/pkgsrc-wip.git wip
```


@ -1,24 +0,0 @@
Official Build Instructions
===========================
WARNING: These instructions will only work internally at Microsoft.
To kick off an official build, go to this build definition: https://dev.azure.com/dnceng/internal/_build?definitionId=528.
This signs and publishes the following packages to the tools feed (https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json):
- dotnet-dump
- dotnet-gcdump
- dotnet-sos
- dotnet-trace
- dotnet-counters
- dotnet-monitor
- Microsoft.Diagnostics.NETCore.Client
To release the latest tools:
1) Merge all the commits for this release from the master branch to the release/stable branch.
2) Kick off an official build from the release/stable branch.
3) Change all the package version references in the documentation folder to this official build's package version to keep the docs up to date.
4) Download the above packages from the successful official build under "Artifacts" -> "PackageArtifacts".
5) Upload these packages to nuget.org.
6) Create a new "release" in the [releases](https://github.com/dotnet/diagnostics/releases) diagnostics repo release tab with the package version (not the official build id) as the "tag". Add any release notes about known issues, issues fixed and new features.


@ -1,44 +0,0 @@
MacOS Prerequisites
===================
These instructions will lead you through preparing to build the diagnostics repo on macOS. We'll start by showing how to set up your environment from scratch.
Environment
===========
These instructions were validated on macOS 10.12.6 (Sierra) and Xcode 9.2.
Git Setup
---------
Clone the diagnostics repository (either upstream or a fork).
```sh
git clone https://github.com/dotnet/diagnostics
```
CMake
-----
This repo has a dependency on CMake for the build. You can download it from [CMake downloads](http://www.cmake.org/download/).
Alternatively, you can install CMake from [Homebrew](http://brew.sh/).
```sh
brew install cmake
```
Building
--------
In the root of the diagnostics repo run:
```sh
./build.sh
```
This will build SOS, the tests and the SOS plugin (libsosplugin.dylib) for the Xcode (9.2) version of lldb (swift 4.0) on the machine.
Support for later versions of macOS/Xcode/lldb is TBD.


@ -1,18 +0,0 @@
# Install cmake version 3.10.2 without installing libcurl.so.4
wget https://cmake.org/files/v3.10/cmake-3.10.2-Linux-x86_64.tar.gz
sudo tar -xf cmake-3.10.2-Linux-x86_64.tar.gz --strip 1 -C /usr/local
rm cmake-3.10.2-Linux-x86_64.tar.gz
# Build and install curl 7.45.0 to get the right version of libcurl.so.4
wget https://curl.haxx.se/download/curl-7.45.0.tar.lzma
tar -xf curl-7.45.0.tar.lzma
rm curl-7.45.0.tar.lzma
cd curl-7.45.0
./configure --disable-dict --disable-ftp --disable-gopher --disable-imap --disable-ldap --disable-ldaps --disable-libcurl-option --disable-manual --disable-pop3 --disable-rtsp --disable-smb --disable-smtp --disable-telnet --disable-tftp --enable-ipv6 --enable-optimize --enable-symbol-hiding --with-ca-path=/etc/ssl/certs/ --with-nghttp2 --with-gssapi --with-ssl --without-librtmp
make
sudo make install
cd ..
rm -r curl-7.45.0
# Install curl
sudo apt-get install curl


@ -1,110 +0,0 @@
Windows Prerequisites
=====================
These instructions will lead you through preparing to build the diagnostics repo on Windows.
----------------
# Environment
You must install several components to build. These instructions were tested on Windows 7+.
## Visual Studio
- Install [Visual Studio 2019](https://visualstudio.microsoft.com/downloads/). The Community version is completely free.
Visual Studio 2019 installation process:
* It's recommended to use 'Workloads' installation approach. The following are the minimum requirements:
* .NET Desktop Development with all default components.
* Desktop Development with C++ with all default components.
* To build for Arm32 or Arm64, make sure that you have the Windows 10 SDK installed (or selected to be installed as part of VS installation). To explicitly install the Windows SDK, download it from here: [Windows SDK for Windows 10](https://developer.microsoft.com/en-us/windows/downloads).
* In addition, ensure you install the ARM tools. In the "Individual components" window, in the "Compilers, build tools, and runtimes" section, check the box for "MSVC v142 - VS 2019 C++ ARM build tools (v14.23)".
* Also, ensure you install the ARM64 tools. In the "Individual components" window, in the "Compilers, build tools, and runtimes" section, check the box for "MSVC v142 - VS 2019 C++ ARM64 build tools (v14.23)".
* To build the tests, you will need some additional components:
* Windows 10 SDK component version 10.0.18362 or newer. This component is installed by default as a part of 'Desktop Development with C++' workload.
* C++/CLI support for v142 build tools (14.23)
The dotnet/diagnostics repository requires at least Visual Studio 2019 16.3.
Visual Studio Express is not supported.
## CMake
This repo build has been validated using CMake 3.15.5
- Install [CMake](http://www.cmake.org/download) for Windows.
- Add its location (e.g. C:\Program Files (x86)\CMake\bin) to the PATH environment variable.
The installation script has a check box to do this, but you can do it yourself after the fact
following the instructions at [Adding to the Default PATH variable](#adding-to-the-default-path-variable)
## Python
Python is used in the build system. We are currently using python 2.7.9, although
any recent (2.4+) version of Python should work, including Python 3.
- Install [Python](https://www.python.org/downloads/) for Windows.
- Add its location (e.g. C:\Python*\) to the PATH environment variable.
The installation script has a check box to do this, but you can do it yourself after the fact
following the instructions at [Adding to the Default PATH variable](#adding-to-the-default-path-variable)
## Git
For actual user operations, it is often more convenient to use the Git features built into Visual Studio 2015.
However, the diagnostics repo uses the Git command line utilities directly, so you need to install them
for these to work properly. You can get them from
- Install [Git For Windows](https://git-for-windows.github.io/)
- Add its location (e.g. C:\Program Files\Git\cmd) to the PATH environment variable.
The installation script has a check box to do this, but you can do it yourself after the fact
following the instructions at [Adding to the Default PATH variable](#adding-to-the-default-path-variable)
## PowerShell
PowerShell is used in the build system. Ensure that it is accessible via the PATH environment variable.
Typically this is %SYSTEMROOT%\System32\WindowsPowerShell\v1.0\.
Powershell version must be 3.0 or higher. This should be the case for Windows 8 and later builds.
- Windows 7 SP1 can install Powershell version 4 [here](https://www.microsoft.com/en-us/download/details.aspx?id=40855).
## Adding to the default PATH variable
The commands above need to be on your command lookup path. Some installers will automatically add them to
the path as part of installation, but if not here is how you can do it.
You can of course add a directory to the PATH environment variable with the syntax
```
set PATH=%PATH%;DIRECTORY_TO_ADD_TO_PATH
```
However the change above will only last until the command window closes. You can make your change to
the PATH variable persistent by going to Control Panel -> System And Security -> System -> Advanced system settings -> Environment Variables,
and select the 'Path' variable in the 'System variables' (if you want to change it for all users) or 'User variables' (if you only want
to change it for the current user). Simply edit the PATH variable's value and add the directory (with a semicolon separator).
## Cross Compilation for ARM on Windows
Building ARM for Windows can be done using cross compilation. Make sure the above requirements for building for arm32 are installed.
C:\diagnostics> build.cmd -architecture arm
## Building
To build under Windows, run build.cmd from the root of the repository:
```bat
build.cmd
[Lots of build spew]
BUILD: Repo sucessfully built.
BUILD: Product binaries are available at c:\git\diagnostics\artifacts\Debug\bin\Windows_NT.x64
```
To build for x86:
```bat
build.cmd -architecture x86
```
To test the resulting SOS:
```bat
test.cmd
```


@ -1,108 +0,0 @@
Debugging Linux or MacOS Core Dump
==================================
These instructions will lead you through getting symbols, loading and debugging a Linux or MacOS core dump. The best way to generate a core dump on Linux (only) is through the [createdump](https://github.com/dotnet/runtime/blob/master/docs/design/coreclr/botr/xplat-minidump-generation.md#configurationpolicy) facility.
Dumps created with gdb or gcore do not have all the managed state so various SOS or dotnet-dump commands may display "UNKNOWN" for type and function names. This can also happen with Linux system generated core dumps if the `coredump_filter` for the process is not set to at least 0x3f. See [core](http://man7.org/linux/man-pages/man5/core.5.html) for more information.
### Getting symbols ###
Because SOS now has symbol download support (both managed PDBs and native symbols via `loadsymbols`) all that lldb requires is the host program and a few other binaries. The host is usually `dotnet`, but for self-contained applications it is the .NET Core `apphost` renamed to the program/project name. These steps will handle either case and download the host lldb needs to properly diagnose a core dump. There are also cases where the runtime module (i.e. libcoreclr.so) is needed by lldb.
First install or update the dotnet CLI symbol tool. This only needs to be done once. See this [link](https://github.com/dotnet/symstore/tree/master/src/dotnet-symbol#install) for more details. We need version 1.0.142101 or greater of dotnet-symbol installed.
~$ dotnet tool install -g dotnet-symbol
You can invoke the tool using the following command: dotnet-symbol
Tool 'dotnet-symbol' (version '1.0.142101') was successfully installed.
Or update if already installed:
~$ dotnet tool update -g dotnet-symbol
Tool 'dotnet-symbol' was successfully updated from version '1.0.51501' to version '1.0.142101'.
Copy the core dump to a tmp directory.
~$ mkdir /tmp/dump
~$ cp ~/coredump.32232 /tmp/dump
Download the host program, modules and symbols for the core dump:
~$ dotnet-symbol /tmp/dump/coredump.32232
If your project/program binaries are not on the machine the core dump is being loaded on, copy them to a temporary directory. You can use the lldb/SOS command `setsymbolserver -directory <temp-dir>` to add this directory to the search path.
Alternatively, you can download just the host program for the core dump (this is all lldb needs) if you only need symbols for the managed modules. The `loadsymbols` command in SOS will attempt to download the native runtime symbols.
~$ dotnet-symbol --host-only --debugging /tmp/dump/coredump.32232
If the `--host-only` option is not found, update dotnet-symbol to the latest with the above step.
### Install lldb ###
See the instructions [here](sos.md#getting-lldb) on installing lldb.
### Install the latest SOS ###
See the instructions [here](sos.md#installing-sos) on installing SOS.
### Launch lldb under Linux ###
~$ lldb --core /tmp/dump/coredump.32232 <host-program>
Core file '/tmp/dump/coredump.32232' (x86_64) was loaded.
(lldb)
The `<host-program>` is the native program that started the .NET Core application. It is usually `dotnet` unless the application is self-contained, in which case it is the name of the application without the .dll.
Add the directory with the core dump and symbols to the symbol search path:
(lldb) setsymbolserver -directory /tmp/dump
Added symbol directory path: /tmp/dump
(lldb)
Optionally load the native symbols. The managed PDBs will be loaded on demand when needed:
(lldb) loadsymbols
Even if the core dump was not generated on this machine, the native and managed .NET Core symbols should be available along with all the SOS commands.
### Launch lldb under MacOS ###
~$ lldb --core /cores/core.32232 <host-program>
(lldb)
Follow the rest of the above Linux steps to set the symbol server and load native symbols.
The MacOS lldb has a bug that prevents SOS clrstack from properly working. Because of this bug SOS can't properly match the lldb native threads with the managed thread OSIDs displayed by `clrthreads`. The `setsostid` command is a workaround for this lldb bug. This command maps the OSID from this command:
```
(lldb) clrthreads
ThreadCount: 2
UnstartedThread: 0
BackgroundThread: 1
PendingThread: 0
DeadThread: 0
Hosted Runtime: no
Lock
DBG ID OSID ThreadOBJ State GC Mode GC Alloc Context Domain Count Apt Exception
XXXX 1 1fbf31 00007FBEC9007200 20020 Preemptive 0000000190191710:0000000190191FD0 00007FBEC981F200 0 Ukn System.IO.DirectoryNotFoundException 0000000190172b88
XXXX 2 1fbf39 00007FBEC9008000 21220 Preemptive 0000000000000000:0000000000000000 00007FBEC981F200 0 Ukn (Finalizer)
```
To one of the native thread indexes from this command:
```
(lldb) thread list
Process 0 stopped
* thread #1: tid = 0x0000, 0x00007fffb5595d42 libsystem_kernel.dylib`__pthread_kill + 10, stop reason = signal SIGSTOP
thread #2: tid = 0x0001, 0x00007fffb558e34a libsystem_kernel.dylib`mach_msg_trap + 10, stop reason = signal SIGSTOP
thread #3: tid = 0x0002, 0x00007fffb559719e libsystem_kernel.dylib`poll + 10, stop reason = signal SIGSTOP
thread #4: tid = 0x0003, 0x00007fffb5595a3e libsystem_kernel.dylib`__open + 10, stop reason = signal SIGSTOP
thread #5: tid = 0x0004, 0x00007fffb5595bf2 libsystem_kernel.dylib`__psynch_cvwait + 10, stop reason = signal SIGSTOP
thread #6: tid = 0x0005, 0x00007fffb5595bf2 libsystem_kernel.dylib`__psynch_cvwait + 10, stop reason = signal SIGSTOP
thread #7: tid = 0x0006, 0x00007fffb558e34a libsystem_kernel.dylib`mach_msg_trap + 10, stop reason = signal SIGSTOP
```
Map the main managed thread `1fbf31` to native thread index `1`:
```
(lldb) setsostid 1fbf31 1
```

documentation/design-docs/EventCounters.jpg (binary file, 142 KiB, deleted; not shown)


@ -1,384 +0,0 @@
# Diagnostics Client Library API Design
## Intro
The Diagnostics Client Library (currently named as "Runtime Client Library") - `Microsoft.Diagnostics.NetCore.Client.dll` - is a managed library that can be used to interact with the .NET runtime via the diagnostics IPC protocol as documented in https://github.com/dotnet/diagnostics/blob/master/documentation/design-docs/ipc-protocol.md. It provides managed classes for invoking the diagnostics IPC commands programmatically, and can be extended to write various diagnostics tools. It also comes with various classes that should facilitate interacting with the diagnostics IPC commands.
The name "Diagnostics Client Library" comes from the fact that we call the runtime (CoreCLR) component responsible for accepting and handling the diagnostics IPC commands the "diagnostics server" - https://github.com/dotnet/runtime/blob/master/src/coreclr/src/vm/diagnosticserver.h. Since this library is a managed library on the other side of the IPC protocol responsible for communicating with the runtime's "diagnostics server", calling this the "Diagnostics Client Library" made sense.
## Goals
The goals of this library are as follows:
* Serve as an implementation of the IPC protocol to communicate with CoreCLR's diagnostics server.
* Provide an easy-to-use API for any library/tools authors to utilize the IPC protocol
## Non-Goals
* Provide tool-specific functionalities that are too high-level (i.e. dumping the GC heap, parsing counter payloads, etc.). This would broaden the scope of this library too far and cause complexity.
* Parse event payloads (this is also command-specific and can be done by other libraries).
## Sample Code:
Here is some sample code showing the usage of this library.
#### 1. Attaching to a process and dumping out all the runtime GC events in real time to the console
This sample shows an example where we trigger an EventPipe session with the .NET runtime provider with the GC keyword at informational level, and use `EventPipeEventSource` (provided by the TraceEvent library) to parse the events coming in and print the name of each event to the console in real time.
```cs
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing.Parsers;
public void PrintRuntimeGCEvents(int processId)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime",
EventLevel.Informational, (long)ClrTraceEventParser.Keywords.GC)
};
var client = new DiagnosticsClient(processId);
using (var session = client.StartEventPipeSession(providers, false))
{
var source = new EventPipeEventSource(session.EventStream);
source.Clr.All += (TraceEvent obj) => {
Console.WriteLine(obj.EventName);
};
try
{
source.Process();
}
// NOTE: This exception does not currently exist. It is something that needs to be added to TraceEvent.
catch (EventStreamException e)
{
Console.WriteLine("Error encountered while processing events");
Console.WriteLine(e.ToString());
}
}
}
```
#### 2. Write a core dump.
This sample shows how to trigger a dump using `DiagnosticsClient`.
```cs
using Microsoft.Diagnostics.NETCore.Client;
public void TriggerCoreDump(int processId)
{
var client = new DiagnosticsClient(processId);
client.WriteDump(DumpType.Normal);
}
```
#### 3. Trigger a core dump when CPU usage goes above a certain threshold
This sample shows an example where we monitor the `cpu-usage` counter published by the .NET runtime and use the `WriteDump` API to write out a dump when the CPU usage grows beyond a certain threshold.
```cs
using Microsoft.Diagnostics.NETCore.Client;
public void TriggerDumpOnCpuUsage(int processId, int threshold)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider(
"System.Runtime",
EventLevel.Informational,
(long)ClrTraceEventParser.Keywords.None,
new Dictionary<string, string>() {
{ "EventCounterIntervalSec", "1" }
}
)
};
var client = new DiagnosticsClient(processId);
using(var session = client.StartEventPipeSession(providers))
{
var source = new EventPipeEventSource(session.EventStream);
source.Dynamic.All += (TraceEvent obj) =>
{
if (obj.EventName.Equals("EventCounters"))
{
// I know this part is ugly. But this is all TraceEvent.
IDictionary<string, object> payloadVal = (IDictionary<string, object>)(obj.PayloadValue(0));
IDictionary<string, object> payloadFields = (IDictionary<string, object>)(payloadVal["Payload"]);
if (payloadFields["Name"].ToString().Equals("cpu-usage"))
{
double cpuUsage = Double.Parse(payloadFields["Mean"].ToString());
if (cpuUsage > (double)threshold)
{
client.WriteDump(DumpType.Normal, "/tmp/minidump.dmp");
}
}
}
};
try
{
source.Process();
}
catch (EventStreamException) {}
}
}
```
#### 4. Trigger a CPU trace for given number of seconds
This sample shows an example where we trigger an EventPipe session for certain period of time, with the default CLR trace keyword as well as the sample profiler, and read from the stream that gets created as a result and write the bytes out to a file. Essentially this is what `dotnet-trace` uses internally to write a trace file.
```cs
using Microsoft.Diagnostics.NETCore.Client;
using System.Diagnostics;
using System.IO;
using System.Threading.Tasks;
public void TraceProcessForDuration(int processId, int duration, string traceName)
{
var cpuProviders = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime", EventLevel.Informational, (long)ClrTraceEventParser.Keywords.Default),
new EventPipeProvider("Microsoft-DotNETCore-SampleProfiler", EventLevel.Informational, (long)ClrTraceEventParser.Keywords.None)
};
var client = new DiagnosticsClient(processId);
using (var traceSession = client.StartEventPipeSession(cpuProviders))
{
Task copyTask = Task.Run(async () =>
{
using (FileStream fs = new FileStream(traceName, FileMode.Create, FileAccess.Write))
{
await traceSession.EventStream.CopyToAsync(fs);
}
});
copyTask.Wait(duration * 1000);
traceSession.Stop();
}
}
```
#### 5. Print names of all .NET processes that published a diagnostics server to connect
This sample shows how to use `DiagnosticsClient.GetPublishedProcesses` API to print the names of the .NET processes that published a diagnostics IPC channel.
```cs
using Microsoft.Diagnostics.NETCore.Client;
using System.Diagnostics;
using System.Linq;
public static void PrintProcessStatus()
{
var processes = DiagnosticsClient.GetPublishedProcesses()
.Select(Process.GetProcessById)
.Where(process => process != null);
foreach (var process in processes)
{
Console.WriteLine($"{process.ProcessName}");
}
}
```
#### 6. Live-parsing events for a specified period of time.
This sample shows an example where we create two tasks, one that parses the events coming in live with `EventPipeEventSource` and one that reads the console input for a user input signaling the program to end. If the target app exits before the user presses Enter, the app exits gracefully. Otherwise, `inputTask` will send the Stop command to the pipe and exit gracefully.
```cs
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing.Parsers;
public static void PrintEventsLive(int processId)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime",
EventLevel.Informational, (long)ClrTraceEventParser.Keywords.Default)
};
var client = new DiagnosticsClient(processId);
using (var session = client.StartEventPipeSession(providers, false))
{
Task streamTask = Task.Run(() =>
{
var source = new EventPipeEventSource(session.EventStream);
source.Dynamic.All += (TraceEvent obj) =>
{
Console.WriteLine(obj.EventName);
};
try
{
source.Process();
}
// NOTE: This exception does not currently exist. It is something that needs to be added to TraceEvent.
catch (Exception e)
{
Console.WriteLine("Error encountered while processing events");
Console.WriteLine(e.ToString());
}
});
Task inputTask = Task.Run(() =>
{
Console.WriteLine("Press Enter to exit");
while (Console.ReadKey().Key != ConsoleKey.Enter)
{
Thread.Sleep(100);
}
session.Stop();
});
Task.WaitAny(streamTask, inputTask);
}
}
```
#### 7. Attach a ICorProfiler profiler
This sample shows how to attach an ICorProfiler to a process (profiler attach).
```cs
public static int AttachProfiler(int processId, Guid profilerGuid, string profilerPath)
{
var client = new DiagnosticsClient(processId);
return client.AttachProfiler(TimeSpan.FromSeconds(10), profilerGuid, profilerPath);
}
```
## API Descriptions
At a high level, the DiagnosticsClient class provides static methods that the user may call to invoke diagnostics IPC commands (i.e. start an EventPipe session, request a core dump, etc.) The library also provides several classes that may be helpful for invoking these commands. These commands are described in more detail in the diagnostics IPC protocol documentation available here: https://github.com/dotnet/diagnostics/blob/master/documentation/design-docs/ipc-protocol.md#commands.
### DiagnosticsClient
This is a top-level class that contains methods to send various diagnostics command to the runtime.
```cs
namespace Microsoft.Diagnostics.NETCore.Client
{
public class DiagnosticsClient
{
public DiagnosticsClient(int processId)
/// <summary>
/// Start tracing the application via CollectTracing2 command.
/// </summary>
/// <param name="providers">An IEnumerable containing the list of Providers to turn on.</param>
/// <param name="requestRundown">If true, request rundown events from the runtime</param>
/// <param name="circularBufferMB">The size of the runtime's buffer for collecting events in MB</param>
/// <returns>
/// An EventPipeSession object representing the EventPipe session that just started.
/// </returns>
public EventPipeSession StartEventPipeSession(IEnumerable<EventPipeProvider> providers, bool requestRundown=true, int circularBufferMB=256)
/// <summary>
/// Trigger a core dump generation.
/// </summary>
/// <param name="dumpType">Type of the dump to be generated</param>
/// <param name="dumpPath">Full path to the dump to be generated. By default it is /tmp/coredump.{pid}</param>
/// <param name="logDumpGeneration">When set to true, display the dump generation debug log to the console.</param>
public void WriteDump(DumpType dumpType, string dumpPath=null, bool logDumpGeneration=false)
/// <summary>
/// Attach a profiler.
/// </summary>
/// <param name="attachTimeout">Timeout for attaching the profiler</param>
/// <param name="profilerGuid">Guid for the profiler to be attached</param>
/// <param name="profilerPath">Path to the profiler to be attached</param>
/// <param name="additionalData">Additional data to be passed to the profiler</param>
public void AttachProfiler(TimeSpan attachTimeout, Guid profilerGuid, string profilerPath, byte[] additionalData=null);
/// <summary>
/// Get all the active processes that can be attached to.
/// </summary>
/// <returns>
/// IEnumerable of all the active process IDs.
/// </returns>
public static IEnumerable<int> GetPublishedProcesses();
}
}
```
### Exceptions that can be thrown
```cs
namespace Microsoft.Diagnostics.NETCore.Client
{
// Generic wrapper for exceptions thrown by this library
public class DiagnosticsClientException : Exception {}
// When a certain command is not supported by either the library or the target process' runtime
public class UnsupportedProtocolException : DiagnosticsClientException {}
// When the runtime is no longer available for attaching.
public class ServerNotAvailableException : DiagnosticsClientException {}
// When the runtime responded with an error
public class ServerErrorException : DiagnosticsClientException {}
}
```
### EventPipeProvider
A class that describes an EventPipe provider.
```cs
namespace Microsoft.Diagnostics.NETCore.Client
{
public class EventPipeProvider
{
public EventPipeProvider(
string name,
EventLevel eventLevel,
long keywords = 0,
IDictionary<string, string> arguments = null)
public long Keywords { get; }
public EventLevel EventLevel { get; }
public string Name { get; }
public IDictionary<string, string> Arguments { get; }
public override string ToString();
public override bool Equals(object obj);
public override int GetHashCode();
public static bool operator ==(EventPipeProvider left, EventPipeProvider right);
public static bool operator !=(EventPipeProvider left, EventPipeProvider right);
}
}
```
### EventPipeSession
This is a class to represent an EventPipeSession. It is meant to be immutable and acts as a handle to each session that has been started.
```cs
namespace Microsoft.Diagnostics.NETCore.Client
{
public class EventPipeSession : IDisposable
{
public Stream EventStream { get; };
///<summary>
/// Stops the given session
///</summary>
public void Stop();
}
}
```
### DumpType (enum)
This is an enum for the dump type
```cs
namespace Microsoft.Diagnostics.NETCore.Client
{
public enum DumpType
{
Normal = 1,
WithHeap = 2,
Triage = 3,
Full = 4
}
}
```


@ -1,273 +0,0 @@
# Diagnostic Tools Extensibility
This document describes a mechanism to allow first and third party users to add custom commands and services to `dotnet-dump` and `SOS` on the supported debuggers. Such extensibility has been a frequent ask from companies like Criteo and some teams at Microsoft. The goal is to write the code for a command once and have it run under all the supported debuggers, including dotnet-dump.
Internally, the ability to host commands like the future `gcheapdiff` under dotnet-dump, lldb and cdb/windbg will be invaluable for the productivity of developers in the ecosystem. Implementing new commands and features in C# is far easier and more productive for the interested parties. Other people on .NET team and in the community are more likely to contribute improvements to our tools, similar to what Stephen did with `dumpasync`. Unlike the plugin situation, if they contribute directly to our repo then the improvements will automatically flow to all customers and provide broader value.
This effort is part of the "unified extensibility" model - where various teams are coming together to define a common abstraction across all debuggers and debugger-like hosts (dotnet-dump). Services such as Azure Watson could use this infrastructure to write commands akin to `!analyze` and other analysis tools using a subset of the DAC - provided as a service - to do some unhandled exception and stack trace triage.
## Goals
- Provide a simple set of services that hosts can implement and commands/services use.
- Easy use of the ClrMD API in commands and services.
- Host the same commands/command assemblies under various "hosts" like:
- The dotnet-dump REPL
- The lldb debugger
- The Windows debuggers (windbg/cdb)
- Visual Studio
- Create various "target" types from the command line or a command from:
- Windows minidumps
- Linux coredumps
- Live process snapshots
## Customer Value
- Improve our CSS engineer experience by providing commands in Visual Studio that keep them from needing to switch to windbg.
- Commands that CSS devs would find useful in Visual Studio that can't be done in VS any other way:
- !GCHandles - Provides statistics about GCHandles in the process.
- !ThreadPool - This command lists basic information about the ThreadPool, including the number of work requests in the queue, number of completion port threads, and number of timers.
- !SaveModule - This command allows you to take an image loaded in memory and write it to a file. This is especially useful if you are debugging a full memory dump and don't have the original DLLs or EXEs; it saves a PE module from the dump to a file.
- rest of list TBD.
- Enables support for Azure Geneva diagnostics which is using the Native AOT corert based runtime. This infrastructure will allow the necessary set of SOS commands to be written and executed across the supported platforms (Windows windbg and Linux lldb).
- Improve our internal .NET team productivity and inner loop development by providing these managed commands and the classical native SOS commands under debuggers like Visual Studio. See issue [#1397](https://github.com/dotnet/diagnostics/issues/1397).
- This plan would allow these ClrMD based commands to run across all our debuggers (dotnet-dump, windbg, lldb and Visual Studio):
- Criteo's 5 or so extension commands:
- timerinfo - display running timers details.
- pstack - display the parallel stacks
- rest of list TBD
- Existing ClrMD commands like "clrmodules" which displays the version info of the managed assemblies in the dump or process.
- List of issues that will be addressed by this work or has inspired it:
- [#1397](https://github.com/dotnet/diagnostics/issues/1397) and [#40182](https://github.com/dotnet/runtime/issues/40182) "SOS Plugin for Visual Studio". Create a VS package that allows the above extension commands to be run and various native SOS commands.
- [#565](https://github.com/dotnet/diagnostics/issues/565) "Using SOS programmatically". This plan will help enable the work described in this issue.
- [#1031](https://github.com/dotnet/diagnostics/issues/1031) "Ability to load extensions in dotnet-dump analyze". This refers to loading "sosex" and "mex" in dotnet-dump. This plan would make it easier to do this but does not actually include it.
- [#194](https://github.com/dotnet/diagnostics/issues/194) "Implement `gcheapdiff` dotnet-dump analyze command". We haven't had a lot of feedback on whether this proposed command is useful. This issue did inspire the "multi-target" part of this plan i.e. the ability to load/analyze two dumps in dotnet-dump at the same time.
## Road Map
1. Create a VS host/target package allowing SOS and the extension commands to be run from VS using the Concord API.
2. Add Linux, MacOS and Windows live snapshot targets. ClrMD already has support for them; just need to implement the target factory.
## Design
The design consists of abstractions for the debugger or code hosting this infrastructure, one or more targets that represent the dump or process being targeted, one or more .NET runtimes in the target process (both desktop framework and .NET Core runtimes are supported) and various services that are available for commands, other services and the infrastructure itself.
Each hosting environment will have a varying set of requirements for the services needed. Other than the basic set of target, memory, and module services (TBD), services can be optional. For example, hosting the native SOS code requires a richer set of interfaces than ClrMD based commands.
- ClrMD commands and possible analysis engine
- Basic target info like architecture, etc.
- Console service
- Memory services
- Simple set of module services
- Simple set of thread services
- Mechanism to expose ClrInfo/ClrRuntime instances
- Native AOT commands
- Memory services
- Console service
- Command service
- Simple module services
- Simple set of symbol services
- Mechanism to expose Runtime Snapshot Parse API.
- dotnet-dump and VS that host the native SOS code
- Target and runtime services
- Console service
- Command service
- Memory services
- Richer module services
- Thread services
- Symbol and download services
The threading model is single-threaded mainly because native debuggers like dbgeng are basically single-threaded and using async makes the implementation and the overall infrastructure way more complex.
#### Interface Hierarchy
- IHost
- Global services
- IConsoleService
- ICommandService
- ISymbolService
- IDumpTargetFactory
- IProcessSnapshotTargetFactory
- ITarget
- Per-target services
- IRuntimeService
- IRuntime
- ClrInfo
- ClrRuntime
- Runtime Snapshot Parse instance
- IMemoryService
- IModuleService
- IModule
- IThreadService
- IThread
- SOSHost
- ClrMDHelper
## Hosts
The host is the debugger or program the command and the infrastructure runs on. The goal is to allow the same code for a command to run under different programs like the dotnet-dump REPL, lldb and Windows debuggers. Under Visual Studio the host will be a VS extension package.
#### IHost
The host implements this interface to provide to the rest the global services, current target and the type of host.
See [IHost.cs](../../src/Microsoft.Diagnostics.DebugServices/IHost.cs) and [host.h](../../src/SOS/inc/host.h) for details.
## Targets
A data target represents the dump, process snapshot or live target session. It provides information and services for the target like architecture, native process, thread, and module services. For commands like gcheapdiff, the ability to open and access a second dump or process snapshot is necessary.
Because of the various hosting requirements, ClrMD's IDataReader and DataTarget should not be exposed directly; instead, one of the following interfaces and services should be used. This allows this infrastructure to provide extra functionality to ClrMD commands like the address space module mapping.
Targets are only created when there is a valid process. This means the ITarget instance in the managed infrastructure and in the native SOS code can be null. On lldb, the current target state is checked and updated if needed before each command is executed or a callback to the native SOS or the managed infrastructure is made.
The target interface provides a "Flush" callback used to clear any cached state in the per-target services when the native debuggers continue/stop. It is up to each service to register for the OnFlush target event and clear any cached state. On dbgeng an event callback (ChangeEngineState) is used that fires when the debugger stops to invoke the OnFlush event. On lldb, the stop id provided by the lldb API is used. It is checked each time a command is executed or a callback invoked. Any time it changes, the target's OnFlush event is invoked.
The "gcdump" target is a possible target that allows gcdump specific services and commands to be executed in a REPL. None of the SOS commands or ClrMD services will work but it will be easy to provide services and commands specific to the gcdump format. This may require some kind of command filtering by the target or the target provides the command service.
#### ITarget
This interface abstracts the data target and adds value with things like per-target services and the context related state like the current thread.
See [ITarget.cs](../../src/Microsoft.Diagnostics.DebugServices/ITarget.cs) and [target.h](../../src/SOS/inc/target.h) for details.
## Services
Everything a command or another service needs are provided via a service. There are global, per-target and per-command invocation services. Services like the command, console and target service and target factories are global. Services like the thread, memory and module services are per-target. Services like the current ClrRuntime instance are per-command invocation because it could change when there are multiple runtimes in a process.
For Windbg/cdb, these services will be implemented on the dbgeng API.
For lldb, these services will be implemented on the lldb extension API via some new pinvoke wrappers.
For Visual Studio, these services will be implemented on the Concord API in VS package. The hardest part of this work is loading/running the native SOS and DAC modules in a 64bit environment.
### IDumpTargetFactory
This global service allows dump ITargets to be created.
See [IDumpTargetFactory.cs](../../src/Microsoft.Diagnostics.DebugServices/IDumpTargetFactory.cs) for more details.
### ICommandService
This service provides the parsing, dispatching and executing of standardized commands. It is implemented using System.CommandLine but there should be no dependencies on System.CommandLine exposed to the commands or other services. It is an implementation detail.
See [ICommandService.cs](../../src/Microsoft.Diagnostics.DebugServices/ICommandService.cs)
### IConsoleService
Abstracts the console output across all the platforms.
See [IConsoleService.cs](../../src/Microsoft.Diagnostics.DebugServices/IConsoleService.cs).
### IMemoryService
Abstracts the memory related functions.
There are two helper IMemoryService implementations PEImageMappingMemoryService and MetadataMappingMemoryService. They are used to wrap the base native debugger memory service implementation.
PEImageMappingMemoryService is used in dotnet-dump for Windows targets to map PE images like coreclr.dll into the memory address space when the module's memory isn't present. It downloads and loads the actual module and performs the necessary fix ups.
MetadataMappingMemoryService is only used for core dumps when running under lldb to map the managed assemblies' metadata into the address space. This is needed because of the way lldb returns zeros for invalid memory for dumps generated with createdump on older runtimes (< 5.0).
The address sign extension plan for 32 bit processors (arm32/x86) is that addresses are masked on entry to the managed infrastructure from DAC or DBI callbacks or from the native SOS code in SOS.Hosting. If the native debugger the infrastructure is hosted on needs addresses to be sign extended, like dbgeng, it will happen in the debugger services layer (IDebuggerService).
See [IMemoryService.cs](../../src/Microsoft.Diagnostics.DebugServices/IMemoryService.cs).
### IThreadService and IThread
Abstracts the hosting debuggers native threads. There are functions to enumerate, get details and context about native threads.
See [IThreadService.cs](../../src/Microsoft.Diagnostics.DebugServices/IThreadService.cs) and [IThread.cs](../../src/Microsoft.Diagnostics.DebugServices/IThread.cs)
### IModuleService and IModule
Abstracts the modules in the target. Provides details like the name, base address, build id, version, etc. For some targets this includes both native and managed modules (Windows dbgeng, Linux dotnet-dump ELF dumps) but there are hosts/targets (Linux/MacOS lldb) that only provide the native modules.
One issue that may need to be addressed is that some platforms (MacOS) have non-contiguous memory sections in the module, so the basic ImageSize isn't enough. We may need (perhaps only internally) some concept of "sections" (address, size) and/or "header size". One of the main uses of ImageBase/ImageSize is to create a memory stream over the PE or module header to extract module details like the version or build id, or to search for an embedded version string.
See [IModuleService.cs](../../src/Microsoft.Diagnostics.DebugServices/IModuleService.cs) and [IModule.cs](../../src/Microsoft.Diagnostics.DebugServices/IModule.cs).
### ISymbolService
This service provides the symbol store services, like the functionality that the static APIs in SOS.NETCore's SymbolReader.cs provide now. The SOS.NETCore assembly will be removed and replaced with this symbol service implementation. Instead of directly creating delegates to static functions in that assembly, there will be a symbol service wrapper that provides these functions to the native SOS.
The current implementation of the symbol downloading support in SOS.NETCore uses sync over async calls which could cause problems in more async hosts (like VS) but it hasn't caused problems in dotnet-dump so far. To fix this there may be work in the Microsoft.SymbolStore (in the symstore repo) component to expose synchronous APIs.
See [ISymbolService.cs](../../src/Microsoft.Diagnostics.DebugServices/ISymbolService.cs) for more details.
### IRuntimeService/IRuntime
This service provides the runtime instances in the target process. The IRuntime abstracts a runtime instance, providing the ClrInfo and ClrRuntime instances from ClrMD and, in the future, the Native AOT runtime snapshot parser instance.
See [IRuntimeService.cs](../../src/Microsoft.Diagnostics.DebugServices/IRuntimeService.cs), [IRuntime.cs](../../src/Microsoft.Diagnostics.DebugServices/IRuntime.cs) and [runtime.h](../../src/SOS/inc/runtime.h) for details.
### SOSHost
This service allows native SOS commands to be executed under hosts like dotnet-dump and VS. This should probably have an interface to abstract it (TBD).
Some of the native SOS's "per-target" globals will need to be queried from this infrastructure instead of being set once on initialization. This includes things like the DAC module path, DBI module path, the temp directory path (since it contains the process id), etc. It will provide native versions of the IHost, ITarget and IRuntime interfaces to the native SOS to do this.
### IHostServices
This interface provides services to the native SOS/plugins code. It is a private interface between the native SOS code and the SOS.Extensions host. There are services to register the IDebuggerService instance, dispatch commands, and create/destroy target instances.
See [hostservices.h](../../src/SOS/inc/hostservices.h) for details.
### IDebuggerService
This native interface is what the SOS.Extensions host uses to implement the above services. This is another private interface between SOS.Extensions and the native lldb plugin or Windows SOS native code.
See [debuggerservice.h](../../src/SOS/inc/debuggerservice.h) for details.
## Projects and Assemblies
### SOS.Extensions (new)
This assembly implements the host, target and services for the native debuggers (dbgeng, lldb). It provides the IHostServices to the native "extensions" library which registers the IDebuggerService used by the service implementations.
### The "extensions" native library (new)
This is the native code that interops with the managed SOS.Extensions to host the native debuggers. It sets up the managed runtime (.NET Core on Linux/MacOS or desktop on Windows) and calls the SOS.Extensions initialization entry point. It is linked into the lldbplugin on Linux/MacOS and into SOS.dll on Windows.
### SOS.Hosting
This contains the hosting support used by the dotnet-dump REPL and an eventual Visual Studio package to run native SOS commands without a native debugger like dbgeng or lldb.
### SOS.NETCore (going away)
This currently contains the symbol download and portable PDB source/line number support for the native SOS code. It will be replaced by the symbol service and wrappers (see ISymbolService).
### Microsoft.Diagnostics.DebugServices
Contains definitions and abstractions for the various service interfaces.
### Microsoft.Diagnostics.DebugServices.Implementation (new)
Contains the common debug services implementations used by dotnet-dump and SOS.Extensions (dbgeng/lldb) hosts.
### Microsoft.Diagnostics.ExtensionCommands (new)
Contains the common commands shared with dotnet-dump and the SOS.Extensions hosts.
### Microsoft.Diagnostics.Repl
The command and console service implementations.
### dotnet-dump
The dump collection and analysis REPL global tool. It hosts the extensions layer and debug services using ClrMD's Linux and Windows minidump data readers.
### lldbplugin
The lldb plugin that provides debugger services (IDebuggerServices) to SOS.Extensions and LLDBServices to native SOS. It exposes both the native SOS commands and the new managed extension commands. It initializes the managed hosting layer via the "extensions" native library.
### Strike (SOS)
Native SOS commands and code.
On Windows, it provides the debugger services (IDebuggerServices) to SOS.Extensions and initializes the managed hosting layer via the "extensions" native library.

The file diff is not shown because of its large size.

View file

@ -1,335 +0,0 @@
# EventCounters in .NET Core 3.0
## Introduction
EventCounters are .NET Core APIs used for lightweight, cross-platform, near real-time performance metric collection. EventCounters were added as a cross-platform alternative to the "performance counters" of the .NET Framework on Windows. This documentation serves as a guide on what they are, how to implement them, and how to consume them.
The .NET Core runtime (CoreCLR) and a few .NET libraries publish basic diagnostics information using EventCounters starting in .NET Core 3.0.
Apart from the EventCounters that are already provided by the .NET runtime or the rest of the framework (e.g. ASP.NET, gRPC, etc.), you may choose to implement your own EventCounters to keep track of various metrics for your service.
EventCounters live as a part of an [EventSource](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventsource?view=netcore-3.0) and are automatically pushed to listener tools on a regular basis. Like any other events on an `EventSource`, they can be consumed both in-proc and out-of-proc via [EventListener](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventlistener?view=netcore-3.0) and EventPipe/ETW.
![EventCounter](EventCounters.jpg)
## Runtime Counters
The .NET runtime (CoreCLR) publishes the following list of counters:
`System.Runtime` provider
* CPU usage
* Working Set Size
* GC Heap Size
* Gen 0 GC Rate
* Gen 1 GC Rate
* Gen 2 GC Rate
* % Time in GC
* Gen 0 Heap Size
* Gen 1 Heap Size
* Gen 2 Heap Size
* LOH Heap Size
* Allocation Rate
* Assembly Count
* Exception Rate
* ThreadPool Thread Count
* Monitor Lock Contention Rate
* ThreadPool Queue Length
* ThreadPool Completed Items Rate
* Active Timer Count
Other components of .NET Core also publish counters:
ASP.NET Core `Microsoft.AspNetCore.Hosting` provider
* Requests per second
* Total Requests Count
* Current Requests Count
* Failed Requests Count
SignalR `Microsoft.AspNetCore.Http.Connections` provider
* Total Connections Started
* Total Connections Stopped
* Total Connections Timed Out
* Average Connection Duration
## EventCounters API Overview
At a high level, there are two types of counters in terms of their *purpose* - counters for ever-increasing values (e.g. total # of exceptions, total # of GCs, total # of requests, etc.) and "snapshot" values (heap usage, CPU usage, working set size, etc.). Within each of these categories of counters, there are also two types of counters depending on how they get their value - polling counters (value retrieved via a callback) and non-polling counters (value directly set on the counter). That gives us a total of four different counter types, and they are implemented by `EventCounter`, `PollingCounter`, `IncrementingEventCounter`, and `IncrementingPollingCounter`.
The runtime supports four different types of counters for different situations:
1. `EventCounter` records a set of values. The `WriteMetric()` method adds a new value to the set. At the end of each time interval, summary statistics for the set are computed such as the min, max, and mean. dotnet-counters will always display the mean value. EventCounter is useful to describe a discrete set of operations such as the average size in bytes of recent IO operations or the average monetary value of a set of financial transactions.
2. `IncrementingEventCounter` records a running total. The `Increment()` method increases this total. At the end of each time period the difference between the total value for that period and the total of the previous period is reported as an increment. dotnet-counters will display this as a rate, the recorded total / time. This counter is useful to measure how frequently an action is occurring such as the number of requests processed each second.
3. `PollingCounter` is a customizable counter that doesn't have any state and uses a callback to determine the value that is reported. At the end of each time interval the user provided callback function is invoked and whatever value it returns is reported as the current value of the counter. This counter can be used to query a metric from an external source, for example getting the current free bytes on a disk. It can also be used to report custom statistics that can be computed on demand by an application such as 95th percentile of recent request latencies or the current hit/miss ratio of a cache.
4. `IncrementingPollingCounter` is a customizable counter that has no state and uses a callback to determine the increment that is reported. At the end of each time interval the callback is invoked and then the difference between the current invocation and the last invocation is the reported value. `dotnet-counters` always displays this as a rate, the reported value / time. This is useful to measure the rate at which some action is occurring when it isn't feasible to call an API on each occurrence, but it is possible to query the total number of times it has occurred. For example you could report the number of bytes written to a file / sec even if there is no notification each time a byte is written.
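The sections below show `EventCounter`, `PollingCounter`, and `IncrementingPollingCounter` in use. For completeness, here is a minimal sketch of the remaining type, `IncrementingEventCounter`; the event source name and counter name are made up for the example.
```cs
using System;
using System.Diagnostics.Tracing;

[EventSource(Name = "Samples-EventCounterDemos-Incrementing")]
public sealed class RequestCountEventSource : EventSource
{
    public static RequestCountEventSource Log = new RequestCountEventSource();

    private IncrementingEventCounter _requestCounter;

    private RequestCountEventSource()
    {
        // A running total; listeners see the per-interval increment, displayed as a rate.
        _requestCounter = new IncrementingEventCounter("total-requests", this)
        {
            DisplayName = "Total Requests",
            DisplayRateTimeScale = TimeSpan.FromSeconds(1)
        };
    }

    // Call this each time a request is handled.
    public void RequestCompleted() => _requestCounter.Increment();
}
```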
## Writing EventCounters
The following code implements a sample `EventSource` exposed as the `Samples-EventCounterDemos-Minimal` provider. This source contains an `EventCounter` representing request processing time. Such a counter has a name (i.e. its unique ID in the source) and a display name, both of which are used by listener tools such as dotnet-counters.
```cs
using System;
using System.Diagnostics.Tracing;
[EventSource(Name = "Samples-EventCounterDemos-Minimal")]
public sealed class MinimalEventCounterSource : EventSource
{
    // define the singleton instance of the event source
    public static MinimalEventCounterSource Log = new MinimalEventCounterSource();
    public EventCounter RequestTimeCounter;

    private MinimalEventCounterSource() : base(EventSourceSettings.EtwSelfDescribingEventFormat)
    {
        this.RequestTimeCounter = new EventCounter("request-time", this)
        {
            DisplayName = "Request Processing Time",
            DisplayUnits = "MSec"
        };
    }

    public static void Main()
    {
        var rand = new Random();
        while (true)
        {
            MinimalEventCounterSource.Log.RequestTimeCounter.WriteMetric(rand.NextDouble());
        }
    }
}
```
Create a new dotnet console app using the code above and run it. Then use `dotnet-counters ps` to see what its process ID is:
```cmd
C:\>dotnet-counters ps
1398652 dotnet C:\Program Files\dotnet\dotnet.exe
1399072 dotnet C:\Program Files\dotnet\dotnet.exe
1399112 dotnet C:\Program Files\dotnet\dotnet.exe
1401880 dotnet C:\Program Files\dotnet\dotnet.exe
1400180 sample-counters C:\sample-counters\bin\Debug\netcoreapp3.1\sample-counters.exe
```
You need to pass the `EventSource` name as an argument to `--providers` to start monitoring your counter with the following command:
```cmd
C:\>dotnet-counters monitor --process-id 1400180 --providers Samples-EventCounterDemos-Minimal
```
Then you will see the following screen in your console:
```
Press p to pause, r to resume, q to quit.
Status: Running
[Samples-EventCounterDemos-Minimal]
Request Processing Time (MSec) 0.445
```
Let's take a look at a couple of sample EventCounter implementations in the .NET Core runtime (CoreCLR). Here is the runtime's implementation of the counter that tracks the working set size of the application.
```cs
PollingCounter workingSetCounter = new PollingCounter(
    "working-set",
    this,
    () => (double)(Environment.WorkingSet / 1_000_000))
{
    DisplayName = "Working Set",
    DisplayUnits = "MB"
};
```
This counter reports the current working set of the app. It is a `PollingCounter`, since it captures a metric at a moment in time. The callback for polling the values is `() => (double)(Environment.WorkingSet / 1_000_000)`, which is simply a call to the `Environment.WorkingSet` API. `DisplayName` and `DisplayUnits` are optional properties that can be set to help the consumer side of the counter display the value more easily/accurately. For example, `dotnet-counters` uses these properties to display a "prettier" version of the counter names.
And that's it! For `PollingCounter` (or `IncrementingPollingCounter`), there is nothing else that needs to be done since they poll the values themselves at the interval requested by the consumer.
Here is another example of a runtime counter, implemented using `IncrementingPollingCounter`.
```cs
IncrementingPollingCounter monitorContentionCounter = new IncrementingPollingCounter(
    "monitor-lock-contention-count",
    this,
    () => Monitor.LockContentionCount)
{
    DisplayName = "Monitor Lock Contention Count",
    DisplayRateTimeScale = new TimeSpan(0, 0, 1)
};
```
This counter uses the [Monitor.LockContentionCount](https://docs.microsoft.com/en-us/dotnet/api/system.threading.monitor.lockcontentioncount?view=netcore-3.0) API to report the increment of the total lock contention count. The `DisplayRateTimeScale` property is an optional `TimeSpan` which can be set to provide a hint of what time interval this counter is best displayed at. For example, the lock contention count is best displayed as *count per second*, so its `DisplayRateTimeScale` is set to 1 second. This can be adjusted for different types of rate counters.
There are more runtime counter implementations to use as a reference in the [CoreCLR](https://github.com/dotnet/runtime/blob/master/src/coreclr/src/System.Private.CoreLib/src/System/Diagnostics/Eventing/RuntimeEventSource.cs) repo.
## Concurrency
It is important to note that if the delegates passed to the `PollingCounter`/`IncrementingPollingCounter` instances are called by multiple threads at once, the EventCounters API does not guarantee thread safety. It is the author's responsibility to guarantee the thread-safety of the delegates being passed to the counter APIs.
For example, let's suppose we have the following `EventSource` to keep track of requests.
```cs
public class RequestEventSource : EventSource
{
    // singleton instance of the eventsource.
    public static RequestEventSource Log = new RequestEventSource();
    public IncrementingPollingCounter requestRateCounter;
    private int _requestCnt;

    private RequestEventSource() : base(EventSourceSettings.EtwSelfDescribingEventFormat)
    {
        _requestCnt = 0;
        this.requestRateCounter = new IncrementingPollingCounter("request-rate", this, () => _requestCnt)
        {
            DisplayName = "Request Rate",
            DisplayRateTimeScale = TimeSpan.FromSeconds(1)
        };
    }

    // Method being called from request handlers to log that a request happened
    public void AddRequest()
    {
        _requestCnt += 1;
    }
}
```
`RequestEventSource.AddRequest()` can be called from a request handler, and `requestRateCounter` simply polls this value at the interval specified by the consumer of this counter. However, this method can be called by multiple threads at once, creating a race condition on `_requestCnt`.
Therefore, this method should be modified to update the value in a thread-safe way.
```cs
public void AddRequest()
{
    Interlocked.Increment(ref _requestCnt);
}
```
## Consuming EventCounters
There are two main ways of consuming EventCounters: in-proc and out-of-proc.
### Consuming in-proc
You can consume the counter values via the `EventListener` API. `EventListener` is an in-proc way of consuming any Events written by all instances of EventSources in your application. For more details on how to use the EventListener API, refer to the [EventListener documentation](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventlistener).
First, the EventSource that produces the counter value needs to be enabled. To do this, you can override the `OnEventSourceCreated` method to get a notification when an EventSource is created, and if this is the correct EventSource with your EventCounters, then you can call Enable on it. Here is an example of such override:
```cs
protected override void OnEventSourceCreated(EventSource source)
{
    if (source.Name.Equals("System.Runtime"))
    {
        Dictionary<string, string> refreshInterval = new Dictionary<string, string>()
        {
            { "EventCounterIntervalSec", "1" }
        };
        EnableEvents(source, EventLevel.Verbose, EventKeywords.All, refreshInterval);
    }
}
```
#### Sample Code
This is a sample `EventListener` class that simply prints out all the counter names and values from the .NET runtime's EventSource for publishing its internal counters (`System.Runtime`) at the specified interval.
```cs
public class SimpleEventListener : EventListener
{
    private readonly EventLevel _level = EventLevel.Verbose;
    public int EventCount { get; private set; } = 0;
    private int _intervalSec;

    public SimpleEventListener(int intervalSec)
    {
        _intervalSec = intervalSec;
    }

    protected override void OnEventSourceCreated(EventSource source)
    {
        if (source.Name.Equals("System.Runtime"))
        {
            var refreshInterval = new Dictionary<string, string>();
            refreshInterval.Add("EventCounterIntervalSec", _intervalSec.ToString());
            EnableEvents(source, _level, (EventKeywords)(-1), refreshInterval);
        }
    }

    private (string Name, string Value) GetRelevantMetric(IDictionary<string, object> eventPayload)
    {
        string counterName = "";
        string counterValue = "";
        foreach (KeyValuePair<string, object> payload in eventPayload)
        {
            string key = payload.Key;
            string val = payload.Value.ToString();
            if (key.Equals("DisplayName"))
            {
                counterName = val;
            }
            else if (key.Equals("Mean") || key.Equals("Increment"))
            {
                counterValue = val;
            }
        }
        return (counterName, counterValue);
    }

    protected override void OnEventWritten(EventWrittenEventArgs eventData)
    {
        if (eventData.EventName.Equals("EventCounters"))
        {
            for (int i = 0; i < eventData.Payload.Count; i++)
            {
                IDictionary<string, object> eventPayload = eventData.Payload[i] as IDictionary<string, object>;
                if (eventPayload != null)
                {
                    var counterKV = GetRelevantMetric(eventPayload);
                    Console.WriteLine($"{counterKV.Name} : {counterKV.Value}");
                }
            }
        }
    }
}
```
As shown above, you *must* make sure the `"EventCounterIntervalSec"` argument is set in the filter payload when calling `EnableEvents`. Otherwise the counters will not flush out values, since they don't know at which interval they should be flushed.
### Consuming out-of-proc
Consuming EventCounters out-of-proc is also possible. For those that are familiar with ETW (Event Tracing for Windows), you can use ETW to capture counter data as events and view them on your ETW trace viewer (PerfView, WPA, etc.). You may also use `dotnet-counters` to consume them cross-platform via EventPipe, or use the TraceEvent library to consume them programmatically.
#### dotnet-counters
dotnet-counters is a cross-platform dotnet CLI tool that can be used to monitor the counter values. To find out how to use `dotnet-counters` to monitor your counters, refer to the [dotnet-counters documentation](https://github.com/dotnet/diagnostics/blob/master/documentation/dotnet-counters-instructions.md).
#### ETW/PerfView
Since EventCounter payloads are reported as `EventSource` events, you can use PerfView to collect and view this counter data.
Here is a command that can be passed to PerfView to collect an ETW trace with the counters.
```
PerfView.exe /onlyProviders=*Samples-EventCounterDemos-Minimal:EventCounterIntervalSec=1 collect
```
#### dotnet-trace
Similar to how PerfView can be used to consume the counter data through ETW, dotnet-trace can be used to consume the counter data through EventPipe.
Here is an example of using dotnet-trace to get the same counter data.
```
dotnet-trace collect --process-id <pid> --providers Samples-EventCounterDemos-Minimal:0:0:EventCounterIntervalSec=1
```
The official dotnet-trace documentation contains a [section](https://github.com/dotnet/diagnostics/blob/master/documentation/dotnet-trace-instructions.md#using-dotnet-trace-to-collect-counter-values-over-time) on how to do this in more detail.
#### TraceEvent
TraceEvent is a managed library that makes it easy to consume ETW and EventPipe events. For more information, refer to the [TraceEvent Library Programmers Guide](https://github.com/Microsoft/perfview/blob/master/documentation/TraceEvent/TraceEventProgrammersGuide.md).
For some more detailed code samples, you can also try reading [Criteo Labs blog](https://medium.com/criteo-labs/net-core-counters-internals-how-to-integrate-counters-in-your-monitoring-pipeline-5354cd61b42e) on how to do this.

View file

@ -1,360 +0,0 @@
*(Deleted SVG file: a Mermaid-generated sequence diagram of the generic Diagnostic IPC flow, in which the Runtime and Client actors exchange Diagnostic IPC Messages, optionally followed by command-specific communication labeled "Optional Continuation".)*


View file

@ -1,985 +0,0 @@
# Diagnostic IPC Protocol
## Overview
This spec describes the IPC Protocol to be used for communicating with the dotnet core runtime's Diagnostics Server from an external client over a platform-specific transport, e.g., Unix Domain Sockets.
### Terminology
The protocol will use the following names for various constructs and behaviors defined in this spec:
* *Diagnostic IPC Protocol*: The protocol defined in this spec
* *Diagnostic Server*: The server in the runtime that receives/sends Diagnostic IPC Protocol communication.
* *Commands*: The functionality being invoked in the runtime that communicates over the Diagnostic IPC Protocol, e.g., "Start an EventPipe stream". These are encoded as a `command_set` and a `command_id`.
* *Flow*: A sequence of interactions making up communication with the Diagnostics Server
* *Pipe*: The duplex communication construct this protocol is communicated over. This is a Unix Domain Socket on *nix systems and a Named Pipe on Windows.
* *IPC Message*: The base unit of communication over the Diagnostic IPC Protocol. It is made up of a Header and a Payload.
* *Header*: A struct containing a magic version, the size, a command, and metadata.
* *Payload*: An opaque chunk of data that is Command specific.
* *Optional Continuation*: The reuse of the pipe for application specific communication. This communication does not need to adhere to any requirements listed in this spec, e.g., this could be a stream of custom encoded data that is Command specific.
## General Flow
All communication with the Diagnostic Server will begin with a Diagnostic IPC Message sent from the client to the server. The server will respond with a Diagnostic IPC Message. After this, the client and runtime _may_ reuse the Pipe for any Command specific communication which is referred to as an Optional Continuation.
![Generic Flow](ipc-protocol-genericflow.svg)
```
runtime <- client : [ Header ][ Payload ]
runtime -> client : [ Header ][ Payload ]
optional:
runtime <-> client : [ Optional Continuation ]
connection closed
```
Example flow for EventPipe:
```
runtime <- client : [ magic; size; EventPipe CollectTracing ][ stream config struct ] <- Diagnostic IPC Message
runtime -> client : [ magic; size; Server OK ][ sessionId ] <- Diagnostic IPC Message
runtime -> client : [ stream of nettrace data ] <- Optional Continuation
// stop message is sent on another connection
connection closed
```
## Transport
The protocol will be communicated over a platform-specific transport. On Unix/Linux based platforms, a Unix Domain Socket will be used, and on Windows, a Named Pipe will be used.
#### Naming and Location Conventions
Unix Domain Sockets (MacOS and *nix):
The socket is placed in one of two places:
1. The directory specified in `$TMPDIR`
2. `/tmp` if `$TMPDIR` is undefined/empty
In order to ensure filename uniqueness, a `disambiguation key` is generated. On Mac and NetBSD, this is the process start time encoded as the number of seconds since UNIX epoch time. If `/proc/$PID/stat` is available (all other *nix platforms), then the process start time encoded as jiffies since boot time is used.
> NOTE: If the target application is running inside an application sandbox on MacOS, the transport will be placed in the Application Group container directory. This is a convention for all sandboxed applications on MacOS.
socket name:
```c
dotnet-diagnostic-{%d:PID}-{%llu:disambiguation key}-socket
```
Named Pipes (Windows):
```
\\.\pipe\dotnet-diagnostic-{%d:PID}
```
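As a concrete, non-normative illustration, a Windows client can open the named pipe directly with `NamedPipeClientStream`; on other platforms it would connect a Unix domain socket to the path described above. The helper name and timeout below are made up for the sketch.
```cs
using System.IO.Pipes;

static class DiagnosticTransport // hypothetical helper, not part of the spec
{
    // Connects to the default diagnostic transport of a target process on Windows.
    public static NamedPipeClientStream ConnectWindows(int pid)
    {
        // The pipe name follows the convention above; the \\.\pipe\ prefix is implied.
        var pipe = new NamedPipeClientStream(".", $"dotnet-diagnostic-{pid}",
                                             PipeDirection.InOut);
        pipe.Connect(timeout: 3000); // milliseconds; choice of timeout is arbitrary
        return pipe;                 // Diagnostic IPC Messages are exchanged over this stream
    }
}
```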
## Messages
Diagnostic IPC Messages are the base unit of communication with the Diagnostic Server. A Diagnostic IPC Message contains a Header and Payload (described in following sections).
<table>
<tr>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>10</th>
<th>11</th>
<th>12</th>
<th>13</th>
<th>14</th>
<th>15</th>
<th>16</th>
<th>17</th>
<th>18</th>
<th>19</th>
<th>20</th>
<th>21</th>
<th>22</th>
<th>23</th>
<th>24</th>
<th>...</th>
<th>size - 1 </th>
<th>size</th>
</tr>
<tr>
<td colspan="20">header</td>
<td colspan="7">payload</td>
</tr>
<tr>
<td colspan="14">magic</td>
<td colspan="2">size</td>
<td colspan="1">command_set</td>
<td colspan="1">command_id</td>
<td colspan="2">reserved</td>
<td colspan="7">payload</td>
</tr>
</table>
The simplest Diagnostic IPC Message will contain a Header and an empty Payload and therefore only be 20 bytes long.
For example, this IPC Message is the generic OK message which has an empty Payload:
<table>
<tr>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>10</th>
<th>11</th>
<th>12</th>
<th>13</th>
<th>14</th>
<th>15</th>
<th>16</th>
<th>17</th>
<th>18</th>
<th>19</th>
<th>20</th>
</tr>
<tr>
<tr>
<td colspan="14">magic</td>
<td colspan="2">size</td>
<td colspan="2">command</td>
<td colspan="2">reserved</td>
</tr>
<tr>
<td colspan="14">"DOTNET_IPC_V1"</td>
<td colspan="2">20</td>
<td colspan="1">0xFF</td>
<td colspan="1">0x00</td>
<td colspan="2">0x0000</td>
</tr>
</table>
### Headers
Every Diagnostic IPC Message will start with a header and every header will:
* start with a magic version number and a size
* `sizeof(IpcHeader) == 20`
* encode numbers little-endian
* account for the size of the payload in the `size` value, i.e., `IpcHeader.size == sizeof(IpcHeader) + PayloadStruct.GetSize()`
```c
// size = 14 + 2 + 1 + 1 + 2 = 20 bytes
struct IpcHeader
{
uint8_t[14] magic = "DOTNET_IPC_V1";
uint16_t size; // size of packet = size of header + payload
uint8_t command_set; // combined with command_id is the Command to invoke
uint8_t command_id; // combined with command_set is the Command to invoke
uint16_t reserved; // for potential future use
};
```
The `reserved` field is reserved for future use. It is unused in `DOTNET_IPC_V1` and must be 0x0000.
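To make the layout concrete, here is a non-normative sketch of writing the 20-byte header from managed code; `BinaryWriter` conveniently emits the little-endian encoding required above.
```cs
using System.IO;
using System.Text;

static class IpcHeaderWriter // illustrative helper, not part of the spec
{
    public static byte[] Encode(ushort totalSize, byte commandSet, byte commandId)
    {
        // "DOTNET_IPC_V1" is 13 ASCII characters; the 14th magic byte is the null terminator.
        byte[] magic = Encoding.ASCII.GetBytes("DOTNET_IPC_V1\0");

        var ms = new MemoryStream(20);
        using (var writer = new BinaryWriter(ms))
        {
            writer.Write(magic);        // 14 bytes
            writer.Write(totalSize);    // 2 bytes, little-endian: header size + payload size
            writer.Write(commandSet);   // 1 byte
            writer.Write(commandId);    // 1 byte
            writer.Write((ushort)0);    // 2 bytes reserved, must be 0x0000
        }
        return ms.ToArray();            // always 20 bytes
    }
}
```
For instance, the generic OK message shown earlier would be `IpcHeaderWriter.Encode(20, 0xFF, 0x00)`.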
### Payloads
Payloads are Command specific data encoded into a Diagnostic IPC Message. The size of the payload is implicitly encoded in the Header's `size` field as `PayloadSize = header.size - sizeof(struct IpcHeader)`. A Payload _may_ be 0 bytes long if it is empty. The encoding of data in the Payload is Command specific.
Payloads are either encoded as fixed-size structures that can be `memcpy`'ed, _or_:
* `X, Y, Z` means encode bytes for `X` followed by bytes for `Y` followed by bytes for `Z`
* `uint` = 4 little endian bytes
* `ulong` = 8 little endian bytes
* `wchar` = 2 little endian bytes, UTF16 encoding
* `array<T>` = uint length, length # of `T`s
* `string` = (`array<wchar>` where the last `wchar` must = `0`) or (length = `0`)
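For instance, a `string` field is a `uint` character count (which includes the terminating null) followed by the UTF-16 code units. Here is a non-normative sketch of that encoding, for the non-empty case:
```cs
using System.IO;
using System.Text;

static class IpcPayloadWriter // illustrative helper, not part of the spec
{
    // Writes a Diagnostic IPC "string": uint length in wchars (including the
    // terminating null) followed by the UTF-16 little-endian characters.
    public static void WriteString(BinaryWriter writer, string value)
    {
        writer.Write((uint)(value.Length + 1));          // character count + null terminator
        writer.Write(Encoding.Unicode.GetBytes(value));  // UTF-16LE code units
        writer.Write((ushort)0);                         // terminating null wchar
    }
}
```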
As an example, the CollectTracing command to EventPipe (explained below) encodes its Payload as:
<table>
<tr>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>10</th>
<th>11</th>
<th>12</th>
<th>13</th>
<th>14</th>
<th>15</th>
<th>16</th>
<th>17</th>
<th>18</th>
<th>19</th>
<th>20</th>
<th>21</th>
<th>22</th>
<th>23</th>
<th>24</th>
<th>25</th>
<th>26</th>
<th>27</th>
<th>28</th>
<th>29</th>
<th>30</th>
<th>31</th>
<th>32</th>
<th>33</th>
<th>34</th>
<th>35</th>
<th>36</th>
<th>37</th>
<th>38</th>
<th>39</th>
<th>40</th>
<th>41</th>
<th>42</th>
<th>43</th>
<th>44</th>
<th>45</th>
<th>46</th>
<th>47</th>
<th>48</th>
<th>49</th>
<th>50</th>
<th>51</th>
<th>52</th>
<th>53</th>
<th>54</th>
<th>55</th>
<th>56</th>
<th>57</th>
<th>58</th>
<th>59</th>
<th>60</th>
<th>61</th>
<th>62</th>
<th>63</th>
<th>64</th>
<th>65</th>
<th>66</th>
<th>67</th>
<th>68</th>
<th>69</th>
<th>70</th>
<th>71</th>
<th>72</th>
<th>73</th>
<th>74</th>
<th>75</th>
<th>76</th>
<th>77</th>
<th>78</th>
</tr>
<tr>
<td colspan="20">Header</td>
<td colspan="58">Payload</td>
</tr>
<tr>
<td colspan="14">magic</td>
<td colspan="2">size</td>
<td colspan="2">command</td>
<td colspan="2">reserved</td>
<td colspan="4">circularBufferMB</td>
<td colspan="4">outputPath Length</td>
<td colspan="16">outputPath String</td>
<td colspan="4">n Providers</td>
<td colspan="8">Keywords</td>
<td colspan="4">logLevel</td>
<td colspan="4">provider_name length</td>
<td colspan="14">provider_name string</td>
</tr>
<tr>
<td colspan="14">"DOTNET_IPC_V1"</td>
<td colspan="2">78</td>
<td colspan="2">0x0202</td>
<td colspan="2">0x0000</td>
<td colspan="4">250</td>
<td colspan="4">16</td>
<td colspan="16">"/tmp/foo.nettrace"</td>
<td colspan="4">1</td>
<td colspan="8">100</td>
<td colspan="4">2</td>
<td colspan="4">14</td>
<td colspan="14">"MyEventSource"</td>
</tr>
</table>
Where `0x0202` is the Command to start streaming with EventPipe.
### Commands
Commands are a `command_set` and a `command_id`. A `command_set` is analogous to a namespace for `command_id`s. The typical grouping is by service running on the Diagnostic Server, e.g., there is a `command_set` for EventPipe. This allows multiple services to have the same `command_id`s without clashing. The combination of a `command_set` and a `command_id` encodes the Command being invoked on the Diagnostic Server.
The current set of `command_set`s and `command_id`s are listed below:
```c++
enum class CommandSet : uint8_t
{
// reserved = 0x00,
Dump = 0x01,
EventPipe = 0x02,
Profiler = 0x03,
Process = 0x04,
// future
Server = 0xFF,
};
```
```c++
enum class ServerCommandId : uint8_t
{
OK = 0x00,
Error = 0xFF,
};
```
```c++
enum class EventPipeCommandId : uint8_t
{
// reserved = 0x00,
StopTracing = 0x01, // stop a given session
CollectTracing = 0x02, // create/start a given session
CollectTracing2 = 0x03, // create/start a given session with/without rundown
}
```
See: [EventPipe Commands](#EventPipe-Commands)
```c++
enum class DumpCommandId : uint8_t
{
// reserved = 0x00,
CreateCoreDump = 0x01,
// future
}
```
See: [Dump Commands](#Dump-Commands)
```c++
enum class ProfilerCommandId : uint8_t
{
// reserved = 0x00,
AttachProfiler = 0x01,
// future
}
```
See: [Profiler Commands](#Profiler-Commands)
```c++
enum class ProcessCommandId : uint8_t
{
ProcessInfo = 0x00,
ResumeRuntime = 0x01,
// future
}
```
See: [Process Commands](#Process-Commands)
Commands may use the generic `{ magic="DOTNET_IPC_V1"; size=20; command_set=0xFF (Server); command_id=0x00 (OK); reserved = 0x0000; }` to indicate success rather than having a command specific success `command_id`.
For example, the Command to start a stream session with EventPipe would be `0x0202` made up of `0x02` (the `command_set` for EventPipe) and `0x02` (the `command_id` for CollectTracing).
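As a small, illustrative sketch, the two command bytes can be combined into the `0x0202` rendering used throughout this document:
```cs
byte commandSet = 0x02; // EventPipe
byte commandId  = 0x02; // CollectTracing
// On the wire these are two separate bytes (command_set, then command_id);
// this document renders them together as 0x0202.
ushort commandCode = (ushort)((commandSet << 8) | commandId);
```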
## EventPipe Commands
```c++
enum class EventPipeCommandId : uint8_t
{
// reserved = 0x00,
StopTracing = 0x01, // stop a given session
CollectTracing = 0x02, // create/start a given session
CollectTracing2 = 0x03, // create/start a given session with/without rundown
}
```
EventPipe Payloads are encoded with the following rules:
* `X, Y, Z` means encode bytes for `X` followed by bytes for `Y` followed by bytes for `Z`
* `uint` = 4 little endian bytes
* `ulong` = 8 little endian bytes
* `wchar` = 2 little endian bytes, UTF16 encoding
* `byte` = 1 unsigned little endian byte
* `array<T>` = uint length, length # of `T`s
* `string` = (`array<wchar>` where the last `wchar` must = `0`) or (length = `0`)
### `CollectTracing`
Command Code: `0x0202`
The `CollectTracing` Command is used to start a streaming session of event data. The runtime will attempt to start a session and respond with a success message with a payload of the `sessionId`. The event data is streamed in the `nettrace` format. The stream begins after the response Message from the runtime to the client. The client is expected to continue to listen on the transport until the connection is closed.
In the event there is an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
The client is expected to send a [`StopTracing`](#StopTracing) command to the runtime in order to stop the stream, as there is a "run down" at the end of a stream session that transmits additional metadata.
If the stream is stopped prematurely due to a client or server error, the `nettrace` file generated will be incomplete and should be considered corrupted.
#### Inputs:
Header: `{ Magic; Size; 0x0202; 0x0000 }`
* `uint circularBufferMB`: The size of the circular buffer used for buffering event data while streaming
* `uint format`: 0 for the legacy NetPerf format and 1 for the NetTrace format
* `array<provider_config> providers`: The providers to turn on for the streaming session
A `provider_config` is composed of the following data:
* `ulong keywords`: The keywords to turn on with this provider
* `uint logLevel`: The level of information to turn on
* `string provider_name`: The name of the provider
* `string filter_data` (optional): Filter information
> see ETW documentation for a more detailed explanation of Keywords, Filters, and Log Level.
#### Returns (as an IPC Message Payload):
Header: `{ Magic; 28; 0xFF00; 0x0000; }`
`CollectTracing` returns:
* `ulong sessionId`: the ID for the stream session starting on the current connection
##### Details:
Input:
```
Payload
{
uint circularBufferMB,
uint format,
array<provider_config> providers
}
provider_config
{
ulong keywords,
uint logLevel,
string provider_name,
string filter_data (optional)
}
```
Returns:
```c
Payload
{
ulong sessionId
}
```
Followed by an Optional Continuation of a `nettrace` format stream of events.
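Putting the pieces together, a client might assemble a `CollectTracing` request like this. This is only a sketch: it reuses the hypothetical `IpcHeaderWriter`/`IpcPayloadWriter` helpers from the earlier sketches, and the buffer size, provider, keywords, and level are illustrative.
```cs
using System.IO;

static class CollectTracingExample // illustrative only, not part of the spec
{
    public static byte[] BuildRequest()
    {
        var payload = new MemoryStream();
        using (var w = new BinaryWriter(payload))
        {
            w.Write((uint)250);                      // circularBufferMB
            w.Write((uint)1);                        // format: 1 = NetTrace
            w.Write((uint)1);                        // provider array length: one provider_config
            w.Write(0xFFFFFFFFFFFFFFFF);             // keywords (ulong): everything
            w.Write((uint)5);                        // logLevel: 5 = verbose
            IpcPayloadWriter.WriteString(w, "Microsoft-Windows-DotNETRuntime"); // provider_name
            IpcPayloadWriter.WriteString(w, "");     // filter_data: none
        }

        byte[] payloadBytes = payload.ToArray();
        byte[] header = IpcHeaderWriter.Encode(
            (ushort)(20 + payloadBytes.Length),      // size = header + payload
            0x02,                                    // command_set: EventPipe
            0x02);                                   // command_id: CollectTracing

        byte[] message = new byte[header.Length + payloadBytes.Length];
        header.CopyTo(message, 0);
        payloadBytes.CopyTo(message, header.Length);
        return message;  // write this to the transport, then read the response message
    }
}
```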
### `CollectTracing2`
Command Code: `0x0203`
The `CollectTracing2` Command is an extension of the `CollectTracing` command - its behavior is the same as `CollectTracing` command, except that it has another field that lets you specify whether rundown events should be fired by the runtime.
#### Inputs:
Header: `{ Magic; Size; 0x0203; 0x0000 }`
* `uint circularBufferMB`: The size of the circular buffer used for buffering event data while streaming
* `uint format`: 0 for the legacy NetPerf format and 1 for the NetTrace format
* `bool requestRundown`: Indicates whether rundown should be fired by the runtime.
* `array<provider_config> providers`: The providers to turn on for the streaming session
A `provider_config` is composed of the following data:
* `ulong keywords`: The keywords to turn on with this provider
* `uint logLevel`: The level of information to turn on
* `string provider_name`: The name of the provider
* `string filter_data` (optional): Filter information
> see ETW documentation for a more detailed explanation of Keywords, Filters, and Log Level.
>
#### Returns (as an IPC Message Payload):
Header: `{ Magic; 28; 0xFF00; 0x0000; }`
`CollectTracing2` returns:
* `ulong sessionId`: the ID for the stream session starting on the current connection
##### Details:
Input:
```
Payload
{
uint circularBufferMB,
uint format,
bool requestRundown,
array<provider_config> providers
}
provider_config
{
ulong keywords,
uint logLevel,
string provider_name,
string filter_data (optional)
}
```
Returns:
```c
Payload
{
ulong sessionId
}
```
Followed by an Optional Continuation of a `nettrace` format stream of events.
### `StopTracing`
Command Code: `0x0201`
The `StopTracing` command is used to stop a specific streaming session. Clients are expected to use this command to stop streaming sessions started with [`CollectTracing`](#CollectTracing), as there is a "run down" at the end of a stream session that transmits additional metadata.
#### Inputs:
Header: `{ Magic; 28; 0x0201; 0x0000 }`
* `ulong sessionId`: The ID for the streaming session to stop
#### Returns:
Header: `{ Magic; 28; 0xFF00; 0x0000 }`
* `ulong sessionId`: the ID for the streaming session that was stopped
##### Details:
Inputs:
```c
Payload
{
ulong sessionId
}
```
Returns:
```c
Payload
{
ulong sessionId
}
```
## Dump Commands
### `CreateCoreDump`
Command Code: `0x0101`
The `CreateCoreDump` command is used to instruct the runtime to generate a core dump of the process. The command will keep the connection open while the dump is generated and then respond with a message containing an `HRESULT` indicating success or failure.
In the event of an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
#### Inputs:
Header: `{ Magic; Size; 0x0101; 0x0000 }`
* `string dumpName`: The name of the dump generated.
* `uint dumpType`: A value between 1 and 4 inclusive that indicates the type of dump to take
* Normal = 1,
* WithHeap = 2,
* Triage = 3,
* Full = 4
* `uint diagnostics`: If set to 1, log to console the dump generation diagnostics
* `0` or `1` for on or off
#### Returns (as an IPC Message Payload):
Header: `{ Magic; 28; 0xFF00; 0x0000; }`
`CreateCoreDump` returns:
* `int32 hresult`: The result of creating the core dump (`0` indicates success)
##### Details:
Input:
```
Payload
{
string dumpName,
uint dumpType,
uint diagnostics
}
```
Returns:
```c
Payload
{
int32 hresult
}
```
## Profiler Commands
### `AttachProfiler`
Command Code: `0x0301`
The `AttachProfiler` command is used to attach a profiler to the runtime. The command will keep the connection open while the profiler is being attached and then respond with a message containing an `HRESULT` indicating success or failure.
In the event of an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
#### Inputs:
Header: `{ Magic; Size; 0x0301; 0x0000 }`
* `uint32 attachTimeout`: The timeout for attaching to the profiler (in milliseconds)
* `CLSID profilerGuid`: The GUID associated with the profiler
* `string profilerPath`: Location of the profiler
* `array<byte> clientData`: The data being provided to the profiler
Where a `CLSID` is a fixed size struct consisting of:
* `uint x`
* `byte s1`
* `byte s2`
* `byte[8] c`
#### Returns (as an IPC Message Payload):
Header: `{ Magic; 28; 0xFF00; 0x0000; }`
`AttachProfiler` returns:
* `int32 hresult`: The result of attaching the profiler (`0` indicates success)
##### Details:
Input:
```
Payload
{
uint32 dwAttachTimeout
CLSID profilerGuid
string profilerPath
uint32 clientDataSize
array<byte> pClientData
}
```
Returns:
```c
Payload
{
int32 hresult
}
```
## Process Commands
> Available since .NET 5.0
### `ProcessInfo`
Command Code: `0x0400`
The `ProcessInfo` command queries the runtime for some basic information about the process.
In the event of an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
#### Inputs:
Header: `{ Magic; Size; 0x0400; 0x0000 }`
There is no payload.
#### Returns (as an IPC Message Payload):
Header: `{ Magic; size; 0xFF00; 0x0000; }`
Payload:
* `int64 processId`: the process id in the process's PID-space
* `GUID runtimeCookie`: a 128-bit GUID that should be unique across PID-spaces
* `string commandLine`: the command line that invoked the process
* Windows: will be the same as the output of `GetCommandLineW`
* Non-Windows: will be the fully qualified path of the executable in `argv[0]` followed by all arguments as they appear in `argv`, separated by spaces, i.e., `/full/path/to/argv[0] argv[1] argv[2] ...`
* `string OS`: the operating system that the process is running on
* macOS => `"macOS"`
* Windows => `"Windows"`
* Linux => `"Linux"`
* other => `"Unknown"`
* `string arch`: the architecture of the process
* 32-bit => `"x86"`
* 64-bit => `"x64"`
* ARM32 => `"arm32"`
* ARM64 => `"arm64"`
* Other => `"Unknown"`
##### Details:
Returns:
```c++
struct Payload
{
uint64_t ProcessId;
LPCWSTR CommandLine;
LPCWSTR OS;
LPCWSTR Arch;
GUID RuntimeCookie;
}
```
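The string fields in this payload follow the same length-prefixed UTF-16 convention described in the Payloads section. A non-normative sketch of the corresponding read helper a client could use when parsing this response:
```cs
using System.IO;
using System.Text;

static class IpcPayloadReader // illustrative helper, not part of the spec
{
    // Reads a Diagnostic IPC "string": uint character count (including the
    // terminating null) followed by UTF-16 little-endian characters.
    public static string ReadString(BinaryReader reader)
    {
        uint length = reader.ReadUInt32();
        if (length == 0)
            return string.Empty;

        byte[] bytes = reader.ReadBytes((int)(length * 2));
        // Drop the trailing null wchar before decoding.
        return Encoding.Unicode.GetString(bytes, 0, (int)((length - 1) * 2));
    }
}
```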
### `ResumeRuntime`
Command Code: `0x0401`
If the target .NET application has Diagnostic Ports configured to suspend with `DOTNET_DiagnosticPorts`, or `DOTNET_DefaultDiagnosticPortSuspend` has been set to `1` (`0` is the default value), then the runtime will pause during `EEStartupHelper` in `ceemain.cpp` and wait for an event to be set. (See [Diagnostic Ports](#diagnostic-ports) for more details)
The `ResumeRuntime` command sets the necessary event to resume runtime startup. If the .NET application _has not_ been configured to suspend, or the runtime has _already_ been resumed, this command is a no-op.
In the event of an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
#### Inputs:
Header: `{ Magic; Size; 0x0401; 0x0000 }`
There is no payload.
#### Returns (as an IPC Message Payload):
Header: `{ Magic; size; 0xFF00; 0x0000; }`
There is no payload.
### `ProcessEnvironment`
Command Code: `0x0402`
The `ProcessEnvironment` command queries the runtime for its environment block.
In the event of an [error](#Errors), the runtime will attempt to send an error message and subsequently close the connection.
#### Inputs:
Header: `{ Magic; Size; 0x0402; 0x0000 }`
There is no payload.
#### Returns (as an IPC Message Payload + continuation):
Header: `{ Magic; size; 0xFF00; 0x0000; }`
Payload:
* `uint32_t nIncomingBytes`: the number of bytes to expect in the continuation stream
* `uint16_t future`: unused
Continuation:
* `Array<Array<WCHAR>> environmentBlock`: The environment block written as a length prefixed array of length prefixed arrays of `WCHAR`.
Note: it is valid for `nIncomingBytes` to be `4` and the continuation to simply contain the value `0`.
##### Details:
Returns:
```c++
struct Payload
{
uint32_t nIncomingBytes;
uint16_t future;
}
```
## Errors
In the event an error occurs in the handling of an IPC Message, the Diagnostic Server will attempt to send an IPC Message encoding the error and subsequently close the connection. The connection will be closed **regardless** of the success of sending the error message. The Client is expected to be resilient in the event of a connection being abruptly closed.
Errors are `HRESULT`s encoded as `int32_t` when sent back to the client. There are a few Diagnostics IPC specific `HRESULT`s:
```c
#define CORDIAGIPC_E_BAD_ENCODING = 0x80131384
#define CORDIAGIPC_E_UNKNOWN_COMMAND = 0x80131385
#define CORDIAGIPC_E_UNKNOWN_MAGIC = 0x80131386
#define CORDIAGIPC_E_UNKNOWN_ERROR = 0x80131387
```
Diagnostic Server errors are sent as a Diagnostic IPC Message with:
* a `command_set` of `0xFF`
* a `command_id` of `0xFF`
* a Payload consisting of a `int32_t` representing the error encountered (described above)
All errors will result in the Server closing the connection.
Error response Messages will be sent when:
* the client sends an improperly encoded Diagnostic IPC Message
* the client uses an unknown `command`
* the client uses an unknown `magic` version string
* the server encounters an unrecoverable error, e.g., OOM, transport error, runtime malfunction etc.
The client is expected to be resilient in the event that the Diagnostic Server fails to respond in a reasonable amount of time (this may be Command specific).
For example, if the Diagnostic Server finds incorrectly encoded data while parsing a Message, it would send the following Message in response:
<table>
<tr>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>10</th>
<th>11</th>
<th>12</th>
<th>13</th>
<th>14</th>
<th>15</th>
<th>16</th>
<th>17</th>
<th>18</th>
<th>19</th>
<th>20</th>
<th>21</th>
<th>22</th>
<th>23</th>
<th>24</th>
<th>25</th>
<th>26</th>
<th>27</th>
<th>28</th>
</tr>
<tr>
<td colspan="20">Header</td>
<td colspan="8">Payload</td>
</tr>
<tr>
<td colspan="14">magic</td>
<td colspan="2">size</td>
<td colspan="1">command_set</td>
<td colspan="1">command_id</td>
<td colspan="2">reserved</td>
<td colspan="8">Error Code</td>
</tr>
<tr>
<td colspan="14">"DOTNET_IPC_V1"</td>
<td colspan="2">28</td>
<td colspan="1">0xFF</td>
<td colspan="1">0xFF</td>
<td colspan="2">0x0000</td>
<td colspan="8">0x80131384</td>
</tr>
</table>
# Diagnostic Ports
> Available since .NET 5.0
A Diagnostic Port is a mechanism for communicating the Diagnostics IPC Protocol to a .NET application from out of process. There are two flavors of Diagnostic Port: `connect` and `listen`. A `listen` Port is when the runtime creates an IPC transport and listens for incoming connections. The default Diagnostic Port is an example of a `listen` Port. You cannot currently configure additional `listen` Ports. A `connect` Port is when the runtime attempts to connect to an IPC transport owned by another process. Upon connection to a `connect` Port, the runtime will send an [Advertise](#advertise-protocol) message signalling that it is ready to accept Diagnostics IPC Protocol commands. Each command consumes a connection, and the runtime will reconnect to the `connect` Port to wait for more commands.
.NET applications can configure Diagnostic Ports with the following environment variables:
* `DOTNET_DiagnosticPorts=<port address>[,tag[...]][;<port address>[,tag[...]][...]]`
where:
* `<port address>` is a NamedPipe name without `\\.\pipe\` on Windows, and the full path to a Unix domain socket on other platforms
* `tag ::= <SUSPEND_MODE> | <PORT_TYPE>`
* `<SUSPEND_MODE> ::= suspend | nosuspend` (the default value is `suspend`)
* `<PORT_TYPE> ::= connect` (future types such as additional listen ports could be added to this list)
Example usage:
```shell
$ export DOTNET_DiagnosticPorts="$DOTNET_DiagnosticPorts;~/mydiagport.sock,nosuspend"
```
Any diagnostic ports specified in this configuration will be created in addition to the default port (`dotnet-diagnostic-<pid>-<epoch>`). The suspend mode of the default port is set via the new environment variable `DOTNET_DefaultDotnetPortSuspend` which defaults to `0` for `nosuspend`.
Each port configuration specifies whether it is a `suspend` or `nosuspend` port. Ports specifying `suspend` in their configuration will cause the runtime to pause early on in the startup path before most runtime subsystems have started. This allows any agent to receive a connection and properly setup before the application startup continues. Since multiple ports can individually request suspension, the `resume` command needs to be sent by each suspended port connection before the runtime resumes execution.
If a config specifies multiple tag values from a tag type, for example `"<path>,nosuspend,suspend,suspend,"`, only the first one is respected.
The port address value is **required** for a port configuration. If a configuration doesn't specify an address and only specifies tags, then the first tag will be treated as the path. For example, the configuration `DOTNET_DiagnosticPorts=nosuspend,connect` would cause a port with the name `nosuspend` to be created, in the default `suspend` mode.
The runtime will make a best-effort attempt to generate a port from a port configuration. A bad port configuration won't cause an error state, but could waste resources; for example, it could cause the runtime to continuously poll for a `connect` port that will never exist.
When a Diagnostic Port is configured, the runtime will attempt to connect to the provided address in a retry loop while also listening on the traditional server. The retry loop has an initial timeout of 10ms with a falloff factor of 1.25x and a max timeout of 500 ms. A successful connection will result in an infinite timeout. The runtime is resilient to the remote end of the Diagnostic Port failing, e.g., closing, not `Accepting`, etc.
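For illustration only, the retry schedule described above works out to delays of roughly 10 ms, 12.5 ms, 15.6 ms, ... capped at 500 ms. A sketch of the falloff computation (not the runtime's actual implementation):
```csharp
using System;
using System.Collections.Generic;

static class ConnectPortRetry
{
    // Produces the retry delays described above: start at 10 ms, multiply by 1.25,
    // and never exceed 500 ms between attempts.
    public static IEnumerable<TimeSpan> Delays()
    {
        double ms = 10;
        while (true)
        {
            yield return TimeSpan.FromMilliseconds(ms);
            ms = Math.Min(ms * 1.25, 500);
        }
    }
}
```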
## Advertise Protocol
Upon successful connection, the runtime will send a fixed-size, 34-byte buffer containing the following information:
* `char[8] magic`: (8 bytes) `"ADVR_V1\0"` (ASCII chars + null byte)
* `GUID runtimeCookie`: (16 bytes) CLR Instance Cookie (little-endian)
* `uint64_t processId`: (8 bytes) PID (little-endian)
* `uint16_t future`: (2 bytes) unused for future-proofing
With the following layout:
<table>
<tr>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>10</th>
<th>11</th>
<th>12</th>
<th>13</th>
<th>14</th>
<th>15</th>
<th>16</th>
<th>17</th>
<th>18</th>
<th>19</th>
<th>20</th>
<th>21</th>
<th>22</th>
<th>23</th>
<th>24</th>
<th>25</th>
<th>26</th>
<th>27</th>
<th>28</th>
<th>29</th>
<th>30</th>
<th>31</th>
<th>32</th>
<th>33</th>
<th>34</th>
</tr>
<tr>
<td colspan="8">magic</td>
<td colspan="16">runtimeCookie</td>
<td colspan="8">processId</td>
<td colspan="2">future</td>
</tr>
<tr>
<td colspan="8">"ADVR_V1\0"</td>
<td colspan="16">123e4567-e89b-12d3-a456-426614174000</td>
<td colspan="8">12345</td>
<td colspan="2">0x0000</td>
</tr>
</table>
This is a one-way transmission with no expectation of an ACK. The tool owning the Diagnostic Port is expected to consume this message and then hold on to the now active connection until it chooses to send a Diagnostics IPC command.
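A tool owning a `connect` port can parse the advertise buffer as follows. This is a sketch under the layout above; `AdvertiseMessage` and `Parse` are illustrative names, and the GUID bytes are interpreted with `System.Guid`'s little-endian field layout:
```csharp
using System;
using System.IO;
using System.Text;

sealed record AdvertiseMessage(Guid RuntimeCookie, ulong ProcessId);

static class Advertise
{
    // Parses the 34-byte "ADVR_V1" buffer described above.
    public static AdvertiseMessage Parse(Stream stream)
    {
        using var reader = new BinaryReader(stream, Encoding.ASCII, leaveOpen: true);
        byte[] magic = reader.ReadBytes(8);
        if (Encoding.ASCII.GetString(magic, 0, 7) != "ADVR_V1")
            throw new InvalidDataException("Unexpected advertise magic.");

        var cookie = new Guid(reader.ReadBytes(16));   // CLR instance cookie (little-endian fields)
        ulong pid = reader.ReadUInt64();               // PID (little-endian)
        _ = reader.ReadUInt16();                       // future (unused)
        return new AdvertiseMessage(cookie, pid);
    }
}
```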
## Dataflow
Due to the potential for an *optional continuation* in the Diagnostics IPC Protocol, each successful connection between the runtime and a Diagnostic Port is only usable **once**. As a result, a .NET process will attempt to _reconnect_ to the diagnostic port immediately after every command that is sent across an active connection.
A typical dataflow has 2 actors, the Target application, `T` and the Diagnostics Monitor Application, `M`, and communicates like so:
```
T -> : Target attempts to connect to M, which may not exist yet
// M comes into existence
T -> M : [ Advertise ] - Target sends advertise message to Monitor
// some amount of time passes
T <- M : [ Diagnostics IPC Protocol ] - Monitor sends a Diagnostics IPC Protocol command
T -> M : [ Advertise ] - Target reconnects to Monitor with a _new_ connection and re-sends the advertise message
```
It is important to emphasize that a connection **_should not_** be reused for multiple Diagnostic IPC Protocol commands.
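Put together, a Monitor's accept loop therefore looks roughly like the sketch below (Windows named-pipe flavor; the pipe name is whatever was placed in `DOTNET_DiagnosticPorts`, and `Advertise.Parse` refers to the sketch in the previous section):
```csharp
using System;
using System.IO.Pipes;

static class MonitorLoop
{
    // A sketch of the Monitor (M) side of the dataflow on Windows. Each iteration
    // handles one connection: accept, read the advertise message, optionally send
    // a single Diagnostics IPC command, then wait for the Target to reconnect.
    public static void Run(string portName)
    {
        while (true)
        {
            using var server = new NamedPipeServerStream(portName, PipeDirection.InOut);
            server.WaitForConnection();                              // T -> M
            AdvertiseMessage advertise = Advertise.Parse(server);    // [ Advertise ]
            Console.WriteLine($"Runtime {advertise.RuntimeCookie} (pid {advertise.ProcessId}) is ready.");

            // The connection may now be held open until a single Diagnostics IPC
            // command is sent on it; the Target then reconnects and the loop
            // starts over with a fresh connection.
        }
    }
}
```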

Просмотреть файл

@ -1,502 +0,0 @@
# Microsoft.Diagnostics.NETCore.Client API Documentation
## Intro
Microsoft.Diagnostics.NETCore.Client (also known as the Diagnostics Client library) is a managed library that lets you interact with the .NET Core runtime (CoreCLR) for various diagnostics related tasks, such as tracing, requesting a dump, or attaching an ICorProfiler. Using this library, you can write your own diagnostics tools customized for your particular scenario.
## Installing
Microsoft.Diagnostics.NETCore.Client is available on [NuGet](https://www.nuget.org/packages/Microsoft.Diagnostics.NETCore.Client/).
## Sample Code:
Here is some sample code showing how to use this library.
#### 1. Attaching to a process and dumping out all the runtime GC events in real time to the console
This sample shows an example where we trigger an EventPipe session with the .NET runtime provider with the GC keyword at informational level, and use `EventPipeEventSource` (provided by the [TraceEvent library](https://www.nuget.org/packages/Microsoft.Diagnostics.Tracing.TraceEvent/)) to parse the events coming in and print the name of each event to the console in real time.
```cs
using System;
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing;
using Microsoft.Diagnostics.Tracing.Parsers;
public void PrintRuntimeGCEvents(int processId)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime",
EventLevel.Informational, (long)ClrTraceEventParser.Keywords.GC)
};
var client = new DiagnosticsClient(processId);
using (var session = client.StartEventPipeSession(providers, false))
{
var source = new EventPipeEventSource(session.EventStream);
source.Clr.All += (TraceEvent obj) => {
Console.WriteLine(obj.EventName);
};
try
{
source.Process();
}
// NOTE: This exception does not currently exist. It is something that needs to be added to TraceEvent.
catch (EventStreamException e)
{
Console.WriteLine("Error encountered while processing events");
Console.WriteLine(e.ToString());
}
}
}
```
#### 2. Write a core dump.
This sample shows how to trigger a dump using `DiagnosticsClient`.
```cs
using Microsoft.Diagnostics.NETCore.Client;
public void TriggerCoreDump(int processId)
{
var client = new DiagnosticsClient(processId);
client.WriteDump(DumpType.Normal);
}
```
#### 3. Trigger a core dump when CPU usage goes above a certain threshold
This sample shows an example where we monitor the `cpu-usage` counter published by the .NET runtime and use the `WriteDump` API to write out a dump when the CPU usage grows beyond a certain threshold.
```cs
using System;
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing;
using Microsoft.Diagnostics.Tracing.Parsers;
public void TriggerDumpOnCpuUsage(int processId, int threshold)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider(
"System.Runtime",
EventLevel.Informational,
(long)ClrTraceEventParser.Keywords.None,
new Dictionary<string, string>() {
{ "EventCounterIntervalSec", "1" }
}
)
};
var client = new DiagnosticsClient(processId);
using(var session = client.StartEventPipeSession(providers))
{
var source = new EventPipeEventSource(session.EventStream);
source.Dynamic.All += (TraceEvent obj) =>
{
if (obj.EventName.Equals("EventCounters"))
{
// I know this part is ugly. But this is all TraceEvent.
var payloadFields = (IDictionary<string, object>)(obj.GetPayloadValueByName("Payload"));
if (payloadFields["Name"].ToString().Equals("cpu-usage"))
{
double cpuUsage = Double.Parse(payloadFields["Mean"].ToString());
if (cpuUsage > (double)threshold)
{
client.WriteDump(DumpType.Normal, "/tmp/minidump.dmp");
}
}
}
};
try
{
source.Process();
}
// NOTE: This exception does not currently exist. It is something that needs to be added to TraceEvent.
catch (EventStreamException) {}
}
}
```
#### 4. Trigger a CPU trace for given number of seconds
This sample shows an example where we trigger an EventPipe session for certain period of time, with the default CLR trace keyword as well as the sample profiler, and read from the stream that gets created as a result and write the bytes out to a file. Essentially this is what `dotnet-trace` uses internally to write a trace file.
```cs
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Tracing;
using System.IO;
using System.Threading.Tasks;
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing.Parsers;
public void TraceProcessForDuration(int processId, int duration, string traceName)
{
var cpuProviders = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime", EventLevel.Informational, (long)ClrTraceEventParser.Keywords.Default),
new EventPipeProvider("Microsoft-DotNETCore-SampleProfiler", EventLevel.Informational, (long)ClrTraceEventParser.Keywords.None)
};
var client = new DiagnosticsClient(processId);
using (var traceSession = client.StartEventPipeSession(cpuProviders))
{
Task copyTask = Task.Run(async () =>
{
using (FileStream fs = new FileStream(traceName, FileMode.Create, FileAccess.Write))
{
await traceSession.EventStream.CopyToAsync(fs);
}
});
copyTask.Wait(duration * 1000);
traceSession.Stop();
}
}
```
#### 5. Print names of all .NET processes that published a diagnostics server to connect
This sample shows how to use `DiagnosticsClient.GetPublishedProcesses` API to print the names of the .NET processes that published a diagnostics IPC channel.
```cs
using System;
using System.Diagnostics;
using System.Linq;
using Microsoft.Diagnostics.NETCore.Client;
public static void PrintProcessStatus()
{
var processes = DiagnosticsClient.GetPublishedProcesses()
.Select(Process.GetProcessById)
.Where(process => process != null);
foreach (var process in processes)
{
Console.WriteLine($"{process.ProcessName}");
}
}
```
#### 6. Live-parsing events for a specified period of time.
This sample shows an example where we create two tasks, one that parses the events coming in live with `EventPipeEventSource` and one that reads console input for a keypress signaling the program to end. If the target app exits before the user presses Enter, the app exits gracefully. Otherwise, `inputTask` will send the Stop command to the pipe and exit gracefully.
```cs
using System;
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Diagnostics.NETCore.Client;
using Microsoft.Diagnostics.Tracing;
using Microsoft.Diagnostics.Tracing.Parsers;
public static void PrintEventsLive(int processId)
{
var providers = new List<EventPipeProvider>()
{
new EventPipeProvider("Microsoft-Windows-DotNETRuntime",
EventLevel.Informational, (long)ClrTraceEventParser.Keywords.Default)
};
var client = new DiagnosticsClient(processId);
using (var session = client.StartEventPipeSession(providers, false))
{
Task streamTask = Task.Run(() =>
{
var source = new EventPipeEventSource(session.EventStream);
source.Dynamic.All += (TraceEvent obj) =>
{
Console.WriteLine(obj.EventName);
};
try
{
source.Process();
}
// NOTE: This exception does not currently exist. It is something that needs to be added to TraceEvent.
catch (Exception e)
{
Console.WriteLine("Error encountered while processing events");
Console.WriteLine(e.ToString());
}
});
Task inputTask = Task.Run(() =>
{
Console.WriteLine("Press Enter to exit");
while (Console.ReadKey().Key != ConsoleKey.Enter)
{
Thread.Sleep(100);
}
session.Stop();
});
Task.WaitAny(streamTask, inputTask);
}
}
```
#### 7. Attach a ICorProfiler profiler
This sample shows how to attach an ICorProfiler to a process (profiler attach).
```cs
using System;
using Microsoft.Diagnostics.NETCore.Client;
public static void AttachProfiler(int processId, Guid profilerGuid, string profilerPath)
{
var client = new DiagnosticsClient(processId);
client.AttachProfiler(TimeSpan.FromSeconds(10), profilerGuid, profilerPath);
}
```
## API Description
This section describes the APIs of the library.
#### class DiagnosticsClient
```cs
public class DiagnosticsClient
{
public DiagnosticsClient(int processId);
public EventPipeSession StartEventPipeSession(IEnumerable<EventPipeProvider> providers, bool requestRundown=true, int circularBufferMB=256);
public void WriteDump(DumpType dumpType, string dumpPath=null, bool logDumpGeneration=false);
public void AttachProfiler(TimeSpan attachTimeout, Guid profilerGuid, string profilerPath, byte[] additionalData=null);
public static IEnumerable<int> GetPublishedProcesses();
}
```
#### Methods
```csharp
public DiagnosticsClient(int processId);
```
Creates a new instance of `DiagnosticsClient` for a compatible .NET process running with process ID of `processId`.
`processId` : Process ID of the target application.
```csharp
public EventPipeSession StartEventPipeSession(IEnumerable<EventPipeProvider> providers, bool requestRundown=true, int circularBufferMB=256)
```
Starts an EventPipe tracing session using the given providers and settings.
* `providers` : An `IEnumerable` of [`EventPipeProvider`](#class-eventpipeprovider)s to start tracing.
* `requestRundown`: A `bool` specifying whether rundown provider events from the target app's runtime should be requested.
* `circularBufferMB`: An `int` specifying the total size of circular buffer used by the target app's runtime on collecting events.
```csharp
public EventPipeSession StartEventPipeSession(EventPipeProvider providers, bool requestRundown=true, int circularBufferMB=256)
```
* `providers` : An [`EventPipeProvider`](#class-eventpipeprovider) to start tracing.
* `requestRundown`: A `bool` specifying whether rundown provider events from the target app's runtime should be requested.
* `circularBufferMB`: An `int` specifying the total size of circular buffer used by the target app's runtime on collecting events.
**Remarks**
Rundown events contain payloads that may be needed for post-trace analysis, such as resolving method names of thread samples. Unless you know you do not need them, we recommend leaving `requestRundown` set to `true`. In large applications, rundown may take up to several minutes.
`circularBufferMB` controls the size of the circular buffer the runtime uses to buffer events for the session.
```csharp
public void WriteDump(DumpType dumpType, string dumpPath=null, bool logDumpGeneration=false);
```
Request a dump for post-mortem debugging of the target application. The type of the dump can be specified using the [`DumpType`](#enum-dumptype) enum.
* `dumpType` : Type of the dump to be requested.
* `dumpPath` : The path to the dump to be written out to.
* `logDumpGeneration` : If set to `true`, the target application will write out diagnostic logs during dump generation.
```csharp
public void AttachProfiler(TimeSpan attachTimeout, Guid profilerGuid, string profilerPath, byte[] additionalData=null);
```
Request to attach an ICorProfiler to the target application.
* `attachTimeout` : A `TimeSpan` after which attach will be aborted.
* `profilerGuid` : `Guid` of the ICorProfiler to be attached.
* `profilerPath ` : Path to the ICorProfiler dll to be attached.
* `additionalData` : Optional additional data that can be passed to the runtime during profiler attach.
```csharp
public static IEnumerable<int> GetPublishedProcesses();
```
Get an `IEnumerable` of process IDs of all the active .NET processes that can be attached to.
#### class EventPipeProvider
```cs
public class EventPipeProvider
{
public EventPipeProvider(
string name,
EventLevel eventLevel,
long keywords = 0,
IDictionary<string, string> arguments = null)
public string Name { get; }
public EventLevel EventLevel { get; }
public long Keywords { get; }
public IDictionary<string, string> Arguments { get; }
public override string ToString();
public override bool Equals(object obj);
public override int GetHashCode();
public static bool operator ==(EventPipeProvider left, EventPipeProvider right);
public static bool operator !=(EventPipeProvider left, EventPipeProvider right);
}
```
```csharp
public EventPipeProvider(string name,
EventLevel eventLevel,
long keywords = 0,
IDictionary<string, string> arguments = null)
```
Creates a new instance of `EventPipeProvider` with the given provider name, [EventLevel](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventlevel), keywords, and arguments.
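For example, the `System.Runtime` counters provider used in sample 3 above can be constructed like this, using the `arguments` dictionary to ask EventCounters to publish once per second:
```csharp
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using Microsoft.Diagnostics.NETCore.Client;

// A provider with no keywords and an EventCounter publishing interval of 1 second.
var provider = new EventPipeProvider(
    "System.Runtime",
    EventLevel.Informational,
    keywords: 0,
    arguments: new Dictionary<string, string>
    {
        { "EventCounterIntervalSec", "1" }
    });
```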
#### Properties
```csharp
public string Name { get; }
```
The name of the provider.
```csharp
public EventLevel EventLevel { get; }
```
The EventLevel of the given instance of [`EventPipeProvider`](#class-eventpipeprovider).
```csharp
public long Keywords { get; }
```
A `long` bitmask representing the keywords of the EventSource.
```csharp
public IDictionary<string, string> Arguments { get; }
```
An `IDictionary` of key-value string pairs representing optional arguments to be passed to the EventSource backing the given `EventPipeProvider`.
#### Remarks
This class is immutable, as EventPipe does not allow a provider's configuration to be modified during an EventPipe session (as of .NET Core 3.1).
### class EventPipeSession
```csharp
public class EventPipeSession : IDisposable
{
public Stream EventStream { get; }
public void Stop();
}
```
This class represents an ongoing EventPipe session that has been started. It is immutable and acts as a handle to an EventPipe session of the given runtime.
#### Properties
```csharp
public Stream EventStream { get; }
```
Returns a `Stream` that can be used to read the event stream.
#### Methods
```csharp
public void Stop();
```
Stops the given EventPipe session.
### enum DumpType
```csharp
public enum DumpType
{
Normal = 1,
WithHeap = 2,
Triage = 3,
Full = 4
}
```
Represents the type of dump that can be requested.
* `Normal`: Include just the information necessary to capture stack traces for all existing threads in a process. Limited GC heap memory and information.
* `WithHeap`: Includes the GC heaps and information necessary to capture stack traces for all existing threads in a process.
* `Triage`: Include just the information necessary to capture stack traces for all existing threads in a process. Limited GC heap memory and information.
* `Full`: Include all accessible memory in the process. The raw memory data is included at the end, so that the initial structures can be mapped directly without the raw memory information. This option can result in a very large dump file.
### Exceptions
`DiagnosticsClientException` or one of its subclasses can be thrown from the library.
```csharp
public class DiagnosticsClientException : Exception
```
#### UnsupportedProtocolException
```csharp
public class UnsupportedProtocolException : DiagnosticsClientException
```
This may be thrown when the command is not supported by either the library or the target process' runtime.
#### ServerNotAvailableException
```csharp
public class ServerNotAvailableException : DiagnosticsClientException
```
This may be thrown when the runtime is not available for diagnostics IPC commands, such as early during runtime startup before the runtime is ready for diagnostics commands, or when the runtime is shutting down.
#### ServerErrorException
```csharp
public class ServerErrorException : DiagnosticsClientException
```
This may be thrown when the runtime responds with an error to a given command.
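A caller will typically wrap commands in a try/catch over these exception types; for example (a sketch, not part of the library):
```csharp
using System;
using Microsoft.Diagnostics.NETCore.Client;
public static void TryCollectDump(int processId, string dumpPath)
{
    var client = new DiagnosticsClient(processId);
    try
    {
        client.WriteDump(DumpType.WithHeap, dumpPath);
    }
    catch (UnsupportedProtocolException e)
    {
        Console.WriteLine($"The target runtime does not support this command: {e.Message}");
    }
    catch (ServerNotAvailableException e)
    {
        Console.WriteLine($"The runtime is not ready for diagnostics commands: {e.Message}");
    }
    catch (ServerErrorException e)
    {
        Console.WriteLine($"The runtime reported an error handling the command: {e.Message}");
    }
}
```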

Просмотреть файл

@ -1,392 +0,0 @@
# .NET Core Diagnostics Vision
[This document](https://github.com/dotnet/diagnostics/blob/master/documentation/diagnostics-planing.md)
describes a high level vision/plan for diagnostics / monitoring for the .NET Core runtime.
The overarching goal is easy to state.
* .NET Core should be the most diagnosable, easiest to manage service/app framework platform bar none
This document tries to bring more details into exactly how we can achieve this goal.
## What are the Goals of our Customers?
Broadly speaking our customer is someone building a cloud service (in Azure). There will be great
variability in both the kinds of applications as well as the scale and complexity of the apps, but
it is likely that every such customer has very similar needs with respect to monitoring and Diagnostics.
In particular
* Our customers care about the users of their service. Thus they want to
* Monitor service failures proactively - Failures are VERY bad.
* Monitor service response time - Slow service is bad.
* Our customers care about costs
* Thus they want good billing information and the ability to diagnose any surprises.
* To reason about costs, they also need throughput information (that is benefit as well as cost)
* Our customers wish to proactively avoid problems with their service
* Security/Privacy. They want their service to be hacker proof. They want information about attacks and defenses.
* Availability. They want to know about any redundancy, failover and geolocation. Failover needs testing (e.g. Chaos-Monkey)
* End-to-end visibility. They will want external 'customer-like' testing to know that networking and routing to the service works properly.
* Dependency management. If the services use other services (including third parties), they need to monitor them as well.
* Data management. Regulations like [GDPR](https://eugdpr.org/the-regulation/) place constraints on data storage and management.
* Our customers need to fix the problems they find.
* They want the data to not only tell them problem exists, but enough additional information to easily guide them to corrective action.
* They want the data to quickly blame the correct owner (e.g. cloud vendor, external service, or their service)
* They want the data presented in a way that they can understand (mirrors the model they programmed against)
### Environment Independence
So customers want information out of their Diagnostics/Monitoring system that help reach the above goals.
But it is also important to say that they want a certain uniformity, and it is this uniformity
that is actually often the hardest part. In particular they want this information regardless of
their choices with respect to:
* The hardware being used (e.g. X86, X64, ARM64 ...)
* The operating system being used (Windows, Linux ...)
* The software environment in use (Raw VMs, containers, Azure Service Fabric, Web Applications (PAAS), Azure Functions, ...)
* The other services used (Database, Storage, Redis Caches, Identity/Authorization (e.g. Microsoft ID, Facebook, Google))
### The Need for Collection (Verbosity) Control.
In addition to working where they need it, they also need the right 'amount' of information. Too little
information and problems simply don't get solved, and this has a SEVERE impact on their business. Too much
detail all the time simply slows things down (sometimes prohibitively), and consumes resources to transmit, store,
retrieve and analyze. Thus a good system needs to start out with a modest amount of 'always on' information
with the ability to collect more to solve basically 'any' problem. The 'always on' component looks a lot
like traditional logging, and the 'very detailed' end of the spectrum looks a lot like traditional debugging,
but there is a sliding scale between these two extremes with lots of possible variation. The important
high level point is that the ability to control the verbosity will be needed.
### Summary
While the above description may not be complete, it very likely captures the most important concerns of
service providers. Fundamentally they have to keep their service running well to make money, so it
makes sense that they would care most about the very visible characteristics that are central to keeping things
running (making money) with a minimum of cost/effort. Ultimately they want to know that their service is
up and performing well, and when it is not, they have sufficient information to fix things. They want
this to be true regardless of hardware, platform, environment or what services are used.
## What is .NET Core's role?
Many of the items on the above list are the responsibility of the cloud (Azure) itself (e.g. billing, geolocation, failover ...) and the
.NET Core runtime does not play a central role. The .NET Core runtime's role is to:
* Provide hooks that enable very detailed logging and control. This includes traditional
debugging (where you can get extremely detailed information and very precise control) but
also includes any other 'Profiler' hooks that allow for collecting detailed information
about what the runtime itself is doing.
* Ensuring that all the interesting information that only the runtime has is exposed in some way.
* Provide APIs/Standards/Guidance for adding instrumentation for code outside the runtime.
This includes the functionality of traditional 'logging' as well as counters for lightweight
monitoring. It also includes standards for correlation IDs that mark particular pieces
of telemetry as being part of semantically relevant groups (like a single user request).
* Using those standards to instrument important places in the .NET Framework libraries.
### Tooling support
While the hooks/infrastructure that the runtime provides are critical, there are other parts that
are just as critical including
* An agent/tool that controls the hooks, stores the resulting data (either locally or in the cloud),
and organizes it (with retention policies etc.), as well as tooling for accessing the
information.
Moreover it is likely that there will be constraints on this system including
* Control over the verbosity and targeting a particular investigation.
* Tight constraints over impact to the running system (it is live, serving customers)
* Restrictions on where the data can be stored and managed (e.g. for [GDPR](https://eugdpr.org/the-regulation/))
* Accepting information from 'foreign' services to incorporate into the system.
* Working with existing monitoring systems.
In addition there are likely to be different environments
* A development environment where quick setup and ease of use are critical.
* A production environment where very low overhead, and low impact are critical.
* A test environment where excellent drill-into detail is a high priority.
This argues for a set of tools/tooling configurations that can all operate using the
same runtime hooks. The key point here is that the hooks and other .NET Core
support are of no value without the tools.
## What Problems does .NET Core Runtime Monitoring need to Address
It is clear that a diagnostics/monitoring system for services has to handle
the basics: namely get information about 'interesting' events like errors (exceptions)
as well as measure top level metrics (e.g. response time, throughput etc). However
in addition to these, there are some requirements that are not so obvious that
deserve special mention because they either affect many parts of the design, or
simply will require a lot of work to solve. We list them here.
### Supporting Asynchronous Programming
Asynchronous support is a big, complex issue that deserves special mention because
it is a particular pain point with customers and is a huge issue for diagnostics.
Traditionally code was written in a 'synchronous' style where there was a single thread
of computation and instructions were executed sequentially. In a server environment
this mode was generalized to be 'multi-threaded' where a service could have many
independent threads of execution each one executing sequentially. This works well
for services that handle up to hundreds of concurrent requests simultaneously.
However if the service wishes to handle thousands or tens of thousands of outstanding
requests, the overhead of each thread (which includes a full execution stack and
related OS machinery), and of switching between threads, becomes problematic.
The solution to this scaling problem is to write asynchronous code. In this style
of coding, when doing non-CPU activities, instead of the thread blocking for the operation to
complete, it simply creates a 'work-item' (in .NET we use a class called System.Threading.Task) that
represents the in-flight operation. You can then associate callback code called a
continuation with the work-item that will run when the operation completes. Because
the work item 'remembers' what to do next, the thread can do other work (typically some
other continuation), while it is waiting. Thus even with thousands of outstanding
requests you only need a handful of threads (e.g. one per processor) to service them.
Sadly, asynchronous code is significantly harder to write than synchronous code. Basically
all the work that the call stack was doing now has to be done by the programmer using
continuations. It is VERY easy to get this wrong, especially considering error processing.
On the .NET Platform the C# language and runtime have created a bunch of features (e.g. async
methods and Tasks), that try to make it easier to write asynchronous code. Basically
users write something that looks much more like synchronous code, and the C# compiler
and the runtime conspire to turn this code into efficient asynchronous code.
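For example, a method as small as the following reads like synchronous code, yet the compiler rewrites it into a heap-allocated state machine whose continuation runs the code after the `await` when the download completes (illustrative only):
```csharp
using System.Net.Http;
using System.Threading.Tasks;

static class Example
{
    // Looks synchronous and reads top to bottom, but the thread is released at the
    // await; the final line runs later, as a continuation, on whatever thread
    // completes the download.
    static async Task<int> GetContentLengthAsync(HttpClient client, string url)
    {
        string body = await client.GetStringAsync(url);
        return body.Length;
    }
}
```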
While this support greatly eases the effort to write correct asynchronous code, it also creates
a rather HUGE diagnostics problem. The code that the programmer writes BY DESIGN looks
a lot like synchronous code, but under the hood, the C# compiler morphs the code in
non-trivial ways, mangling the method names to make state machines that serve as
continuations needed for the I/O callbacks. There is no longer a 'call stack' or in
fact any 'blocking' by threads. Instead methods return Tasks and you 'await' them, which
means scheduling a continuation state machine to run when the task completes.
The details are non-trivial, but the key point here is that there is
a very significant difference between how the programmer 'thinks' about
the code (he thinks of it as being synchronous but efficient), and what is really
happening (state machines being created, linked together and threads reused without
blocking). Since the diagnostics/monitoring/profiling system sees what is 'really'
happening, it has to morph the data it gets back into something that matches the
model the programmer is expecting.
Moreover asynchronous code tends to be in the most performance-critical part of a service,
and users will want the ability to monitor in-production systems. Thus there is
a premium in making it so that the asynchronous-to-synchronous view transformation can
be done with very low runtime overhead (which makes it hard).
#### Async and Request Correlation
One of the very pleasant side effects of synchronous programming was the very simple
correlation model. A thread starts processing a request, and that thread is effectively
'committed' to doing that request until the request completes. Thus the threadID acts
as a correlation ID for the request itself. That is, every operation that is associated with
that thread is also related to the request (since the thread does nothing else).
However with asynchronous programming a request is broken up into many small continuations, and the threads
run these continuations in an effectively random order. Thus another way of 'marking'
operations as belonging to a request must be created.
TODO Scenarios: Live, Post-mortem, Trace.
### Multi-Service Programming Activity Correlation
As we have seen, asynchronous code makes correlating a request to the operations needed
to perform the request more challenging since a thread ID does not work as a correlation
ID as well as it does in synchronous code. However even purely synchronous code has
the same correlation problem when it requires services on other machines. When
a request causes a database call (or any other off-machine service), again the thread ID
can no longer serve as the correlation ID. We need something to link the requestor and
the service provider together.
There is a trend in the industry toward [microservices](https://azure.microsoft.com/en-us/blog/microservices-an-application-revolution-powered-by-the-cloud/)
which break an application up into a set of smaller services that are very loosely coupled
(in particular they are not necessarily executed in the same process). This makes
a thread ID even less useful as a correlation ID. In short, we need a way of marking
operations as part of a request that works well in an async/microservice/multi-tier world.
#### Industry-wide Standards
It is already the case that applications use 3rd party services for things like authorization
and authentication (think logging in with your facebook/google/microsoft account). To
do a proper job of tracing through these services, correlation IDs need to be passed around
among these 3rd parties. It is also useful for other information to flow with the
request (e.g. whether this particular request should have detailed logging turned on
while it is being processed). Supporting good logging with 3rd parties requires
some standards that each party follows.
There are already w3c draft standards for the [Trace Context](https://w3c.github.io/trace-context/) ([github](https://github.com/w3c/trace-context))
and [Trace Context Headers](https://w3c.github.io/correlation-context/) ([github](https://github.com/w3c/correlation-context))
that allow for this kind of 3rd party interoperation. We should be supporting this.
## Inventory of Existing .NET Core Monitoring / Diagnostics Infrastructure
The .NET Core runtime already has quite a bit of infrastructure associated with
monitoring or diagnostics.
* Formatted Text Logging (ILogger)
* Microsoft.Extensions.Logging.ILogger - Assumes the ASP.NET Dependency Injection Framework. Not suitable for general framework use because of this. Definitely relevant.
* System.Diagnostics.TraceSource - Arguably only here for compatibility. Probably not relevant.
* System.Diagnostics.Trace - Not expected to be used in production scenarios. Ultra-simple development-time logging. Probably not relevant.
* Structured loggers
* System.Diagnostics.DiagnosticSource - Designed specifically for the in-process case (listener sees true objects). Relatively new, meant to be used by in-proc monitors, or feed into EventSource. Definitely relevant.
* System.Diagnostics.Tracing.EventSource - Designed for the out-of-process case (all data serialized). Built on top of ETW (Windows) or EventPipe (Linux and Windows). Definitely relevant.
* System.Diagnostics.Activity - helper class for flowing diagnostics information around. Defines a correlation ID.
* EventPipe - A light-weight scalable system for logging events that underlies System.Diagnostics.Tracing.EventSource. Built originally as an ETW replacement for Linux, but ultimately may supplant ETW on Windows as well.
* [Snapshot Debugger](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-snapshot-debugger?toc=/azure/azure-monitor/toc.json) Capture memory snapshots at user defined points.
* [Azure Monitor](https://docs.microsoft.com/en-us/azure/azure-monitor/overview) - An umbrella of monitoring technologies offered in Azure.
* [Application Insights](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-overview?toc=/azure/azure-monitor/toc.json) - A full featured structured logger + tools/dashboards to view data.
* [AppInsights Profiler](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-profiler) - A collector of highly detailed data for drilling into fine details of a performance problem.
* [Application Map](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-app-map?toc=/azure/azure-monitor/toc.jso) A viewer that shows the high level causal flow of a multi-service application.
* [Azure Service Diagnostics](https://blogs.msdn.microsoft.com/appserviceteam/2018/06/06/app-service-diagnostics-profiling-an-asp-net-web-app-on-azure-app-service/) Trace collecting in Azure.
* [Geneva Monitoring](https://genevamondocs.azurewebsites.net/)- This is an internal telemetry system for applications that Microsoft itself supports (e.g. Azure Storage, Office 365, Bing, Windows services ...)
It supports Azure but also private systems like Autopilot. Quite a bit of Azure Monitor uses technology from Geneva. This is how
Azure knows how much CPU and Memory your VM instances are taking. Azure's MA (Monitoring Agent) is a Geneva client
* [Geneva Getting Started](https://genevamondocs.azurewebsites.net/getting%20started/intro.html)
* [Geneva Analytics](https://www.analytics.msftcloudes.com/v2/#/), is the 'back end' that allows you to make interesting queries.
* Time Travel Debugging (highly detailed logs that allow program replay (and reverse execution)
* Intellitrace - TODO
## Inventory of Vendors in for Application Performance Monitoring
* New Relic
* AppDynamics
* Stackify Retrace
* Dynatrace
* Riverbed SteelCentral
* Dell Foglight
* See https://www.imobdevtech.com/Blog/list-of-top-10-application-performance-monitoring-tools/ for more.
* see https://stackify.com/application-performance-management-tools/ (from Stackify)
* see https://raygun.com/blog/performance-monitoring-tools/ (from raygun)
Feedback on Application Insights as an APM tool
* We included App Insights in our list, but it is arguably not a full-fledged APM solution. It does not do code level profiling but instead provides some high-level performance details only for SQL queries and web service calls. We would call it “APM light”.
* Cons: No reporting per SQL query. No transaction tracing view. Does not correlate logs, errors, and request details well. No reporting across apps. Does not work for non-web apps without major code changes. Cannot track the performance of any line of code in your app via custom CLR profiling.
### What do other Runtime environments do.
TODO
## Elements of the Plan for Diagnostics/Monitoring.
* TODO this is not done, these are rough thoughts currently.
### Philosophy/Properties of the Solution
* Integration with Azure is a feature, not a bug. We wish the 'in Azure' experience to be
very good, and we are not that interested in other cloud platforms.
* Bottom Up, Modular Design
* Ultimately we want a beautiful, nicely integrated, better-on-Azure scenario working well.
However as you build out that vision, you take dependencies on platforms, environments,
storage, correlations, tools and other infrastructure that will inevitably leave some
users behind (who get NO value). Instead build the system bottom up in a modular way
so that the components at the bottom have very few dependencies and thus should work
for 'anyone'. This gives everyone SOME options.
* Make sure the basics work. Work off our debt in terms of platforms and ensure that what we have works as intended.
* We have a pretty-good story on Windows in the sense that the runtime/OS can collect
the data needed to diagnose most problems in a production setting. We are approaching
parity for this on Linux, but we still have non-trivial amount of work to do.
* Even on Windows we have a non-trivial number of complaints that sometimes things just
don't work or have issues with symbols or whatever. Typically these problems are
catastrophic in the sense that users get NO value out of the feature unless the
problem is fixed.
* There are known limitations / unfriendliness in our current Windows implementations
(typically with respect to async support). We should just fix this.
* We should not be discovering diagnostic infrastructure issues in the field. We need a
non-trivial effort to beef up our testing (which is very sparse right now).
* Prefer cross-platform solutions.
* We want to work on both Windows and Linux, and each has a notion of containers
which has its own set of quirks. Thus solutions that avoid a dependency on the
operating system are to be preferred.
* In particular EventPipe should be preferred over ETW mechanisms (which is OS specific)
* Let the Documentation / Demos drive the short term deliverables.
* We know from feedback that documentation on using our monitoring/profiling mechanisms
is either non-existent or poor. We should just invest in this effort, but also
use it as an opportunity to drive feature refinement. Basically in addition
to other documentation, have walk-throughs of non-trivial interesting demos.
In writing these docs, we will see the deficiencies in the current features
or incompleteness (does not work on all platforms) and that can be used to drive
feature work.
* First ensure that data is available within the process, then provide a mechanism for getting it
out of proc (e.g. a REST API over the EventPipe stream) and finally add a minimal amount of
'command line' access, but mostly wire into our APM solutions (Azure Monitor / Application Insights).
### Issues / Design Decisions (that affect the user experience)
* How much do we want to do work to enable 'no modification' to the apps being monitored?
It is additional work to provide a mechanism to serialize the data out of proc to some
external monitor. How much do we care?
* What is the appropriate support in dev tools like Visual Studio for profiling/monitoring?
Currently this kind of monitoring is done through the Azure portal.
* Is having a single machine toy monitor (e.g. dotnet monitor) worthwhile?
* Having the ability to collect traces locally 'by hand' is useful and should be supported.
* Ability to locally display monitoring stats is not that useful. It is most likely
a testing capability (when new instrumentation is added). We should only invest minimally.
### End-to-End Scenario
1. Startup problem (config / deployment / versioning ...). Key is just to have information
2. Intermittent problems (both functional and performance)
3. Slow degrades to performance.
*******************************************************************************
## .NET Core 3.0 Diagnostics Plan
Focusing in on V3.0 work for .NET Core specifically. We have a top level experience in TFS for diagnostic work
* [Close Desktop/.NET Core and windows/linux diagnosability gap](https://devdiv.visualstudio.com/DevDiv/_workitems/edit/648058)
Noah Falk has done customer research to act as input to planning 3.0 which he called the
[.NET Core Framing Document](https://na01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fmicrosoft.sharepoint.com%2F%3Aw%3A%2Ft%2FDNFPlanning%2FEYlvkars0C1EhhPlHXgTnnkB8dWyVesla7Gau10LW49A8A%3Fe%3DNRcH4O&data=02%7C01%7Cvancem%40microsoft.com%7C9ad662619d0d45250cb008d648e0c898%7C72f988bf86f141af91ab2d7cd011db47%7C1%7C0%7C636776527605907620&sdata=C7gW4FCNdb0BlwbRs1MWE0oxq0SUY6ZZh7ESWFPTlu8%3D&reserved=0).
The main summary of this doc is 5 scenarios to improve
1. .NET Core documentation on Diagnostics is almost non-existent (especially compared to offerings from other vendors).
2. Async programming is error-prone and difficult to diagnose.
3. It is difficult/impossible to do a performance investigation if using Linux/Containers.
4. It is difficult/impossible to do a crash dump investigations, and in particular memory investigations.
5. No 'easy on ramp' to monitor performance of a deployed app.
Based on this, a V3.0 plan was written up [.NET Core V3.0 Diagnostics Plan](https://microsoft.sharepoint.com/:w:/t/DNFPlanning/ES28ZR8LCmFEn4JjXQmMM_gBrrvnWOLOhH_rswCRviBYgw?rtime=3kgIi-5T1kg)
that laid out some user stories; however, that should just be considered a rough first draft.
This was then turned into User Stories / work items that will be tracked on Github here [Github .NET Core 3.0 Diagnostics Project](https://github.com/orgs/dotnet/projects/9).
A high level summary of the work being done is as follows
1. Add a top level landing page for Diagnostics and Performance on the https://docs.microsoft.com.
Then flesh that out with details on how to do specific investigations, and in particular all the
other work items listed here will be included in that documentation.
2. Add a new DumpASync command to the SOS debugging extension. Work with Visual Studio Team to have it integrated into their parallel stacks view. Make sure our existing Async diagnostics works well.
3. Create a new 'dotnet-collect' global command that allows the collection of EventPipe data 'by hand'. (See [dotnet-collect And dotnet-analyze](https://github.com/aspnet/AspLabs/tree/master/src/DotNetDiagnostics) for more.)
It should be able to collect traces that can be read with either PerfView or Visual Studio.
4. Create a new 'dotnet-analyze' global command that allows you to do investigations on crash dumps. Simple triage of crashes as well as memory investigations should be possible.
5. Ensure that normal Azure and App-Insights default dashboard metrics work with .NET Core (on all platforms). dotnet-collect will allow the ad-hoc collection of metrics for local monitoring.
This work mostly fixes the gaps in 'ad-hoc' investigations (where a dev is working with a particular machine).
In addition to this we want good integration with our other players in particular
1. Azure allows ad hoc collection of traces from the portal. This should work with Linux/Containers
2. App-Insights profiler is using basically the same technology that is being leveraged for the ad-hoc case with dotnet-collect.
They have solved the problem for the Azure App-Service case (and frankly most cases in Azure). Make sure that this
works end-to-end (on all platforms and scenarios), and that there is good doc links from the ad-hoc case to using App-Insights.
3. App-Insights is working on a standard that allows them to interoperate with other vendors. This requires some runtime
changes to support, which we will be adding. This is our 'real' monitoring solution and in particular our distributed
and micro-service monitoring story.
Note that the basic plan for V3.0 is frankly to make sure that the things we already have
(our existing monitoring story (e.g. App-Insights), our existing performance investigation
capabilities (e.g. Visual Studio, PerfView)) work well ON ALL PLATFORMS and that we have
'fallbacks' (e.g. ad hoc ways of collecting data) when for whatever reason our main offerings
do not work. Along with this, these solutions should be WELL DOCUMENTED and discoverable.
It is not flashy work, but it is the LACK of these basics working well (and being discovered)
which drives most of the feedback we have so far. We need to get these under control.
The most notable gap, which we have cut but would rather have not, is having this story work
well when you don't have access to a Windows machine. Currently our performance tools are
Windows only and we have not fixed that. Thus in the V3.0 plan above you need access to a
Windows machine. We may be able to do something simple to mitigate this in the V3.0 timeframe
(flame graphs are a traditional 'easy' way of avoiding building UI on Linux), but this is
currently not in plan (if we have time we will look into it).

Просмотреть файл

@ -1,185 +0,0 @@
# Appendix/Raw Ideas
Everything after this is really not meant for consumption at this time. It represents working notes.
## New Features
* Decouple Diagnostic information from its presentation.
* We need tools that work in a highly integrated environment (which probably has
a number of prerequisites, e.g. App Insights) as well as in a 'dev startup'
scenario where trivial setup and working everywhere (no dependencies) is valuable.
We do this by making the data available as a standard REST API, and having
'minimal' tooling make a UI over that REST API (probably in JavaScript, just like VSCode).
These minimal tools will be open sourced and community driven, but alongside them
we can have full featured/integrated tools (e.g. AppInsights).
* Use JavaScript/HTML Electron for cross platform UI for these presentation tools. (Alternatively maybe Blazor (Web Assembly))
* Standard compliance can be done with a doc driven approach.
* If we pick a demo with a 3rd party that is committed to implementing their side
of the correlation standards, we can write up the demo that drives the features
needed to make that end-to-end scenario work.
* 'dotnet profile' - local machine ETW tracing. Definitely useful.
* 'dotnet monitor' - logs monitoring information. Ideally creates a format that other tools will just use,
thus it is really more of a format converter than anything else. Not a lot of work, but also useful
for people.
* Currently Azure Monitor Metrics does track some performance counters
* If we care about supporting existing Application Performance Monitoring solutions, the issue of how to have
multiple .NET Profilers connected to the same .NET Core runtime needs to be addressed.
## WORK-IN-PROGRESS
Elements of plan
* Well Instrumented Runtime
* Good Documentation on diagnostics / monitoring / profiling at [Microsoft Docs](https://docs.microsoft.com)
* Good support for Async (with Docs)
* Good support for Multi-Machine and/or MicroServices
* Works on all platforms / Architectures
* Monitoring costs very little, but you have the data you need
* This requires sampling of REQUESTS (causality flow) Event across tiers, Azure Functions, Service Fabric. Containers
Plan
1. EventPipe (No ETW) Works on Linux, Can do counters, all inst
2. EventCounter instrumentation in the Framework.
3. Named Pipe / HTTP REST API for accessing EventPipe information for monitoring.
4. Good Causality Instrumentation (so that Async -> Sync transformation works well)
[Geneva and One DS](https://microsoft.sharepoint.com/teams/MSWHub/_layouts/15/search.aspx?k=One%20DS&q=OneDS&t=y&v=search)
[OneDS Integration Presentation](https://microsoft.sharepoint.com/teams/osg_unistore/sce/Shared%20Documents/Forms/AllItems.aspx?id=%2Fteams%2Fosg_unistore%2Fsce%2FShared%20Documents%2FTechnical%20Reference%20Documents%2F1DSIntegrationApproaches.pptx&parent=%2Fteams%2Fosg_unistore%2Fsce%2FShared%20Documents%2FTechnical%20Reference%20Documents&embed=%7B%22o%22%3A%22https%3A%2F%2Fmicrosoft.sharepoint.com%22%2C%22id%22%3A%22d9ec5f58-a52f-483e-946a-2913b83935bc%22%2C%22af%22%3Atrue%7D)
[Geneva Logging Presentation](https://microsoft.sharepoint.com/teams/mswhub/_layouts/15/search.aspx?q=OneDS&t=y&v=search&k=OneDS#Default=%7B%22k%22%3A%22OneDS%22%2C%22r%22%3A%5B%7B%22n%22%3A%22LastModifiedTime%22%2C%22t%22%3A%5B%22range(2017-11-26T22%3A42%3A43.849Z%2C%20max%2C%20to%3D%5C%22le%5C%22)%22%5D%2C%22o%22%3A%22and%22%2C%22k%22%3Afalse%2C%22m%22%3Anull%7D%5D%2C%22l%22%3A1033%7D)
[Service Logging Library (SLL)](https://microsoft.sharepoint.com/teams/mswhub/_layouts/15/search.aspx?q=SLL%20Logging&t=y&v=search&k=SLL%20Logging#Default=%7B%22k%22%3A%22SLL%20Logging%22%2C%22r%22%3A%5B%7B%22n%22%3A%22LastModifiedTime%22%2C%22t%22%3A%5B%22range(2017-11-26T22%3A44%3A19.557Z%2C%20max%2C%20to%3D%5C%22le%5C%22)%22%5D%2C%22o%22%3A%22and%22%2C%22k%22%3Afalse%2C%22m%22%3Anull%7D%5D%2C%22l%22%3A1033%7D)
[Syslog.net](https://github.com/emertechie/SyslogNet)
[RFC 5424 Syslogd transport](https://datatracker.ietf.org/doc/rfc5424/?include_text=1)
[One DS Docs](https://1dsdocs.azurewebsites.net/getting-started/csharp-getting_started.html)
[One Data Strategy 1DS](https://microsoft.sharepoint.com/teams/WAG/EngSys/Shared%20Documents/Forms/AllItems.aspx?id=%2Fteams%2FWAG%2FEngSys%2FShared%20Documents%2FTelemetry%20Collaboration%2F1DS%2F1DS%20Vision%20and%20Strategy%20(2018).docx&parent=%2Fteams%2FWAG%2FEngSys%2FShared%20Documents%2FTelemetry%20Collaboration%2F1DS&embed=%7B%22o%22%3A%22https%3A%2F%2Fmicrosoft.sharepoint.com%22%2C%22id%22%3A%222079771a-df5c-4f23-8f38-f91963fda137%22%2C%22af%22%3Atrue%7D)
[Aria](https://aria.microsoft.com/)
[Aria Event Analytics](https://www.aria.ms/?ref=vc_banner)
[Aria Telemetry SDKs](https://aria.microsoft.com/developer/downloads/downloads/telemetry-sdks)
[Azure Data Explorer](https://azure.microsoft.com/en-us/pricing/details/data-explorer/) is a public version of Kusto, which is supplanting Geneva (Asimov, Aria)
Things I think we can improve
* Making it easy to instrument
* Sampling of requests
* Harmonize System.Activity and EventSource concept of Activity.
* Guidance on how to do instrumentation.
Version 3.0 work
1. Documentation
2. I can see Existing Live Metrics on Azure Portal (Uniformly, Linux, Windows)
Http 5XX, DataIn, DataOut, #Requests, ResponseTime.
Ave Working set, CPU, Private Bytes
Connections, Request in App Queue, Current Assemblies, Gen 0, 1, 2, Handle Count, Http Errors (various)
Windows I/O Read, Write, Other, Thread Count, # AppDomain
3. Other Metrics? (Exceptions) Some Metric that shows Async failure?
4. See Metrics Via Console / Local ?
5. Capture CPU Trace Locally. (Linux)
6. View Trace in Visual Studio View Traces locally (Linux)
7. Take a Heap Snapshot locally.
8. Extract a Heap Snapshot from a Crash Dump.
9. Diagnose Async starvation from Crash dump. (VS)
Work
1. Activity ID support for Application Insights
2. Creating a 'REST-LIKE' interface for EventPipe
3. Making EventPipe Multi-session
4. Test/Validate/Fix Causality View for Async
*******************
Has some work committed or in progress.
Experience 648209: .NET Core scenarios should support diagnostics as a perpendicular feature
In particular this one
[.NET Core 3.0 has great Fundamentals (Acquisition, Compliance, Security, Performance, Reliability, Globalization etc.)](https://devdiv.visualstudio.com/DevDiv/_workitems/edit/633055)
[EventPipe Work](https://github.com/dotnet/coreclr/issues?q=is%3Aopen+is%3Aissue+project%3Adotnet%2Fcoreclr%2F5)
*******************
Work items in [Issues](https://github.com/dotnet/diagnostics/issues)
[Migrate DotNetDiagnostics from to AspLabs to this repo](https://github.com/dotnet/diagnostics/issues/92)
[User Story: Stream logs to a console UI in the local machine scenario](https://github.com/dotnet/diagnostics/issues/91)
[User Story: Enable diagnosing common Async problems](https://github.com/dotnet/diagnostics/issues/90)
[User Story: Heap investigation from a crash dump on a different machine Priority 2](https://github.com/dotnet/diagnostics/issues/89)
[User Story: Enable local crash dump analysis with a standalone tool](https://github.com/dotnet/diagnostics/issues/88)
[User Story: Enable ad-hoc memory leak investigation in VS](https://github.com/dotnet/diagnostics/issues/87)
[User Story: Enable ad-hoc perf trace collection in VS](https://github.com/dotnet/diagnostics/issues/86)
[User Story: Expose .NET Perf Counters in the local machine scenario](https://github.com/dotnet/diagnostics/issues/85)
[User Story: Expose .NET Core Perf Counters for App Insights Live Metrics page](https://github.com/dotnet/diagnostics/issues/84)
[Provide managed APIs for our canonical set of runtime performance counters](https://github.com/dotnet/diagnostics/issues/83)
[Add docs for .Net Core diagnostic scenarios](https://github.com/dotnet/diagnostics/issues/81)
*******************
[All .NET Core Diagnostics](https://github.com/dotnet/diagnostics/issues)
Damian Edwards, Tom McDonald, Sourabh Shirhatti
*******************
[DumpAsync work item](https://github.com/dotnet/diagnostics/issues/90)
DumpAsync
```log
!DumpAsync [-addr <Object Address>]
[-mt <MethodTable address>]
[-type <partial type name>]
[-tasks]
[-completed]
[-fields]
[-stacks]
[-roots]
```
`!DumpAsync` traverses the garbage collected heap, looking for objects representing
async state machines as created when an async method's state is transferred to the
heap. This command recognizes async state machines defined as `async void`, `async Task`,
`async Task<T>`, `async ValueTask`, and `async ValueTask<T>`. It also optionally supports
any other tasks.
```log
"Usage: DumpAsync [-addr ObjectAddr] [-mt MethodTableAddr] [-type TypeName] [-tasks] [-completed] [-fields] [-stacks] [-roots]\n"
"[-addr ObjectAddr] => Only display the async object at the specified address.\n"
"[-mt MethodTableAddr] => Only display top-level async objects with the specified method table address.\n"
"[-type TypeName] => Only display top-level async objects whose type name includes the specified substring.\n"
"[-tasks] => Include Task and Task-derived objects, in addition to any state machine objects found.\n"
"[-completed] => Include async objects that represent completed operations but that are still on the heap.\n"
"[-fields] => Show the fields of state machines.\n"
"[-stacks] => Gather, output, and consolidate based on continuation chains / async stacks for discovered async objects.\n"
"[-roots] => Perform a gcroot on each rendered async object.\n"
```
[Stephen's Async working group slides](https://microsoft-my.sharepoint.com/:p:/p/stoub/EfbGD3TCYlFMkR1jG6XlXFkBx9UJ_Wr1y458IZUJ_fJ9Zg?e=Q8CXrg)
[Notes from the 10/23/18 working group meeting](https://microsoft-my.sharepoint.com/personal/stoub_microsoft_com/_layouts/15/WopiFrame.aspx?sourcedoc={934d02da-ac53-4dc3-8a64-33921d886c04}&action=edit&wd=target%28Untitled%20Section.one%7C709eb0e8-9290-4071-98a7-23c752b25f99%2FOct%205%2C%202018%7C6341db20-85e9-4468-8446-b13fe50da646%2F%29&wdorigin=703)

@ -1,212 +0,0 @@
# dotnet-counters
NOTE: This documentation page may contain information on some features that are still work-in-progress. For the most up-to-date documentation on the released version of `dotnet-counters`, please refer to its [official documentation](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-counters) page.
## Intro
dotnet-counters is a performance monitoring tool for ad-hoc health monitoring or first-level performance investigation. It can observe performance counter values that are published via the `EventCounter` API (https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventcounter). For example, you can quickly monitor things like the CPU usage or the rate of exceptions being thrown in your .NET Core application to see if there is anything suspicious before diving into a deeper performance investigation using PerfView or dotnet-trace.
## Install dotnet-counters
```
dotnet tool install --global dotnet-counters
```
## Using dotnet-counters
*SYNOPSIS*
dotnet-counters [--version]
[-h, --help]
<command> [<args>]
*OPTIONS*
--version
Display the version of the dotnet-counters utility.
-h, --help
Show command line help
*COMMANDS*
list Display a list of counter names and descriptions
ps Display a list of dotnet processes that can be monitored
monitor Display periodically refreshing values of selected counters
collect Periodically collect selected counter values and export them into a specified file format for post-processing.
*PS*
dotnet-counters ps
Display a list of dotnet processes that can be monitored.
Examples:
> dotnet-counters ps
15683 WebApi /home/suwhang/repos/WebApi/WebApi
16324 dotnet /usr/local/share/dotnet/dotnet
*LIST*
dotnet-counters list [-h|--help]
Display a list of counter names and descriptions, grouped by provider.
-h, --help
Show command line help
Examples:
> dotnet-counters list
Showing well-known counters only. Specific processes may support additional counters.
System.Runtime
cpu-usage Amount of time the process has utilized the CPU (ms)
working-set Amount of working set used by the process (MB)
gc-heap-size Total heap size reported by the GC (MB)
gen-0-gc-count Number of Gen 0 GCs / sec
gen-1-gc-count Number of Gen 1 GCs / sec
gen-2-gc-count Number of Gen 2 GCs / sec
exception-count Number of Exceptions / sec
*MONITOR*
### Examples:
1. Monitoring all counters from `System.Runtime` at a refresh interval of 3 seconds:
> dotnet-counters monitor --process-id 1902 System.Runtime
Press p to pause, r to resume, q to quit.
System.Runtime:
CPU Usage (%) 24
Working Set (MB) 1982
GC Heap Size (MB) 811
Gen 0 GC / second 20
Gen 1 GC / second 4
Gen 2 GC / second 1
Number of Exceptions / sec 4
2. Monitoring just CPU usage and GC heap size from `System.Runtime` at a refresh interval of 5 seconds:
> dotnet-counters monitor --process-id 1902 --refresh-interval 5 System.Runtime[cpu-usage,gc-heap-size,exception-count]
Press p to pause, r to resume, q to quit.
System.Runtime:
CPU Usage (%) 24
GC Heap Size (MB) 811
Number of Exceptions / sec 4
3. Monitoring EventCounter values from a user-defined EventSource: (see https://github.com/dotnet/corefx/blob/master/src/System.Diagnostics.Tracing/documentation/EventCounterTutorial.md on how to do this.)
> dotnet-counters monitor --process-id 1902 Samples-EventCounterDemos-Minimal
Press p to pause, r to resume, q to quit.
request 100
4. Launch `my-aspnet-server.exe` with `arg1` and `arg2` as command-line arguments and monitor its GC heap size and working set from startup.
NOTE: This works for apps running .NET 5.0 or later only.
```console
> dotnet-counters monitor --counters System.Runtime[gc-heap-size,working-set] -- my-aspnet-server.exe arg1 arg2
Press p to pause, r to resume, q to quit.
Status: Running
[System.Runtime]
GC Heap Size (MB) 39
Working Set (MB) 59
```
### Syntax:
dotnet-counters monitor [-h|--help]
[-p|--process-id <pid>]
[--refresh-interval <sec>]
[--counters <counters>]
[-- <command>]
Display periodically refreshing values of selected counters
-h, --help
Show command line help
-p,--process-id
The ID of the process that will be monitored
--refresh-interval
The number of seconds to delay between updating the displayed counters
--counters
A comma-separated list of counters. Counters can be specified as provider_name[:counter_name]. If the
provider_name is used without a qualifying counter_name then all counters will be shown. To discover
provider and counter names, use the list command.
-- <command> (for target applications running .NET 5.0 or later only)
After the collection configuration parameters, the user can append `--` followed by a command to start a .NET application with at least a 5.0 runtime. `dotnet-counters` will launch a process with the provided command and collect the requested metrics.
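For example, a hypothetical invocation that monitors two specific `System.Runtime` counters plus every counter from the `Microsoft.AspNetCore.Hosting` provider (reusing the process id from the earlier examples) might look like this:
```console
> dotnet-counters monitor --process-id 1902 --counters System.Runtime[cpu-usage,working-set],Microsoft.AspNetCore.Hosting
```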
*COLLECT*
### Examples:
1. Collect the runtime performance counters at a refresh interval of 10 seconds and export it as a JSON file named "test.json".
```
dotnet-counters collect --process-id 863148 --refresh-interval 10 --output test --format json
```
2. Collect the runtime performance counters as well as the ASP.NET hosting performance counters at the default refresh interval (1 second) and export it as a CSV file named "mycounter.csv".
```
dotnet-counters collect --process-id 863148 --output mycounter --format csv System.Runtime Microsoft.AspNetCore.Hosting
```
3. Launch `my-aspnet-server` and collect the assembly-count counter from its startup.
NOTE: This works for apps running .NET 5.0 or later only.
```bash
$ dotnet-counters collect --counters System.Runtime[assembly-count] -- my-aspnet-server.exe
```
### Syntax:
dotnet-counters collect [-h|--help]
[-p|--process-id <pid>]
[-n|--name <name>]
[-o|--output <name>]
[--format <csv|json>]
[--refresh-interval <sec>]
[--counters <counters>]
[-- <command>]
Periodically collect selected counter values and export them into a specified file format for post-processing.
-h, --help
Show command line help
-p,--process-id
The ID of the process that will be monitored
-n,--name
The name of the process that will be monitored. This can be specified in place of process-id.
-o, --output
The name of the output file
--format
The format to be exported. Currently available: csv, json
--refresh-interval
The number of seconds to delay between updating the displayed counters
--counters
A comma-separated list of counters. Counters can be specified as provider_name[:counter_name]. If the
provider_name is used without a qualifying counter_name then all counters will be shown. To discover
provider and counter names, use the list command.
-- <command> (for target applications running .NET 5.0 or later only)
After the collection configuration parameters, the user can append `--` followed by a command to start a .NET application with at least a 5.0 runtime. `dotnet-counters` will launch a process with the provided command and collect the requested metrics.
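As a sketch of how these options combine, the following collects `System.Runtime` counters from a process selected by name rather than by id (the process name is a placeholder) and writes CSV output:
```console
> dotnet-counters collect --name my-aspnet-server --refresh-interval 5 --format csv --output mycounter --counters System.Runtime
```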

@ -1,135 +0,0 @@
Dump collection and analysis utility (dotnet-dump)
==================================================
NOTE: This documentation page may contain information on some features that are still work-in-progress. For the most up-to-date documentation on the released version of `dotnet-dump`, please refer to its [official documentation](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-dump) page.
## Intro
The dotnet-dump CLI global tool is a way to collect and analyze the managed data structures in Windows and Linux dumps, all without any native debugger involved. This makes creating a managed dump easier, and on some platforms like Alpine Linux or Linux ARM32/ARM64 (where a fully working lldb isn't available) it makes creating a managed dump possible. The dotnet-dump tool will allow you to run SOS commands to analyze crashes and the GC, but it isn't a native debugger, so things like displaying the native stack frames aren't supported.
Here's a table showing how dotnet-dump fits into your dump debugging options:
| | Windows native dumps | Windows managed, e.g. from `dotnet-dump collect` | Linux system dump | Linux from `dotnet-dump collect` | macOS system dump | macOS from `dotnet-dump collect` |
|:-----------------------|:---------------------|:----------------------------------------------|:------------------|:---------------------------------|:------------------|:---------------------------------|
| Visual Studio | yes | yes | yes (1) | yes (1) | no, need lldb | no |
| Windbg (including SOS) | yes | yes | yes (2) | yes (2) | no, need lldb | no |
| `dotnet-dump analyze` | no | yes | no | yes | no | yes |
(1) [Requires Visual Studio 2019 version 16.8 Preview 3](https://devblogs.microsoft.com/cppblog/debug-linux-core-dumps-in-visual-studio/) or later.
(2) [Requires WinDbg Preview version 1.0.2007.01003](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/windbg-what-is-new-preview) or later.
## Installing dotnet-dump
The first step is to install the dotnet-dump CLI global tool. This requires the .NET Core SDK 2.1 or greater to be installed. If you see the error message `Tool 'dotnet-dump' is already installed`, you will need to uninstall the global tool (see below).
$ dotnet tool install -g dotnet-dump
You can invoke the tool using the following command: dotnet-dump
Tool 'dotnet-dump' (version '3.0.47001') was successfully installed.
If this is the first global tool installed, or you get the message `Could not execute because the specified command or file was not found.`, you need to add `$HOME/.dotnet/tools` to your path.
export PATH=$PATH:$HOME/.dotnet/tools
## Using dotnet-dump
The next step is to collect a dump. This can be skipped if a core dump has already been generated by the operating system or [createdump](https://github.com/dotnet/runtime/blob/master/docs/design/coreclr/botr/xplat-minidump-generation.md#configurationpolicy) on Linux. The default dump type (--type option) is currently "full".
On Linux, the .NET runtime version must be 3.0 or greater. On Windows, `dotnet-dump collect` will work with any version of the .NET runtime.
$ dotnet-dump collect --process-id 1902
Writing minidump to file ./core_20190226_135837
Written 98983936 bytes (24166 pages) to core file
Complete
If you are running under docker, dump collection requires the SYS_PTRACE docker capability (--cap-add=SYS_PTRACE or --privileged).
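For example, you would typically grant that capability when starting the target container (a minimal sketch; the image name is a placeholder):
```console
$ docker run --cap-add=SYS_PTRACE mycompany/mywebapp:latest
```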
Now analyze the core dump.
$ dotnet-dump analyze ./core_20190226_135850
Loading core dump: ./core_20190226_135850
Ready to process analysis commands. Type 'help' to list available commands or 'help [command]' to get detailed help on a command.
Type 'quit' or 'exit' to exit the session.
>
This brings up an interactive command processor that accepts commands like:
> clrstack
OS Thread Id: 0x573d (0)
Child SP IP Call Site
00007FFD28B42C58 00007fb22c1a8ed9 [HelperMethodFrame_PROTECTOBJ: 00007ffd28b42c58] System.RuntimeMethodHandle.InvokeMethod(System.Object, System.Object[], System.Signature, Boolean, Boolean)
00007FFD28B42DD0 00007FB1B1334F67 System.Reflection.RuntimeMethodInfo.Invoke(System.Object, System.Reflection.BindingFlags, System.Reflection.Binder, System.Object[], System.Globalization.CultureInfo) [/root/coreclr/src/mscorlib/src/System/Reflection/RuntimeMethodInfo.cs @ 472]
00007FFD28B42E20 00007FB1B18D33ED SymbolTestApp.Program.Foo4(System.String) [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 54]
00007FFD28B42ED0 00007FB1B18D2FC4 SymbolTestApp.Program.Foo2(Int32, System.String) [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 29]
00007FFD28B42F00 00007FB1B18D2F5A SymbolTestApp.Program.Foo1(Int32, System.String) [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 24]
00007FFD28B42F30 00007FB1B18D168E SymbolTestApp.Program.Main(System.String[]) [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 19]
00007FFD28B43210 00007fb22aa9cedf [GCFrame: 00007ffd28b43210]
00007FFD28B43610 00007fb22aa9cedf [GCFrame: 00007ffd28b43610]
To see the unhandled exception if your app was terminated:
> pe -lines
Exception object: 00007fb18c038590
Exception type: System.Reflection.TargetInvocationException
Message: Exception has been thrown by the target of an invocation.
InnerException: System.Exception, Use !PrintException 00007FB18C038368 to see more.
StackTrace (generated):
SP IP Function
00007FFD28B42DD0 0000000000000000 System.Private.CoreLib.dll!System.RuntimeMethodHandle.InvokeMethod(System.Object, System.Object[], System.Signature, Boolean, Boolean)
00007FFD28B42DD0 00007FB1B1334F67 System.Private.CoreLib.dll!System.Reflection.RuntimeMethodInfo.Invoke(System.Object, System.Reflection.BindingFlags, System.Reflection.Binder, System.Object[], System.Globalization.CultureInfo)+0xa7 [/root/coreclr/src/mscorlib/src/System/Reflection/RuntimeMethodInfo.cs @ 472]
00007FFD28B42E20 00007FB1B18D33ED SymbolTestApp.dll!SymbolTestApp.Program.Foo4(System.String)+0x15d [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 54]
00007FFD28B42ED0 00007FB1B18D2FC4 SymbolTestApp.dll!SymbolTestApp.Program.Foo2(Int32, System.String)+0x34 [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 29]
00007FFD28B42F00 00007FB1B18D2F5A SymbolTestApp.dll!SymbolTestApp.Program.Foo1(Int32, System.String)+0x3a [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 24]
00007FFD28B42F30 00007FB1B18D168E SymbolTestApp.dll!SymbolTestApp.Program.Main(System.String[])+0x6e [/home/mikem/builds/SymbolTestApp/SymbolTestApp/SymbolTestApp.cs @ 19]
StackTraceString: <none>
HResult: 80131604
To display the help:
> help
Usage:
dotnet-dump [command]
Commands:
exit, quit Exit interactive mode.
help, soshelp <command> Display help for a command.
lm, modules Displays the native modules in the process.
threads, setthread <threadid> Sets or displays the current thread id for the SOS commands.
clrstack <arguments> Provides a stack trace of managed code only.
clrthreads <arguments> List the managed threads running.
dumpasync <arguments> Displays info about async state machines on the garbage-collected heap.
dumpassembly <arguments> Displays details about an assembly.
dumpclass <arguments> Displays information about a EE class structure at the specified address.
dumpdelegate <arguments> Displays information about a delegate.
dumpdomain <arguments> Displays information all the AppDomains and all assemblies within the domains.
dumpheap <arguments> Displays info about the garbage-collected heap and collection statistics about objects.
dumpil <arguments> Displays the Microsoft intermediate language (MSIL) that is associated with a managed method.
dumplog <arguments> Writes the contents of an in-memory stress log to the specified file.
dumpmd <arguments> Displays information about a MethodDesc structure at the specified address.
dumpmodule <arguments> Displays information about a EE module structure at the specified address.
dumpmt <arguments> Displays information about a method table at the specified address.
dumpobj <arguments> Displays info about an object at the specified address.
dso, dumpstackobjects <arguments> Displays all managed objects found within the bounds of the current stack.
eeheap <arguments> Displays info about process memory consumed by internal runtime data structures.
finalizequeue <arguments> Displays all objects registered for finalization.
gcroot <arguments> Displays info about references (or roots) to an object at the specified address.
gcwhere <arguments> Displays the location in the GC heap of the argument passed in.
ip2md <arguments> Displays the MethodDesc structure at the specified address in code that has been JIT-compiled.
name2ee <arguments> Displays the MethodTable structure and EEClass structure for the specified type or method in the specified module.
pe, printexception <arguments> Displays and formats fields of any object derived from the Exception class at the specified address.
syncblk <arguments> Displays the SyncBlock holder info.
histclear <arguments> Releases any resources used by the family of Hist commands.
histinit <arguments> Initializes the SOS structures from the stress log saved in the debuggee.
histobj <arguments> Examines all stress log relocation records and displays the chain of garbage collection relocations that may have led to the address passed in as an argument.
histobjfind <arguments> Displays all the log entries that reference an object at the specified address.
histroot <arguments> Displays information related to both promotions and relocations of the specified root.
setsymbolserver <arguments> Enables the symbol server support
soshelp <arguments> Displays all available commands when no parameter is specified, or displays detailed help information about the specified command. soshelp <command>
On Microsoft .NET Core SDK Linux docker images, this command can throw an `Unhandled exception: System.DllNotFoundException: Unable to load shared library 'libdl.so' or one of its dependencies` exception. To work around this problem, install the "libc6-dev" package.
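For example, on a Debian-based image this would typically be (assuming `apt-get` is available in the container):
```console
$ apt-get update && apt-get install -y libc6-dev
```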
## Uninstalling dotnet-dump
$ dotnet tool uninstall -g dotnet-dump
Tool 'dotnet-dump' (version '3.0.47001') was successfully uninstalled.

@ -1,83 +0,0 @@
# Heap Analysis Tool (dotnet-gcdump)
NOTE: This documentation page may contain information on some features that are still work-in-progress. For the most up-to-date documentation on the released version of `dotnet-gcdump`, please refer to its [official documentation](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-gcdump) page.
## Intro
The dotnet-gcdump tool is a cross-platform CLI tool that collects gcdumps of live .NET processes. It is built using the EventPipe technology which is a cross-platform alternative to ETW on Windows. Gcdumps are created by triggering a GC
in the target process, turning on special events, and regenerating the graph of object roots from the event stream. This allows for gcdumps to be collected while the process is running with minimal overhead. These dumps are useful for
several scenarios:
* comparing the number of objects on the heap at several points in time
* analyzing roots of objects (answering questions like, "what still has a reference to this type?")
* collecting general statistics about the counts of objects on the heap.
dotnet-gcdump can be used on Linux, Mac, and Windows with runtime versions 3.1 or newer.
## Installing dotnet-gcdump
The first step is to install the dotnet-gcdump CLI global tool.
```cmd
$ dotnet tool install --global dotnet-gcdump
You can invoke the tool using the following command: dotnet-gcdump
Tool 'dotnet-gcdump' (version '3.0.47001') was successfully installed.
```
## Using dotnet-gcdump
In order to collect gcdumps using dotnet-gcdump, you will need to:
- First, find out the process identifier (pid) of the target .NET application.
- On Windows, there are options such as using the task manager or the `tasklist` command in the cmd prompt.
- On Linux, the simplest option may be to use `pidof` in the terminal window.
You may also use the `dotnet-gcdump ps` command to find out what .NET processes are running, along with their process IDs.
- Then, run the following command:
```cmd
dotnet-gcdump collect --process-id <PID>
Writing gcdump to 'C:\git\diagnostics\src\Tools\dotnet-gcdump\20191023_042913_24060.gcdump'...
Finished writing 486435 bytes.
```
- Note that collecting a gcdump can take several seconds, depending on the size of the application.
## Viewing the gcdump captured from dotnet-gcdump
On Windows, `.gcdump` files can be viewed in [PerfView](https://github.com/microsoft/perfview) for analysis or in Visual Studio. There is not currently a way of opening a `.gcdump` on non-Windows platforms.
You can collect multiple `.gcdump`s and open them simultaneously in Visual Studio to get a comparison experience.
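For instance, a simple before/after comparison might look like the following sketch (file names and timing are illustrative):
```cmd
dotnet-gcdump collect --process-id <PID> --output before.gcdump
REM ... reproduce the suspected leak or let the app run for a while ...
dotnet-gcdump collect --process-id <PID> --output after.gcdump
```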
## Known Caveats
- There is no type information in the gcdump
Prior to .NET Core 3.1, there was an issue where a type cache was not cleared between gcdumps when they were invoked with EventPipe. This resulted in the events needed for determining type information not being sent for the second and subsequent gcdumps. This was fixed in .NET Core 3.1-preview2.
- COM and static types aren't in the gcdump
Prior to .NET Core 3.1-preview2, there was an issue where static and COM types weren't sent when the gcdump was invoked via EventPipe. This has been fixed in .NET Core 3.1-preview2.
## *dotnet-gcdump* help
```cmd
collect:
Collects a gcdump from a currently running process
Usage:
dotnet-gcdump collect [options]
Options:
-p, --process-id <pid> The process to collect the gcdump from
-n, --name <name> The name of the process to collect the gcdump from.
-o, --output <gcdump-file-path> The path where collected gcdumps should be written. Defaults to '.\YYYYMMDD_HHMMSS_<pid>.gcdump'
where YYYYMMDD is Year/Month/Day and HHMMSS is Hour/Minute/Second. Otherwise, it is the full path
and file name of the dump.
-v, --verbose Output the log while collecting the gcdump
-t, --timeout <timeout>          Give up on collecting the gcdump if it takes longer than this many seconds. The default value is 30s
```
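As another hedged example combining the options above, the following targets a process by name (the name is a placeholder), extends the timeout, and writes to an explicit path:
```cmd
dotnet-gcdump collect --name MyService --timeout 120 --output C:\dumps\MyService.gcdump
```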

@ -1,285 +0,0 @@
# Trace for performance analysis utility (dotnet-trace)
NOTE: This documentation page may contain information on some features that are still work-in-progress. For the most up-to-date documentation on the released version of `dotnet-trace`, please refer to its [official documentation](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-trace) page.
## Intro
The dotnet-trace tool is a cross-platform CLI global tool that enables the collection of .NET Core traces of a running process without any native profiler involved. It is built around the EventPipe technology of the .NET Core runtime as a cross-platform alternative to ETW on Windows and LTTng on Linux, which each work on only a single platform. With EventPipe/dotnet-trace, we are trying to deliver the same experience on Windows, Linux, and macOS. dotnet-trace can be used with any .NET Core application running .NET Core 3.0 Preview 5 or later.
## Installing dotnet-trace
The first step is to install the dotnet-trace CLI global tool.
```cmd
$ dotnet tool install --global dotnet-trace
You can invoke the tool using the following command: dotnet-trace
Tool 'dotnet-trace' (version '3.0.47001') was successfully installed.
```
## Using dotnet-trace
In order to collect traces using dotnet-trace, you will need to:
- First, find out the process identifier (pid) of the .NET Core 3.0 application (running a Preview 5 or later build) to collect traces from.
- On Windows, there are options such as using the task manager or the `tasklist` command on the cmd window.
- On Linux, the simplest option may be to use `pidof` in the terminal window.
You may also use the `dotnet-trace ps` command to find out what .NET Core processes are running, along with their process IDs.
- Then, run the following command:
```cmd
dotnet-trace collect --process-id <PID> --providers Microsoft-Windows-DotNETRuntime
Press <Enter> to exit...
Connecting to process: <Full-Path-To-Process-Being-Profiled>/dotnet.exe
Collecting to file: <Full-Path-To-Trace>/trace.nettrace
Session Id: <SessionId>
Recording trace 721.025 (KB)
```
- Finally, stop collection by pressing the \<Enter> key, and *dotnet-trace* will finish logging events to the *trace.nettrace* file.
### Using dotnet-trace to collect counter values over time
If you are trying to use EventCounter for basic health monitoring in performance-sensitive settings like production environments and you want to collect traces instead of watching them in real-time, you can do that with `dotnet-trace` as well.
For example, if you want to enable and collect runtime performance counter values, you can use the following command:
```cmd
dotnet-trace collect --process-id <PID> --providers System.Runtime:0:1:EventCounterIntervalSec=1
```
This tells the runtime to report counter values once every second, which is suitable for lightweight health monitoring. Replacing `EventCounterIntervalSec=1` with a higher value (say 60) will allow you to collect a smaller trace with less granularity in the counter data.
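For example, the less granular one-minute variant mentioned above would look like this:
```cmd
dotnet-trace collect --process-id <PID> --providers System.Runtime:0:1:EventCounterIntervalSec=60
```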
If you want to reduce the overhead (and trace size) even further, you can use the following command to disable runtime events and the managed stack profiler.
```cmd
dotnet-trace collect --process-id <PID> --providers System.Runtime:0:1:EventCounterIntervalSec=1,Microsoft-Windows-DotNETRuntime:0:1,Microsoft-DotNETCore-SampleProfiler:0:1
```
## Using dotnet-trace to launch a child process and trace it from startup.
Sometimes it may be useful to collect a trace of a process from its startup. For apps running .NET 5.0 or later, it is possible to do this by using dotnet-trace.
This will launch `hello.exe` with `arg1` and `arg2` as its command line arguments and collect a trace from its runtime startup:
```console
dotnet-trace collect -- hello.exe arg1 arg2
```
The preceding command generates output similar to the following:
```console
No profile or providers specified, defaulting to trace profile 'cpu-sampling'
Provider Name Keywords Level Enabled By
Microsoft-DotNETCore-SampleProfiler 0x0000F00000000000 Informational(4) --profile
Microsoft-Windows-DotNETRuntime 0x00000014C14FCCBD Informational(4) --profile
Process : E:\temp\gcperfsim\bin\Debug\net5.0\gcperfsim.exe
Output File : E:\temp\gcperfsim\trace.nettrace
[00:00:00:05] Recording trace 122.244 (KB)
Press <Enter> or <Ctrl+C> to exit...
```
You can stop collecting the trace by pressing `<Enter>` or `<Ctrl + C>`. Doing this will also exit `hello.exe`.
### NOTE
* Launching `hello.exe` via dotnet-trace will redirect its input/output, so by default you will not be able to interact with it on the console. Use the `--show-child-io` switch to keep its stdin/stdout attached to your terminal (see the example after this list).
* Exiting the tool via CTRL+C or SIGTERM will safely end both the tool and the child process.
* If the child process exits before the tool, the tool will exit as well and the trace should be safely viewable.
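For example, to trace an interactive console app from startup while keeping its stdin/stdout attached to your terminal (the application name and arguments are illustrative):
```console
dotnet-trace collect --show-child-io -- hello.exe arg1 arg2
```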
## Viewing the trace captured from dotnet-trace
On Windows, `.nettrace` files can be viewed in PerfView (https://github.com/microsoft/perfview) for analysis, just like traces collected with ETW or LTTng. For traces collected on Linux, you can move the trace to a Windows machine and view it in PerfView.
If you would rather view the trace on a Linux machine, you can do this by changing the output format of `dotnet-trace` to `speedscope`. You can change the output file format using the `-f|--format` option - `-f speedscope` will make `dotnet-trace` produce a speedscope file. You can currently choose between `nettrace` (the default option) and `speedscope`. Speedscope files can be opened at https://www.speedscope.app.
Note: The .NET Core runtime generates traces in the `nettrace` format, and they are converted to speedscope (if specified) after the trace is completed. Since some conversions may result in loss of data, the original `nettrace` file is preserved next to the converted file.
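For example, the following collects directly into the speedscope format using the `--format` option documented below:
```cmd
dotnet-trace collect --process-id <PID> --format speedscope
```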
## Known Caveats
- Perfview/VS aren't showing any callstacks
There was a regression in Preview6 (https://github.com/dotnet/coreclr/issues/25046) that dropped these callstacks. It has since been fixed in daily builds. If you want to demo callstacks, you can use Preview5, Preview7 (which will be out soon), or daily builds.
- "dotnet-trace used to work but now it's giving me `Unable to create a session`"
Between .NET Core Preview 5 and Preview 6, there were breaking changes in the runtime. To use the Preview 6 version of dotnet-trace, you need to be using it on an application with Preview 6 of the runtime, and the same holds the other way around: to trace an application using .NET Core Preview 6 or later, you need to use the latest version of dotnet-trace.
## Commonly used keywords for the *Microsoft-Windows-DotNETRuntime* provider
Runtime keyword name | Keyword Value | Description
------------------------------ | ----------------: | ------------
None | 0 |
All | FFFFFFFFFFFFFFBF | All does not include start-enumeration. It just is not that useful.
GC | 1 | Logging when garbage collections and finalization happen.
GCHandle | 2 | Events when GC handles are set or destroyed.
Binder | 4 |
Loader | 8 | Logging when modules actually get loaded and unloaded.
Jit | 10 | Logging when Just in time (JIT) compilation occurs.
NGen | 20 | Logging when precompiled native (NGEN) images are loaded.
StartEnumeration | 40 | Indicates that on attach or module load, a rundown of all existing methods should be done.
StopEnumeration | 80 | Indicates that on detach or process shutdown, a rundown of all existing methods should be done.
Security | 400 | Events associated with validating security restrictions.
AppDomainResourceManagement | 800 | Events for logging resource consumption on an app-domain level granularity.
JitTracing | 1000 | Logging of the internal workings of the Just In Time compiler. This is fairly verbose. It details decisions about interesting optimization (like inlining and tail call).
Interop | 2000 | Log information about code thunks that transition between managed and unmanaged code.
Contention | 4000 | Log when lock contention occurs. (Monitor.Enters actually blocks).
Exception | 8000 | Log exception processing.
Threading | 10000 | Log events associated with the threadpool, and other threading events.
JittedMethodILToNativeMap | 20000 | Dump the native to IL mapping of any method that is JIT compiled. (V4.5 runtimes and above).
OverrideAndSuppressNGenEvents | 40000 | If enabled will suppress the rundown of NGEN events on V4.0 runtime (has no effect on Pre-V4.0 runtimes).
SupressNGen | 40000 | This suppresses NGEN events on V4.0 (where you have NGEN PDBs), but not on V2.0 (which does not know about this bit and also does not have NGEN PDBS).
JITSymbols | 60098 | What is needed to get symbols for JIT compiled code.<br>This is equivalent to `Jit+JittedMethodILToNativeMap+Loader+OverrideAndSuppressNGenEvents+StopEnumeration`
Type | 80000 | Enables the 'BulkType' event.
GCHeapDump | 100000 | Enables the events associated with dumping the GC heap.
GCSampledObjectAllocationHigh | 200000 | Enables allocation sampling at the 'fast' rate, sampled to limit to 100 allocations per second per type. This is good for most detailed performance investigations.<br>Note that this DOES update the allocation path to be slower and only works if the process starts with this on.
GCHeapSurvivalAndMovement | 400000 | Enables events associated with object movement or survival with each GC.
GCHeapCollect | 800000 | Triggers a GC. Can pass a 64 bit value that will be logged with the GC Start event so you know which GC you actually triggered.
GCHeapAndTypeNames | 1000000 | Indicates that you want type names looked up and put into the events (not just meta-data tokens).
GCHeapSnapshot | 1980001 | This provides the flags commonly needed to take a .NET heap snapshot with EventPipe.<br>This is equivalent to `GC+Type+GCHeapDump+GCHeapCollect+GCHeapAndTypeNames`
GCSampledObjectAllocationLow | 2000000 | Enables allocation sampling at the 'slow' rate, sampled to limit to 5 allocations per second per type. This is reasonable for monitoring. Note that this DOES update the allocation path to be slower and only works if the process starts with this on.
GCAllObjectAllocation | 2200000 | Turns on capturing the stack and type of every object allocation made by the .NET Runtime. This is only supported after V4.5.3 (late 2014). This can be very verbose and you should seriously consider using GCSampledObjectAllocationHigh instead (and GCSampledObjectAllocationLow for production scenarios).
Stack | 40000000 | Also log the stack trace of events for which this is valuable.
ThreadTransfer | 80000000 | This allows tracing work item transfer events (thread pool enqueue/dequeue/ioenqueue/iodequeue/a.o.).
Debugger | 100000000 | .NET Debugger events
Monitoring | 200000000 | Events intended for monitoring on an ongoing basis.
Codesymbols | 400000000 | Events that will dump PDBs of dynamically generated assemblies to the EventPipe stream.
Default | 4C14FCCBD | Recommended default flags (a good compromise on verbosity).
[source](https://github.com/Microsoft/perfview/blob/master/src/TraceEvent/Parsers/ClrTraceEventParser.cs#L41)
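As a sketch of how these keyword values can be used with the provider syntax described later on this page, the following enables the `GCHeapSnapshot` set of keywords (value `0x1980001` from the table above) on the runtime provider at informational verbosity:
```cmd
dotnet-trace collect --process-id <PID> --providers Microsoft-Windows-DotNETRuntime:0x1980001:4
```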
## More information on .NET Providers
Provider Name | Information
-------------------------------------: | ------------
Microsoft-Windows-DotNETRuntime | [The Runtime Provider](https://docs.microsoft.com/en-us/dotnet/framework/performance/clr-etw-providers#the-runtime-provider)<br>[CLR Runtime Keywords](https://docs.microsoft.com/en-us/dotnet/framework/performance/clr-etw-keywords-and-levels#runtime)
Microsoft-Windows-DotNETRuntimeRundown | [The Rundown Provider](https://docs.microsoft.com/en-us/dotnet/framework/performance/clr-etw-providers#the-rundown-provider)<br>[CLR Rundown Keywords](https://docs.microsoft.com/en-us/dotnet/framework/performance/clr-etw-keywords-and-levels#rundown)
Microsoft-DotNETCore-SampleProfiler | Enable the sample profiler
## Example Providers
See the help text below for the encoding of Providers.
Examples of valid specifications:
```
Microsoft-Windows-DotNETRuntime:0xFFF:5
Microsoft-Diagnostics-DiagnosticSource:0x00000003:5:FilterAndPayloadSpecs="Microsoft.EntityFrameworkCore/Microsoft.EntityFrameworkCore.Database.Command.CommandExecuting@Activity2Start:Command.CommandText;\r\nMicrosoft.EntityFrameworkCore/Microsoft.EntityFrameworkCore.Database.Command.CommandExecuted@Activity2Stop:"
```
If the provider you are using makes use of filter strings, make sure you
are properly encoding the key-value arguments. Values that contain
`;` or `=` characters need to be surrounded by double quotes `"`.
Depending on your shell environment, you may need to escape the `"`
characters and/or surround the entire argument in quotes, e.g.,
```bash
$ dotnet trace collect -p 1234 --providers 'Microsoft-Diagnostics-DiagnosticSource:0x00000003:5:FilterAndPayloadSpecs=\"Microsoft.EntityFrameworkCore/Microsoft.EntityFrameworkCore.Database.Command.CommandExecuting@Activity2Start:Command.CommandText;\r\nMicrosoft.EntityFrameworkCore/Microsoft.EntityFrameworkCore.Database.Command.CommandExecuted@Activity2Stop:\"'
```
## *dotnet-trace* help
```cmd
dotnet.exe run -c Release --no-restore --no-build -- collect --help
collect:
Collects a diagnostic trace from a currently running process
Usage:
dotnet-trace collect [options]
Options:
-h, --help
Shows this help message and exit.
-p, --process-id <pid>
The process to collect the trace from
-n, --name <name>
The name of the process to collect the trace from.
-o, --output <trace-file-path>
The output path for the collected trace data. If not specified it defaults to 'trace.nettrace'
--profile
A named pre-defined set of provider configurations that allows common tracing scenarios to be specified
succinctly. The options are:
cpu-sampling Useful for tracking CPU usage and general .NET runtime information. This is the default
option if no profile or providers are specified.
gc-verbose Tracks GC collection and sampled object allocations
gc-collect Tracks GC collection only at very low overhead
--providers <list-of-comma-separated-providers>
A list of comma separated EventPipe providers to be enabled.
This option adds to the configuration already provided via the --profile argument. If the same provider is configured in both places, this option takes precedence.
A provider consists of the name and optionally the keywords, verbosity level, and custom key/value pairs.
The string is written 'Provider[,Provider]'
Provider format: KnownProviderName[:[Keywords][:[Level][:[KeyValueArgs]]]]
KnownProviderName - The provider's name
Keywords - 8 character hex number bit mask
Level - A number in the range [0, 5]
0 - Always
1 - Critical
2 - Error
3 - Warning
4 - Informational
5 - Verbose
KeyValueArgs - A semicolon separated list of key=value
KeyValueArgs format: '[key1=value1][;key2=value2]'
note: values that contain ';' or '=' characters should be surrounded by double quotes ("), e.g., 'key="value;with=symbols";key2=value2'
--clrevents <clrevents>
List of CLR events to collect.
The string should be in the format '[Keyword1]+[Keyword2]+...+[KeywordN]'. For example: --clrevents GC+GCHandle
List of CLR event keywords:
* GC
* GCHandle
* Fusion
* Loader
* JIT
* NGEN
* StartEnumeration
* EndEnumeration
* Security
* AppDomainResourceManagement
* JITTracing
* Interop
* Contention
* Exception
* Threading
* JittedMethodILToNativeMap
* OverrideAndSuppressNGENEvents
* Type
* GCHeapDump
* GCSampledObjectAllocationHigh
* GCHeapSurvivalAndMovement
* GCHeapCollect
* GCHeapAndTypeNames
* GCSampledObjectAllocationLow
* PerfTrack
* Stack
* ThreadTransfer
* Debugger
--buffersize <Size>
Sets the size of the in-memory circular buffer in megabytes. Default 256 MB.
-f, --format
The format of the output trace file. This defaults to "nettrace" on Windows and "speedscope" on other OSes.
-- <command> (for target applications running .NET 5.0 or later only)
The command to run to launch a child process and trace from startup.

@ -1,95 +0,0 @@
Installing SOS on Linux and MacOS
=================================
The first step is to install the dotnet-sos CLI global tool. This requires the .NET Core SDK 2.1 or greater to be installed. If you see the error message `Tool 'dotnet-sos' is already installed`, you will need to uninstall the global tool (see below).
$ dotnet tool install -g dotnet-sos
You can invoke the tool using the following command: dotnet-sos
Tool 'dotnet-sos' (version '3.0.47001') was successfully installed.
The next step is to use this global tool to install SOS.
$ dotnet-sos install
Installing SOS to /home/mikem/.dotnet/sos from /home/mikem/.dotnet/tools/.store/dotnet-sos/3.0.47001/dotnet-sos/3.0.47001/tools/netcoreapp2.1/any/linux-x64
Creating installation directory...
Copying files...
Updating existing /home/mikem/.lldbinit file - LLDB will load SOS automatically at startup
SOS install succeeded
Now any time you run lldb, SOS will automatically be loaded and symbol downloading enabled. This requires at least lldb 3.9 to be installed. See the [Getting lldb](../README.md) section.
$ lldb
(lldb) soshelp
-------------------------------------------------------------------------------
SOS is a debugger extension DLL designed to aid in the debugging of managed
programs. Functions are listed by category, then roughly in order of
importance. Shortcut names for popular functions are listed in parenthesis.
Type "soshelp <functionname>" for detailed info on that function.
Object Inspection Examining code and stacks
----------------------------- -----------------------------
DumpObj (dumpobj) Threads (clrthreads)
DumpArray ThreadState
DumpAsync (dumpasync) IP2MD (ip2md)
DumpDelegate (dumpdelegate) u (clru)
DumpStackObjects (dso) DumpStack (dumpstack)
DumpHeap (dumpheap) EEStack (eestack)
DumpVC CLRStack (clrstack)
FinalizeQueue (finalizequeue) GCInfo
GCRoot (gcroot) EHInfo
PrintException (pe) bpmd (bpmd)
Examining CLR data structures Diagnostic Utilities
----------------------------- -----------------------------
DumpDomain (dumpdomain) VerifyHeap
EEHeap (eeheap) FindAppDomain
Name2EE (name2ee) DumpLog (dumplog)
SyncBlk (syncblk)
DumpMT (dumpmt)
DumpClass (dumpclass)
DumpMD (dumpmd)
Token2EE
DumpModule (dumpmodule)
DumpAssembly
DumpRuntimeTypes
DumpIL (dumpil)
DumpSig
DumpSigElem
Examining the GC history Other
----------------------------- -----------------------------
HistInit (histinit) SetHostRuntime (sethostruntime)
HistRoot (histroot) SetSymbolServer (setsymbolserver, loadsymbols)
HistObj (histobj) FAQ
HistObjFind (histobjfind) SOSFlush
HistClear (histclear) Help (soshelp)
(lldb)
## Updating SOS
$ dotnet tool update -g dotnet-sos
The installer needs to be run again:
$ dotnet-sos install
Installing SOS to /home/mikem/.dotnet/sos from /home/mikem/.dotnet/tools/.store/dotnet-sos/3.0.47001/dotnet-sos/3.0.47001/tools/netcoreapp2.1/any/linux-x64
Installing over existing installation...
Creating installation directory...
Copying files...
Updating existing /home/mikem/.lldbinit file - LLDB will load SOS automatically at startup
Cleaning up...
SOS install succeeded
## Uninstalling SOS
To uninstall and remove the lldb configuration run this command:
$ dotnet-sos uninstall
Uninstalling SOS from /home/mikem/.dotnet/sos
Reverting /home/mikem/.lldbinit file - LLDB will no longer load SOS at startup
SOS uninstall succeeded
To remove the SOS installer global tool:
$ dotnet tool uninstall -g dotnet-sos
Tool 'dotnet-sos' (version '3.0.47001') was successfully uninstalled.

@ -1,106 +0,0 @@
Installing SOS on Windows
=========================
There are three ways to install the Windows Debugger:
* The Microsoft Windows SDK. See [Debugging Tools for Windows 10 (WinDbg)](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-download-tools#small-classic-windbg-preview-logo-debugging-tools-for-windows-10-windbg) for more information. SOS will need to be manually installed with dotnet-sos.
* The WinDbg Preview. See [Download WinDbg Preview](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-download-tools#small-windbg-preview-logo-download-windbg-preview). SOS will be automatically loaded for .NET Core apps.
* The Microsoft internal version of the Windows Debugger. The latest SOS will automatically be loaded from the internal Microsoft extension gallery. For more details see below.
### Manually Installing SOS on Windows ###
To install the latest released SOS manually, use the dotnet-sos CLI global tool. This applies to any of the ways the Windows debugger was installed. You may have to `.unload sos` a version of SOS that was automatically loaded.
C:\Users\mikem>dotnet tool install -g dotnet-sos
You can invoke the tool using the following command: dotnet-sos
Tool 'dotnet-sos' (version '5.0.160202') was successfully installed.
Run the installer:
C:\Users\mikem>dotnet-sos install
Installing SOS to C:\Users\mikem\.dotnet\sos from C:\Users\mikem\.dotnet\tools\.store\dotnet-sos\5.0.160202\dotnet-sos\5.0.160202\tools\netcoreapp2.1\any\win-x64
Installing over existing installation...
Creating installation directory...
Copying files...
Execute '.load C:\Users\mikem\.dotnet\sos\sos.dll' to load SOS in your Windows debugger.
Cleaning up...
SOS install succeeded
SOS will need to be loaded manually with the above ".load" command:
C:\Users\mikem>"C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\cdb.exe" dotnet SymbolTestApp2.dll
Microsoft (R) Windows Debugger Version 10.0.19041.685 AMD64
Copyright (c) Microsoft Corporation. All rights reserved.
CommandLine: dotnet SymbolTestApp2.dll
Symbol search path is: srv*
Executable search path is:
ModLoad: 00007ff7`f7450000 00007ff7`f7477000 dotnet.exe
ModLoad: 00007fff`16d90000 00007fff`16f7d000 ntdll.dll
ModLoad: 00007fff`145e0000 00007fff`14693000 C:\WINDOWS\System32\KERNEL32.DLL
ModLoad: 00007fff`13c30000 00007fff`13ec3000 C:\WINDOWS\System32\KERNELBASE.dll
ModLoad: 00007fff`13a70000 00007fff`13b6c000 C:\WINDOWS\System32\ucrtbase.dll
(92cd8.92eb4): Break instruction exception - code 80000003 (first chance)
ntdll!LdrpDoDebuggerBreak+0x30:
00007fff`16e62cbc cc int 3
0:000> .unload sos
Unloading sos extension DLL
0:000> .load C:\Users\mikem\.dotnet\sos\sos.dll
0:000> .chain
Extension DLL search Path:
C:\Program Files\Debugging Tools for Windows (x64);...
Extension DLL chain:
C:\Users\mikem\.dotnet\sos\sos.dll: image 1.0.2-dev.19151.2+26ec7875d312cf57db83926db0d9340e297e2a4c, API 2.0.0, built Mon Feb 25 17:27:33 2019
[path: C:\Users\mikem\.dotnet\sos\sos.dll]
dbghelp: image 10.0.18317.1001, API 10.0.6,
[path: C:\Program Files\Debugging Tools for Windows (x64)\dbghelp.dll]
...
ntsdexts: image 10.0.18317.1001, API 1.0.0,
[path: C:\Program Files\Debugging Tools for Windows (x64)\WINXP\ntsdexts.dll]
### SOS for the Microsoft Internal Windows Debugger ###
The latest released version of SOS will automatically be loaded from the internal Microsoft extension gallery. You need version 10.0.18317.1001 or greater of the Windows debugger (windbg or cdb). SOS will load when the "coreclr.dll" module is loaded.
"C:\Program Files\Debugging Tools for Windows (x64)\cdb.exe" dotnet SymbolTestApp2.dll
Microsoft (R) Windows Debugger Version 10.0.21251.1000 AMD64
Copyright (c) Microsoft Corporation. All rights reserved.
0:000> sxe ld coreclr
0:000> g
ModLoad: 00007ffe`e9100000 00007ffe`e9165000 C:\Program Files\dotnet\host\fxr\2.2.2\hostfxr.dll
ModLoad: 00007ffe`e7ba0000 00007ffe`e7c32000 C:\Program Files\dotnet\shared\Microsoft.NETCore.App\2.1.6\hostpolicy.dll
ModLoad: 00007ffe`abb60000 00007ffe`ac125000 C:\Program Files\dotnet\shared\Microsoft.NETCore.App\2.1.6\coreclr.dll
ntdll!ZwMapViewOfSection+0x14:
00007fff`16e2fb74 c3 ret
0:000> .chain
Extension DLL search Path:
C:\Program Files\Debugging Tools for Windows (x64);...
Extension DLL chain:
sos: image 5.0.160202+5734230e3ee516339a4b0e4729def135027aa255, API 2.0.0, built Wed Dec 2 19:15:02 2020
[path: C:\Users\mikem\AppData\Local\DBG\ExtRepository\EG\cache2\Packages\SOS\5.0.3.10202\win-x64\sos.dll]
dbghelp: image 10.0.21251.1000, API 10.0.6,
[path: C:\Program Files\Debugging Tools for Windows (x64)\dbghelp.dll]
ext: image 10.0.21276.1001, API 1.0.0,
[path: C:\Users\mikem\AppData\Local\DBG\ExtRepository\EG\cache2\Packages\ext\10.0.21276.1001\amd64fre\winext\ext.dll]
...
0:000> !soshelp
-------------------------------------------------------------------------------
SOS is a debugger extension DLL designed to aid in the debugging of managed
programs. Functions are listed by category, then roughly in order of
importance. Shortcut names for popular functions are listed in parenthesis.
Type "!help <functionname>" for detailed info on that function.
Object Inspection Examining code and stacks
----------------------------- -----------------------------
DumpObj (do) Threads (clrthreads)
DumpArray (da) ThreadState
DumpAsync IP2MD
DumpDelegate U
DumpStackObjects (dso) DumpStack
DumpHeap EEStack
...

@ -1,92 +0,0 @@
Building LLDB from the LLVM repos
=================================
These instructions have been replaced with better and more thorough documentation. See [Getting lldb](../../README.md#getting-lldb) in the main readme.
1. Clone the llvm, clang, and lldb repos like this:
llvm
|
`-- tools
|
+-- clang
|
`-- lldb
```
cd $HOME
git clone http://llvm.org/git/llvm.git
cd $HOME/llvm/tools
git clone http://llvm.org/git/clang.git
git clone http://llvm.org/git/lldb.git
```
2. Checkout the "release_39" branches in llvm/clang/lldb:
```
cd $HOME/llvm
git checkout release_39
cd $HOME/llvm/tools/clang
git checkout release_39
cd $HOME/llvm/tools/lldb
git checkout release_39
```
3. Install the prerequisites:
For Linux (Debian or Ubuntu):
```
sudo apt-get install build-essential subversion swig python2.7-dev libedit-dev libncurses5-dev
```
For OSX, the latest Xcode needs to be installed and I use Homebrew to install the rest:
```
brew install python swig doxygen ocaml
```
[TBD] Add the prerequisites for Alpine, CentOS, Fedora, OpenSuse.
There may be more prerequisites required; when building the cmake files, it should let
you know if any are missing.
See [http://lldb.llvm.org/build.html](http://lldb.llvm.org/build.html) for more details on these preliminaries.
4. If building on OSX, carefully follow the signing directions (before you build)
here: $HOME/llvm/tools/lldb/docs/code-signing.txt. Even though those build directions
say to use Xcode to build lldb, I never got that to work, but cmake/make works.
5. Building the cmake files (you can build either debug or release or both).
For debug:
```
mkdir -p $HOME/build/debug
cd $HOME/build/debug
cmake -DCMAKE_BUILD_TYPE=debug $HOME/llvm
```
For release:
```
mkdir -p $HOME/build/release
cd $HOME/build/release
cmake -DCMAKE_BUILD_TYPE=release $HOME/llvm
```
6. Build lldb (release was picked in this example, but can be replaced with "debug"):
```
cd $HOME/build/release/tools/lldb
make -j16
```
When you build with -j16 (parallel build with 16 jobs), it sometimes fails. Just start again with plain make.
For OS X, building in a remote ssh shell won't sign properly; use a terminal window on the machine itself.
7. To use the newly built lldb and to build/test the SOS plugin with it, set these environment variables in your .profile:
```
export LLDB_INCLUDE_DIR=$HOME/llvm/tools/lldb/include
export LLDB_LIB_DIR=$HOME/build/release/lib
export LLDB_PATH=$HOME/build/release/bin/lldb
PATH=$HOME/build/release/bin:$PATH
```
For OS X also set:
```
export LLDB_DEBUGSERVER_PATH=$HOME/build/release/bin/debugserver
```
It also seems to be necessary to run lldb as superuser, e.g. `sudo -E $HOME/build/release/bin/lldb` (the -E is necessary so the above debug server environment variable is passed), when using a remote ssh session, but it isn't necessary if you run it in a local terminal session.

@ -1,30 +0,0 @@
cd $HOME
wget http://ftp.gnu.org/gnu/binutils/binutils-2.29.1.tar.xz
wget http://releases.llvm.org/3.9.1/cfe-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/llvm-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/lldb-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/compiler-rt-3.9.1.src.tar.xz
tar -xf binutils-2.29.1.tar.xz
tar -xf llvm-3.9.1.src.tar.xz
mkdir llvm-3.9.1.src/tools/clang
mkdir llvm-3.9.1.src/tools/lldb
mkdir llvm-3.9.1.src/projects/compiler-rt
tar -xf cfe-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/clang
tar -xf lldb-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/lldb
tar -xf compiler-rt-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/projects/compiler-rt
rm binutils-2.29.1.tar.xz
rm cfe-3.9.1.src.tar.xz
rm lldb-3.9.1.src.tar.xz
rm llvm-3.9.1.src.tar.xz
rm compiler-rt-3.9.1.src.tar.xz
mkdir llvmbuild
cd llvmbuild
cmake3 -DCMAKE_BUILD_TYPE=Release -DLLVM_LIBDIR_SUFFIX=64 -DLLVM_ENABLE_EH=1 -DLLVM_ENABLE_RTTI=1 -DLLVM_BINUTILS_INCDIR=../binutils-2.29.1/include ../llvm-3.9.1.src
make -j $(($(getconf _NPROCESSORS_ONLN)+1))
sudo make install
cd ..
rm -r llvmbuild
rm -r llvm-3.9.1.src
rm -r binutils-2.29.1

@ -1,23 +0,0 @@
cd $HOME
wget http://releases.llvm.org/3.9.1/cfe-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/llvm-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/lldb-3.9.1.src.tar.xz
tar -xf llvm-3.9.1.src.tar.xz
mkdir llvm-3.9.1.src/tools/clang
mkdir llvm-3.9.1.src/tools/lldb
tar -xf cfe-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/clang
tar -xf lldb-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/lldb
rm cfe-3.9.1.src.tar.xz
rm lldb-3.9.1.src.tar.xz
rm llvm-3.9.1.src.tar.xz
mkdir llvmbuild
cd llvmbuild
cmake -DCMAKE_BUILD_TYPE=Release -DLLDB_DISABLE_CURSES=1 -DLLVM_LIBDIR_SUFFIX=64 -DLLVM_ENABLE_EH=1 -DLLVM_ENABLE_RTTI=1 -DLLVM_BUILD_DOCS=0 ../llvm-3.9.1.src
make -j $(($(getconf _NPROCESSORS_ONLN)+1))
sudo make install
cd ..
rm -r llvmbuild
rm -r llvm-3.9.1.src

@ -1,6 +0,0 @@
Installing lldb on FreeBSD
==========================
Work in progress.
sudo pkg install llvm39 gettext python27

@ -1,217 +0,0 @@
Installing LLDB on Linux
========================
These instructions will lead you through installing or building the best version of lldb for your distro to use with SOS. If you have already followed the diagnostics repo build [prerequisites](../building/linux-instructions.md) and built the diagnostics repo, then the best version of lldb is already installed.
SOS needs lldb 3.9 or greater. Some distros only have older versions available by default, so there are directions and scripts to build lldb 3.9 for those platforms. These instructions assume that you have the dotnet cli and its prerequisites installed.
The libsosplugin.so built for lldb 3.9 does work with lldb 4.0 and greater, but most of the testing has been on lldb 3.9.
lldb 10.0 or greater is recommended if available for the distro version. For arm32, it is recommended to debug on Ubuntu 18.04 if possible with lldb 10.0, which is the only version of lldb found to work with SOS on arm32.
#### Ubuntu 14.04 ####
In order to get lldb-3.9, we need to add additional package sources (see [http://llvm.org/apt/](http://llvm.org/apt/) for the other Ubuntu versions not listed here):
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Install the lldb packages:
sudo apt-get install lldb-3.9 python-lldb-3.9
To launch lldb:
lldb-3.9
#### Ubuntu 16.04 ####
Add the additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/xenial/ llvm-toolchain-xenial main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/xenial/ llvm-toolchain-xenial-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Install the lldb packages:
sudo apt-get install lldb-3.9 python-lldb-3.9
To launch lldb:
lldb-3.9
#### Ubuntu 17.10 ####
Add the additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/artful/ llvm-toolchain-artful main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/artful/ llvm-toolchain-artful-3.9 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Install the lldb packages:
sudo apt-get install lldb-3.9 python-lldb-3.9
To launch lldb:
lldb-3.9
#### Ubuntu 18.04 ####
To install the lldb packages:
sudo apt-get update
sudo apt-get install lldb-3.9 llvm-3.9 python-lldb-3.9
To launch lldb:
lldb-3.9
10.0 is the only version of lldb found that works with SOS for arm32 on Ubuntu 18.04.
#### Ubuntu 20.04 ####
To install the lldb packages:
sudo apt-get update
sudo apt-get install lldb
This installs lldb version 10.0.0.
#### Alpine 3.9 ####
Currently there is no lldb that works on Alpine.
Issue https://github.com/dotnet/diagnostics/issues/73
#### Alpine 3.12 ####
lldb 10.0 is available for this Alpine version.
#### CentOS 6 ####
[TBD]
#### CentOS 7 ####
lldb 3.9 will have to be built for this distro.
First the prerequisites:
sudo yum install centos-release-SCL epel-release
sudo yum install cmake cmake3 gcc gcc-c++ git libicu libunwind make python27 tar wget which zip
sudo yum install doxygen libedit-devel libxml2-devel python-argparse python-devel readline-devel swig xz
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/centos7/build-install-lldb.sh).
WARNING: This script installs llvm and lldb as root (via sudo) and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/centos7/build-install-lldb.sh
This will take some time to complete. After the build is finished, run these commands to remove the no longer needed packages:
sudo yum remove doxygen libedit-devel libxml2-devel python-argparse python-devel readline-devel swig xz
sudo yum clean all
To launch lldb:
lldb-3.9.1
#### Debian 8.2/8.7 ####
In order to get lldb-5.0 (3.9 doesn't seem to work that well), we need to add additional package sources:
sudo apt-get update
sudo apt-get install wget
echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie main" | sudo tee /etc/apt/sources.list.d/llvm.list
echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-5.0 main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update
Install the lldb packages:
sudo apt-get install lldb-5.0 python-lldb-5.0
To launch lldb:
lldb-5.0
#### Debian 9 (Stretch) ####
sudo apt-get install lldb-3.9 python-lldb-3.9
To launch lldb:
lldb-3.9
#### Fedora 24 ####
sudo dnf install clang cmake findutils git libicu libunwind make python tar wget which zip
sudo dnf install doxygen libedit-devel libxml2-devel python-argparse python-devel readline-devel swig xz
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/fedora24/build-install-lldb.sh).
WARNING: This script installs llvm and lldb as root (via sudo) and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/fedora24/build-install-lldb.sh
This will take some time to complete. After the build is finished, run these commands to remove the no longer needed packages:
sudo dnf remove doxygen libedit-devel libxml2-devel readline-devel swig
sudo dnf clean all
To launch lldb:
lldb
#### Fedora 27, 28, 29 ####
sudo dnf install lldb python2-lldb
To launch lldb:
lldb
#### OpenSuse 42.1, 42.3 ####
sudo zypper install cmake gcc-c++ git hostname libicu libunwind lldb-devel llvm-clang llvm-devel make python python-xml tar wget which zip
sudo zypper install doxygen libedit-devel libxml2-devel ncurses-devel python-argparse python-devel readline-devel swig
Now build and install llvm/lldb 3.9 using the script provided here: [build-install-lldb.sh](../lldb/opensuse/build-install-lldb.sh).
WARNING: This script installs llvm and lldb as root (via sudo) and may overwrite any previously installed versions.
cd $HOME
git clone https://github.com/dotnet/diagnostics.git
$HOME/diagnostics/documentation/lldb/opensuse/build-install-lldb.sh
This will take some time to complete. After the build is finished, run these commands to remove the no longer needed packages:
sudo zypper rm doxygen libedit-devel libxml2-devel ncurses-devel python-argparse python-devel readline-devel swig
sudo zypper clean -a
To launch lldb:
lldb-3.9.1
#### RHEL 7.5 ####
See [LLDB](https://access.redhat.com/documentation/en-us/red_hat_developer_tools/2018.2/html/using_clang_and_llvm_toolset/chap-lldb) on RedHat's web site.
#### SLES ####
[TBD]

Просмотреть файл

@@ -1,4 +0,0 @@
Installing lldb for NetBSD
==========================
[TBD]

Просмотреть файл

@@ -1,28 +0,0 @@
cd $HOME
# Download CMake 3.11 and the llvm/clang/lldb 3.9.1 sources
wget http://cmake.org/files/v3.11/cmake-3.11.4-Linux-x86_64.tar.gz
wget http://releases.llvm.org/3.9.1/cfe-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/llvm-3.9.1.src.tar.xz
wget http://releases.llvm.org/3.9.1/lldb-3.9.1.src.tar.xz
# Unpack everything; the clang and lldb sources go inside the llvm tree so they build as llvm tools
tar -xf cmake-3.11.4-Linux-x86_64.tar.gz
tar -xf llvm-3.9.1.src.tar.xz
mkdir llvm-3.9.1.src/tools/clang
mkdir llvm-3.9.1.src/tools/lldb
tar -xf cfe-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/clang
tar -xf lldb-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/lldb
# Remove the downloaded archives
rm cmake-3.11.4-Linux-x86_64.tar.gz
rm cfe-3.9.1.src.tar.xz
rm lldb-3.9.1.src.tar.xz
rm llvm-3.9.1.src.tar.xz
# Configure and build llvm/lldb out of tree, then install (installs as root via sudo)
mkdir llvmbuild
cd llvmbuild
../cmake-3.11.4-Linux-x86_64/bin/cmake -DCMAKE_BUILD_TYPE=Release -DLLDB_DISABLE_CURSES=1 -DLLVM_LIBDIR_SUFFIX=64 -DLLVM_ENABLE_EH=1 -DLLVM_ENABLE_RTTI=1 -DLLVM_BUILD_DOCS=0 ../llvm-3.9.1.src
make -j $(($(getconf _NPROCESSORS_ONLN)+1))
sudo make install
# Clean up the build and source directories
cd ..
rm -r llvmbuild
rm -r llvm-3.9.1.src
rm -r cmake-3.11.4-Linux-x86_64

Просмотреть файл

@@ -1,14 +0,0 @@
MacOS
=====
### MacOS (Sierra 10.12.6) Instructions
The version of lldb that comes with Xcode 9.2 will now work with SOS and the lldb plugin. We no longer have to build lldb locally.
### MacOS (High Sierra 10.13.4) Instructions
The version of lldb that comes with Xcode 9.3/9.4 swift-4.1 will now work with SOS and the lldb plugin.
### MacOS (Mojave 10.14) Instructions
There has been no testing for the version of Xcode/lldb that comes with Mojave.

Просмотреть файл

@@ -0,0 +1,16 @@
# Official Build Instructions
*WARNING*: These instructions will only work internally at Microsoft.
This signs and publishes the following packages to the tools feed (https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json):
- dotnet-monitor
- Microsoft.Diagnostics.NETCore.Client
To release the latest tools:
1) Merge the desired commits for this release from the master branch to the release branch.
2) Kick off an official build in the [internal pipeline](https://dev.azure.com/dnceng/internal/_build?definitionId=954) for the desired branch after the changes have been properly mirrored.
3) Update any package version references in the documentation as needed.
4) Download the above packages from the successful official build under "Artifacts" -> "PackageArtifacts".
5) Upload these packages to NuGet.org (see the example after this list).
6) Create a new release in the dotnet-monitor repo's [releases](https://github.com/dotnet/dotnet-monitor/releases) tab, using the package version (not the official build id) as the tag. Add release notes about known issues, fixes, and new features.
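For step 5, one possible way to push a downloaded package is the .NET CLI; this is only a sketch, and the package file name and API key below are placeholders:
```
dotnet nuget push dotnet-monitor.<version>.nupkg --api-key <nuget.org-api-key> --source https://api.nuget.org/v3/index.json
```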

Просмотреть файл

@@ -1,39 +0,0 @@
Private runtime build testing
=============================
Here are some instructions on how to run the diagnostics repo's tests against a locally built private .NET Core runtime. These directions will work on Windows, Linux, and MacOS.
1. Build the runtime repo (see [Workflow Guide](https://github.com/dotnet/runtime/blob/master/docs/workflow/README.md)).
2. Build the diagnostics repo (see [Building the Repository](../README.md)).
3. Run the diagnostics repo tests with the -privatebuildpath option.
On Windows:
```
C:\diagnostics> test -privatebuildpath c:\runtime\artifacts\bin\coreclr\Windows_NT.x64.Debug
```
When you are all done with the private runtime testing, run this command to remove the Windows registry entries added in the above steps.
```
C:\diagnostics> test -cleanupprivatebuild
```
There will be some popups from regedit asking for administrator permission to edit the registry (press Yes), warning about adding registry keys from AddPrivateTesting.reg (press Yes) and that the edit was successful (press OK).
On Linux/MacOS:
```
~/diagnostics$ ./test.sh --privatebuildpath /home/user/runtime/artifacts/bin/coreclr/Linux.x64.Debug
```
The private runtime will be copied to the diagnostics repo and the tests started. It can be just the runtime binaries, but this assumes that the private build is close to the latest published master build. If not, you can pass the runtime's testhost directory containing all the shared runtime bits, e.g. `c:\runtime\artifacts\bin\coreclr\testhost\netcoreapp5.0-Windows_NT-Debug-x64\shared\Microsoft.NETCore.App\5.0.0` or `/home/user/runtime/artifacts/bin/coreclr/testhost/netcoreapp5.0-Linux-Release-x64/shared/Microsoft.NETCore.App/5.0.0` (see the example below).
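For example, a run against the testhost directory mentioned above might look like this (paths are illustrative):
```
~/diagnostics$ ./test.sh --privatebuildpath /home/user/runtime/artifacts/bin/coreclr/testhost/netcoreapp5.0-Linux-Release-x64/shared/Microsoft.NETCore.App/5.0.0
```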
On Linux/MacOS it is recommended to test against Release runtime builds because of a benign assert in DAC (tracked by issue #[31897](https://github.com/dotnet/runtime/issues/31897)) that causes the tests to fail.
On Windows the DAC is not properly signed for a private runtime build so there are a couple of registry keys that need to be added so Windows will load the DAC and the tests can create proper mini-dumps. An example of the registry key values added are:
```
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\KnownManagedDebuggingDlls]
"C:\diagnostics\.dotnet\shared\Microsoft.NETCore.App\5.0.0-alpha.1.20102.3\mscordaccore.dll"=dword:0
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\MiniDumpAuxiliaryDlls]
"C:\diagnostics\.dotnet\shared\Microsoft.NETCore.App\5.0.0-alpha.1.20102.3\coreclr.dll"="C:\diagnostics\.dotnet\shared\Microsoft.NETCore.App\5.0.0-alpha.1.20102.3\mscordaccore.dll"
```

Просмотреть файл

@@ -1,77 +0,0 @@
# Single File Diagnostic Tools
So far, our diagnostic tools have been distributed as [global tools](https://docs.microsoft.com/en-us/dotnet/core/tools/global-tools). Global tools provide a great user experience: clear ways to easily download, update, and configure the scope of the tools. However, this mechanism requires a full .NET SDK to be available, which is rarely the case in environments like production machines. For these scenarios we provide a single-file distribution mechanism that only requires a runtime to be available on the target machine. Other than the installation instructions, all functionality and usage remain the same. For instructions on usage and syntax, please refer to the [.NET Core diagnostic global tools documentation](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/#net-core-diagnostic-global-tools).
## Requirements to Run the Tools
- The tools distributed in a single file format are *_framework dependent_* applications. They require a 3.1 or newer .NET Core runtime to be available. This means one of the following:
- Installing the runtime globally in a well-known location using one of the different installer technologies documented in the [official .NET download page](https://dotnet.microsoft.com/download).
- Downloading the binary archives from the [official .NET download page](https://dotnet.microsoft.com/download), extracting them to a directory, and setting the `DOTNET_ROOT` environment variable to the path of extraction.
- Using the [dotnet-install scripts](https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script) and setting `DOTNET_ROOT` to the installation directory. This mechanism is not recommended for development or production environments, but it's ideal for environments like CI machines.
- The workload is being run in a container with the runtime available. This could be either because the workload is running on one of the [official .NET runtime containers](https://hub.docker.com/_/microsoft-dotnet-runtime) or on a custom-built image that installs it at build time.
- These tools are self-extracting archives that will write files to disk as necessary. The extraction target folder is within the platform's `TEMP` directory by default - for Windows this will be in `%TEMP%` and on Unix-based systems this will be under `$TMPDIR`. In case `TEMP` is not available in the target environment or there's a desire to control the extraction directory for reasons such as clean up, set the `DOTNET_BUNDLE_EXTRACT_BASE_DIR` environment variable to a path that can be used in lieu of `TEMP` (see the sketch after this list).
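A minimal sketch of the two environment variables above, assuming the runtime binaries were extracted to `$HOME/dotnet` and a single-file `dotnet-dump` was downloaded to the current directory; the paths and process id are placeholders:
```sh
# Point the tool at a privately extracted runtime instead of a machine-wide install.
export DOTNET_ROOT=$HOME/dotnet
# Optional: control where the single-file tool extracts itself instead of $TMPDIR/%TEMP%.
export DOTNET_BUNDLE_EXTRACT_BASE_DIR=$HOME/.net-tool-extract
chmod +x ./dotnet-dump
./dotnet-dump collect -p 1234
```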
## Obtaining the Tools
### Downloading the latest available release of a tool
The latest release of each tool is always available at a link that follows this schema:
```
https://aka.ms/<TOOL NAME>/<TARGET PLATFORM RUNTIME IDENTIFIER>
```
The tools we support are:
- `dotnet-counters`
- `dotnet-dump`
- `dotnet-gcdump`
- `dotnet-sos`
- `dotnet-trace`
The supported [runtime identifiers](https://docs.microsoft.com/en-us/dotnet/core/rid-catalog) are:
- `win-arm`
- `win-arm64`
- `win-x64`
- `win-x86`
- `osx-x64`
- `linux-arm`
- `linux-arm64`
- `linux-musl-arm64`
- `linux-x64`
- `linux-musl-x64`
For example, the latest release of `dotnet-trace` for Windows x64 will be available at <https://aka.ms/dotnet-trace/win-x64>.
To download these tools you can either use a browser or use a command line utility like `wget` or `curl`. It's important to note that the file name is returned in the `content-disposition` header of the request. If the utility you use to download the tools respects the header, the file will be saved with the name of the tool downloaded; otherwise you might need to rename the tool. A few examples are:
1. *`curl` (available on Windows after 1706, macOS, and several distributions of Linux)*
```sh
curl -JLO https://aka.ms/dotnet-dump/win-x64
```
2. *`wget`*
```sh
wget --content-disposition https://aka.ms/dotnet-dump/linux-x64
```
3. *`pwsh` (PowerShell core)*
```powershell
$resp = Invoke-WebRequest -Uri "https://aka.ms/dotnet-dump/win-x86"
$header = [System.Net.Http.Headers.ContentDispositionHeaderValue]::Parse($resp.Headers.'content-disposition')
[System.IO.File]::WriteAllBytes($header.FileName, $resp.content)
```
4. *`powershell`*
```powershell
Invoke-WebRequest -Uri "https://aka.ms/dotnet-dump/win-x86" -Outfile "dotnet-dump.exe"
```
### Past Releases and Checksum Validation
Each release in the [releases section](https://github.com/dotnet/diagnostics/releases) of the repository contains a table of stable links for our tools starting release `v5.0.152202`. Additionally, there's a CSV available as an attachment containing all the stable links and SHA512 checksums in case the downloaded files need to be validated.
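For example, a downloaded file can be compared against the SHA512 value from that CSV with standard tooling (the file name below is a placeholder):
```sh
sha512sum ./dotnet-dump         # Linux
shasum -a 512 ./dotnet-dump     # macOS
```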

Просмотреть файл

@@ -1,249 +0,0 @@
---
title: "SOS Debugging Extension"
ms.date: "10/3/2018"
helpviewer_keywords:
- "debugging extensions"
- "SOS debugging extensions"
- "xplat debugging"
ms.assetid: TBD
author: "mikem"
ms.author: "mikem"
---
# SOS debugging extension for Windows
## Syntax
```shell
![command] [options]
```
### Command Summary
SOS is a debugger extension DLL designed to aid in the debugging of managed programs. Functions are listed by category, then roughly in order of importance. Shortcut names for popular functions are listed in parentheses.
Type `!help <functionname>` for detailed info on that function.
Object Inspection                Examining code and stacks
-----------------------------    -----------------------------
DumpObj (do, dumpobj)            Threads (clrthreads)
DumpArray (da)                   ThreadState
DumpAsync                        IP2MD
DumpStackObjects (dso)           U
DumpHeap                         DumpStack
DumpVC                           EEStack
GCRoot                           CLRStack
ObjSize                          GCInfo
FinalizeQueue                    EHInfo
PrintException (pe)              BPMD
TraverseHeap                     COMState
Examining CLR data structures    Diagnostic Utilities
-----------------------------    -----------------------------
DumpDomain                       VerifyHeap
EEHeap                           VerifyObj
Name2EE                          FindRoots
SyncBlk                          HeapStat
DumpMT                           GCWhere
DumpClass                        ListNearObj (lno)
DumpMD                           GCHandles
Token2EE                         GCHandleLeaks
EEVersion                        FinalizeQueue (fq)
DumpModule                       FindAppDomain
ThreadPool                       SaveModule
DumpAssembly                     ProcInfo
DumpSigElem                      StopOnException (soe)
DumpRuntimeTypes                 DumpLog
DumpSig                          VMMap
RCWCleanupList                   VMStat
DumpIL                           MinidumpMode
DumpRCW                          AnalyzeOOM (ao)
DumpCCW
Examining the GC history         Other
-----------------------------    -----------------------------
HistInit                         SetHostRuntime (sethostruntime)
HistRoot                         SetSymbolServer (setsymbolserver)
HistObj                          FAQ
HistObjFind                      SOSFlush
HistClear                        SOSStatus (sosstatus)
FAQ
Help (soshelp)
## Commands
|Command|Description|
|-------------|-----------------|
|**AnalyzeOOM** (**ao**)|Displays the information for the last out of memory (OOM) that occurred on an allocation request to the garbage collection heap. (In server garbage collection, it displays OOM, if any, on each garbage collection heap.)|
|**BPMD** (**bpmd**) [**-nofuturemodule**] [\<*module name*> \<*method name*>] [**-md** <`MethodDesc`>] **-list** **-clear** \<*pending breakpoint number*> **-clearall**|Creates a breakpoint at the specified method in the specified module.<br /><br /> If the specified module and method have not been loaded, this command waits for a notification that the module was loaded and just-in-time (JIT) compiled before creating a breakpoint.<br /><br /> You can manage the list of pending breakpoints by using the **-list**, **-clear**, and **-clearall** options:<br /><br /> The **-list** option generates a list of all the pending breakpoints. If a pending breakpoint has a non-zero module ID, that breakpoint is specific to a function in that particular loaded module. If the pending breakpoint has a zero module ID, that breakpoint applies to modules that have not yet been loaded.<br /><br /> Use the **-clear** or **-clearall** option to remove pending breakpoints from the list.|
|**CLRStack** [**-a**] [**-l**] [**-p**] [**-n**] [**-f**] [**-r**] [**-all**]|Provides a stack trace of managed code only.<br /><br /> The **-p** option shows arguments to the managed function.<br /><br /> The **-l** option shows information on local variables in a frame. The SOS Debugging Extension cannot retrieve local names, so the output for local names is in the format \<*local address*> **=** \<*value*>.<br /><br /> The **-a** option is a shortcut for **-l** and **-p** combined.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS will look up the symbols for every managed frame and if successful will display the corresponding source file name and line number. The **-n** (No line numbers) parameter can be specified to disable this behavior.<br /><br />The **-f** option (full mode) displays the native frames intermixing them with the managed frames and the assembly name and function offset for the managed frames.<br /><br />The **-r** option dumps the registers for each stack frame.<br /><br />The **-all** option dumps all the managed threads' stacks.|
|**COMState**|Lists the COM apartment model for each thread and a `Context` pointer, if available.|
|**DumpArray** [**-start** \<*startIndex*>] [**-length** \<*length*>] [**-details**] [**-nofields**] \<*array object address*><br /><br /> -or-<br /><br /> **DA** [**-start** \<*startIndex*>] [**-length** \<*length*>] [**-detail**] [**-nofields**] *array object address*>|Examines elements of an array object.<br /><br /> The **-start** option specifies the starting index at which to display elements.<br /><br /> The **-length** option specifies how many elements to show.<br /><br /> The **-details** option displays details of the element using the **DumpObj** and **DumpVC** formats.<br /><br /> The **-nofields** option prevents arrays from displaying. This option is available only when the **-detail** option is specified.|
|**DumpAssembly** \<*assembly address*>|Displays information about an assembly.<br /><br /> The **DumpAssembly** command lists multiple modules, if they exist.<br /><br /> You can get an assembly address by using the **DumpDomain** command.|
|**DumpClass** \<*EEClass address*>|Displays information about the `EEClass` structure associated with a type.<br /><br /> The **DumpClass** command displays static field values but does not display nonstatic field values.<br /><br /> Use the **DumpMT**, **DumpObj**, **Name2EE**, or **Token2EE** command to get an `EEClass` structure address.|
|**DumpDomain** [\<*domain address*>]|Enumerates each <xref:System.Reflection.Assembly> object that is loaded within the specified <xref:System.AppDomain> object address. When called with no parameters, the **DumpDomain** command lists all <xref:System.AppDomain> objects in a process.|
|**DumpHeap** [**-stat**] [**-strings**] [**-short**] [**-min** \<*size*>] [**-max** \<*size*>] [**-thinlock**] [**-startAtLowerBound**] [**-mt** \<*MethodTable address*>] [**-type** \<*partial type name*>][*start* [*end*]]|Displays information about the garbage-collected heap and collection statistics about objects.<br /><br /> The **DumpHeap** command displays a warning if it detects excessive fragmentation in the garbage collector heap.<br /><br /> The **-stat** option restricts the output to the statistical type summary.<br /><br /> The **-strings** option restricts the output to a statistical string value summary.<br /><br /> The **-short** option limits output to just the address of each object. This lets you easily pipe output from the command to another debugger command for automation.<br /><br /> The **-min** option ignores objects that are less than the `size` parameter, specified in bytes.<br /><br /> The **-max** option ignores objects that are larger than the `size` parameter, specified in bytes.<br /><br /> The **-thinlock** option reports ThinLocks. For more information, see the **SyncBlk** command.<br /><br /> The `-startAtLowerBound` option forces the heap walk to begin at the lower bound of a supplied address range. During the planning phase, the heap is often not walkable because objects are being moved. This option forces **DumpHeap** to begin its walk at the specified lower bound. You must supply the address of a valid object as the lower bound for this option to work. You can display memory at the address of a bad object to manually find the next method table. If the garbage collection is currently in a call to `memcopy`, you may also be able to find the address of the next object by adding the size to the start address, which is supplied as a parameter.<br /><br /> The **-mt** option lists only those objects that correspond to the specified `MethodTable` structure.<br /><br /> The **-type** option lists only those objects whose type name is a substring match of the specified string.<br /><br /> The `start` parameter begins listing from the specified address.<br /><br /> The `end` parameter stops listing at the specified address.|
|**DumpIL** \<*Managed DynamicMethod object*> &#124; \<*DynamicMethodDesc pointer*> &#124; \<*MethodDesc pointer*>|Displays the Microsoft intermediate language (MSIL) that is associated with a managed method.<br /><br /> Note that dynamic MSIL is emitted differently than MSIL that is loaded from an assembly. Dynamic MSIL refers to objects in a managed object array rather than to metadata tokens.|
|**DumpLog** [**-addr** \<*addressOfStressLog*>] [<*Filename*>]|Writes the contents of an in-memory stress log to the specified file. If you do not specify a name, this command creates a file called StressLog.txt in the current directory.<br /><br /> The in-memory stress log helps you diagnose stress failures without using locks or I/O. To enable the stress log, set the following registry keys under HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\\.NETFramework:<br /><br /> (DWORD) StressLog = 1<br /><br /> (DWORD) LogFacility = 0xffffffff<br /><br /> (DWORD) StressLogSize = 65536<br /><br /> The optional `-addr` option lets you specify a stress log other than the default log.|
|**DumpMD** \<*MethodDesc address*>|Displays information about a `MethodDesc` structure at the specified address.<br /><br /> You can use the **IP2MD** command to get the `MethodDesc` structure address from a managed function.|
|**DumpMT** [**-MD**] \<*MethodTable address*>|Displays information about a method table at the specified address. Specifying the **-MD** option displays a list of all methods defined with the object.<br /><br /> Each managed object contains a method table pointer.|
|**DumpModule** [**-mt**] \<*Module address*>|Displays information about a module at the specified address. The **-mt** option displays the types defined in a module and the types referenced by the module<br /><br /> You can use the **DumpDomain** or **DumpAssembly** command to retrieve a module's address.|
|**DumpObj** [**-nofields**] \<*object address*><br /><br /> -or-<br /><br /> **DO** \<*object address*>|Displays information about an object at the specified address. The **DumpObj** command displays the fields, the `EEClass` structure information, the method table, and the size of the object.<br /><br /> You can use the **DumpStackObjects** command to retrieve an object's address.<br /><br /> Note that you can run the **DumpObj** command on fields of type `CLASS` because they are also objects.<br /><br /> The `-`**nofields** option prevents fields of the object being displayed, it is useful for objects like String.|
|**DumpRuntimeTypes**|Displays the runtime type objects in the garbage collector heap and lists their associated type names and method tables.|
|**DumpStack** [**-EE**] [**-n**] [`top` *stack* [`bottom` *stac*`k`]]|Displays a stack trace.<br /><br /> The **-EE** option causes the **DumpStack** command to display only managed functions. Use the `top` and `bottom` parameters to limit the stack frames displayed on x86 platforms.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS will look up the symbols for every managed frame and if successful will display the corresponding source file name and line number. The **-n** (No line numbers) parameter can be specified to disable this behavior.<br /><br /> On x86 and x64 platforms, the **DumpStack** command creates a verbose stack trace.<br /><br /> On IA-64-based platforms, the **DumpStack** command mimics the debugger's **K** command. The `top` and `bottom` parameters are ignored on IA-64-based platforms.|
|**DumpSig** \<*sigaddr*> \<*moduleaddr*>|Displays information about a `Sig` structure at the specified address.|
|**DumpSigElem** \<*sigaddr*> \<*moduleaddr*>|Displays a single element of a signature object. In most cases, you should use **DumpSig** to look at individual signature objects. However, if a signature has been corrupted in some way, you can use **DumpSigElem** to read the valid portions of it.|
|**DumpStackObjects** [**-verify**] [`top` *stack* [`bottom` *stack*]]<br /><br /> -or-<br /><br /> **DSO** [**-verify**] [`top` *stack* [`bottom` *stack*]]|Displays all managed objects found within the bounds of the current stack.<br /><br /> The **-verify** option validates each non-static `CLASS` field of an object field.<br /><br /> Use the **DumpStackObject** command with stack tracing commands such as the **K** command and the **CLRStack** command to determine the values of local variables and parameters.|
|**DumpVC** \<*MethodTable address*> \<*Address*>|Displays information about the fields of a value class at the specified address.<br /><br /> The **MethodTable** parameter allows the **DumpVC** command to correctly interpret fields. Value classes do not have a method table as their first field.|
|**EEHeap** [**-gc**] [**-loader**]|Displays information about process memory consumed by internal runtime data structures.<br /><br /> The **-gc** and **-loader** options limit the output of this command to garbage collector or loader data structures.<br /><br /> The information for the garbage collector lists the ranges of each segment in the managed heap. If the pointer falls within a segment range given by **-gc**, the pointer is an object pointer.|
|**EEStack** [**-short**] [**-EE**]|Runs the **DumpStack** command on all threads in the process.<br /><br /> The **-EE** option is passed directly to the **DumpStack** command. The **-short** parameter limits the output to the following kinds of threads:<br /><br /> Threads that have taken a lock.<br /><br /> Threads that have been stalled in order to allow a garbage collection.<br /><br /> Threads that are currently in managed code.|
|**EEVersion**|Displays the runtime version.|
|**EHInfo** [\<*MethodDesc address*>] [\<*Code address*>]|Displays the exception handling blocks in a specified method. This command displays the code addresses and offsets for the clause block (the `try` block) and the handler block (the `catch` block).|
|**FAQ**|Displays frequently asked questions.|
|**FinalizeQueue** [**-detail**] &#124; [**-allReady**] [**-short**]|Displays all objects registered for finalization.<br /><br /> The **-detail** option displays extra information about any `SyncBlocks` that need to be cleaned up, and any `RuntimeCallableWrappers` (RCWs) that await cleanup. Both of these data structures are cached and cleaned up by the finalizer thread when it runs.<br /><br /> The `-allReady` option displays all objects that are ready for finalization, regardless of whether they are already marked by the garbage collection as such, or will be marked by the next garbage collection. The objects that are in the "ready for finalization" list are finalizable objects that are no longer rooted. This option can be very expensive, because it verifies whether all the objects in the finalizable queues are still rooted.<br /><br /> The `-short` option limits the output to the address of each object. If it is used in conjunction with **-allReady**, it enumerates all objects that have a finalizer that are no longer rooted. If it is used independently, it lists all objects in the finalizable and "ready for finalization" queues.|
|**FindAppDomain** \<*Object address*>|Determines the application domain of an object at the specified address.|
|**FindRoots** **-gen** \<*N*> &#124; **-gen any** &#124;\<*object address*>|Causes the debugger to break in the debuggee on the next collection of the specified generation. The effect is reset as soon as the break occurs. To break on the next collection, you have to reissue the command. The *\<object address>* form of this command is used after the break caused by the **-gen** or **-gen any** has occurred. At that time, the debuggee is in the right state for **FindRoots** to identify roots for objects from the current condemned generations.|
|**GCHandles** [**-perdomain**]|Displays statistics about garbage collector handles in the process.<br /><br /> The **-perdomain** option arranges the statistics by application domain.<br /><br /> Use the **GCHandles** command to find memory leaks caused by garbage collector handle leaks. For example, a memory leak occurs when code retains a large array because a strong garbage collector handle still points to it, and the handle is discarded without freeing it.|
|**GCHandleLeaks**|Searches memory for any references to strong and pinned garbage collector handles in the process and displays the results. If a handle is found, the **GCHandleLeaks** command displays the address of the reference. If a handle is not found in memory, this command displays a notification.|
|**GCInfo** \<*MethodDesc address*>\<*Code address*>|Displays data that indicates when registers or stack locations contain managed objects. If a garbage collection occurs, the collector must know the locations of references to objects so it can update them with new object pointer values.|
|**GCRoot** [**-nostacks**] [**-all**] \<*Object address*>|Displays information about references (or roots) to an object at the specified address.<br /><br /> The **GCRoot** command examines the entire managed heap and the handle table for handles within other objects and handles on the stack. Each stack is then searched for pointers to objects, and the finalizer queue is also searched.<br /><br /> This command does not determine whether a stack root is valid or is discarded. Use the **CLRStack** and **U** commands to disassemble the frame that the local or argument value belongs to in order to determine if the stack root is still in use.<br /><br /> The **-nostacks** option restricts the search to garbage collector handles and reachable objects.<br /><br /> The **-all** option forces all roots to be displayed instead of just the unique roots.|
|**GCWhere** *\<object address>*|Displays the location and size in the garbage collection heap of the argument passed in. When the argument lies in the managed heap but is not a valid object address, the size is displayed as 0 (zero).|
|**Help** (**soshelp**) [\<*command*>] [`faq`]|Displays all available commands when no parameter is specified, or displays detailed help information about the specified command.<br /><br /> The `faq` parameter displays answers to frequently asked questions.|
|**HeapStat** [**-inclUnrooted** &#124; **-iu**]|Displays the generation sizes for each heap and the total free space in each generation on each heap. If the -**inclUnrooted** option is specified, the report includes information about the managed objects from the garbage collection heap that is no longer rooted.|
|**HistClear**|Releases any resources used by the family of `Hist` commands.<br /><br /> Generally, you do not have to explicitly call `HistClear`, because each `HistInit` cleans up the previous resources.|
|**HistInit**|Initializes the SOS structures from the stress log saved in the debuggee.|
|**HistObj** *<obj_address>*|Examines all stress log relocation records and displays the chain of garbage collection relocations that may have led to the address passed in as an argument.|
|**HistObjFind** *<obj_address>*|Displays all the log entries that reference an object at the specified address.|
|**HistRoot** *\<root>*|Displays information related to both promotions and relocations of the specified root.<br /><br /> The root value can be used to track the movement of an object through the garbage collections.|
|**IP2MD** \<*Code address*>|Displays the `MethodDesc` structure at the specified address in code that has been JIT-compiled.|
|**ListNearObj** (**lno**) *<obj_address>*|Displays the objects preceding and following the specified address. The command looks for the address in the garbage collection heap that looks like a valid beginning of a managed object (based on a valid method table) and the object following the argument address.|
|**MinidumpMode** [**0**] [**1**]|Prevents running unsafe commands when using a minidump.<br /><br /> Pass **0** to disable this feature or **1** to enable this feature. By default, the **MinidumpMode** value is set to **0**.<br /><br /> Minidumps created with the **.dump /m** command or **.dump** command have limited CLR-specific data and allow you to run only a subset of SOS commands correctly. Some commands may fail with unexpected errors because required areas of memory are not mapped or are only partially mapped. This option protects you from running unsafe commands against minidumps.|
|**Name2EE** \<*module name*> \<*type or method name*><br /><br /> -or-<br /><br /> **Name2EE** \<*module name*>**!**\<*type or method name*>|Displays the `MethodTable` structure and `EEClass` structure for the specified type or method in the specified module.<br /><br /> The specified module must be loaded in the process.<br /><br /> To get the proper type name, browse the module by using the [Ildasm.exe (IL Disassembler)](../../../docs/framework/tools/ildasm-exe-il-disassembler.md). You can also pass `*` as the module name parameter to search all loaded managed modules. The *module name* parameter can also be the debugger's name for a module, such as `mscorlib` or `image00400000`.<br /><br /> This command supports the Windows debugger syntax of <`module`>`!`<`type`>. The type must be fully qualified.|
|**ObjSize** [\<*Object address*>] &#124; [**-aggregate**] [**-stat**]|Displays the size of the specified object. If you do not specify any parameters, the **ObjSize** command displays the size of all objects found on managed threads, displays all garbage collector handles in the process, and totals the size of any objects pointed to by those handles. The **ObjSize** command includes the size of all child objects in addition to the parent.<br /><br /> The **-aggregate** option can be used in conjunction with the **-stat** argument to get a detailed view of the types that are still rooted. By using **!dumpheap -stat** and **!objsize -aggregate -stat**, you can determine which objects are no longer rooted and diagnose various memory issues.|
|**PrintException** [**-nested**] [**-lines**] [\<*Exception object address*>]<br /><br /> -or-<br /><br /> **PE** [**-nested**] [\<*Exception object address*>]|Displays and formats fields of any object derived from the <xref:System.Exception> class at the specified address. If you do not specify an address, the **PrintException** command displays the last exception thrown on the current thread.<br /><br /> The **-nested** option displays details about nested exception objects.<br /><br /> The **-lines** option displays source information, if available.<br /><br /> You can use this command to format and view the `_stackTrace` field, which is a binary array.|
|**ProcInfo** [**-env**] [**-time**] [**-mem**]|Displays environment variables for the process, kernel CPU time, and memory usage statistics.|
|**RCWCleanupList** \<*RCWCleanupList address*>|Displays the list of runtime callable wrappers at the specified address that are awaiting cleanup.|
|**SaveModule** \<*Base address*> \<*Filename*>|Writes an image, which is loaded in memory at the specified address, to the specified file.|
|**SOSFlush**|Flushes an internal SOS cache.|
|**SOSStatus** [**-netfx**] [**-netcore**] [**-reset**]|Display internal SOS status, reset the internal cached state, or change between the desktop .NET framework or .NET Core runtimes when both are loaded in the process or dump.<br/><br/>-netfx - switch to the desktop .NET Framework runtime if loaded.<br/>-netcore - switch to the .NET Core runtime if loaded.<br/>-reset - reset all the cached internal SOS state.<br/><br/>|
|**SetHostRuntime** [**-netcore**] [**-netfx**] [\<runtime-directory\>]|This command controls the runtime that is used to host the managed code that runs as part of SOS in the debugger (cdb/windbg). The default is the desktop .NET Framework. The "-netcore" option allows the installed .NET Core runtime to be used. The "-netfx" option switches back to the .NET Framework runtime.<br/><br/>Normally, SOS attempts to find an installed .NET Core runtime to run its managed code automatically, but this command is available if it fails. The default is to use the same runtime (libcoreclr) being debugged. Use this command if the default runtime being debugged isn't working well enough to run the SOS code or if the version is less than 2.1.0.<br/><br/>If you receive the following error message when running an SOS command, use this command to set the path to a 2.1.0 or greater .NET Core runtime.<br/><br/>`(lldb) clrstack`<br/>`Error: Fail to initialize CoreCLR 80004005 ClrStack failed`<br/><br/>`(lldb) sethostruntime /usr/share/dotnet/shared/Microsoft.NETCore.App/2.1.6`<br/><br/>You can use "dotnet --info" in a command shell to find the path of an installed .NET Core runtime.|
|**StopOnException** [**-derived**] [**-create** &#124; **-create2**] \<*Exception*> \<*Pseudo-register number*>|Causes the debugger to stop when the specified exception is thrown, but to continue running when other exceptions are thrown.<br /><br /> The **-derived** option catches the specified exception and every exception that derives from the specified exception.|
|**SyncBlk** [**-all** &#124; \<*syncblk number*>]|Displays the specified `SyncBlock` structure or all `SyncBlock` structures. If you do not pass any arguments, the **SyncBlk** command displays the `SyncBlock` structure corresponding to objects that are owned by a thread.<br /><br /> A `SyncBlock` structure is a container for extra information that does not need to be created for every object. It can hold COM interop data, hash codes, and locking information for thread-safe operations.|
|**ThreadPool**|Displays information about the managed thread pool, including the number of work requests in the queue, the number of completion port threads, and the number of timers.|
|**Token2EE** \<*module name*> \<*token*>|Turns the specified metadata token in the specified module into a `MethodTable` structure or `MethodDesc` structure.<br /><br /> You can pass `*` for the module name parameter to find what that token maps to in every loaded managed module. You can also pass the debugger's name for a module, such as `mscorlib` or `image00400000`.|
|**Threads** [**-live**] [**-special**]|Displays all managed threads in the process.<br /><br /> The **Threads** command displays the debugger shorthand ID, the CLR thread ID, and the operating system thread ID. Additionally, the **Threads** command displays a Domain column that indicates the application domain in which a thread is executing, an APT column that displays the COM apartment mode, and an Exception column that displays the last exception thrown in the thread.<br /><br /> The **-live** option displays threads associated with a live thread.<br /><br /> The **-special** option displays all special threads created by the CLR. Special threads include garbage collection threads (in concurrent and server garbage collection), debugger helper threads, finalizer threads, <xref:System.AppDomain> unload threads, and thread pool timer threads.|
|**ThreadState \<** *State value field* **>**|Displays the state of the thread. The `value` parameter is the value of the `State` field in the **Threads** report output.|
|**TraverseHeap** [**-xml**] \<*filename*>|Writes heap information to the specified file in a format understood by the CLR profiler. The **-xml** option causes the **TraverseHeap** command to format the file as XML.<br /><br /> You can download the CLR Profiler from the [Microsoft Download Center](https://go.microsoft.com/fwlink/?LinkID=67325).|
|**U** [**-gcinfo**] [**-ehinfo**] [**-n**] \<*MethodDesc address*> &#124; \<*Code address*>|Displays an annotated disassembly of a managed method specified either by a `MethodDesc` structure pointer for the method or by a code address within the method body. The **U** command displays the entire method from start to finish, with annotations that convert metadata tokens to names.<br /><br /> The **-gcinfo** option causes the **U** command to display the `GCInfo` structure for the method.<br /><br /> The **-ehinfo** option displays exception information for the method. You can also obtain this information with the **EHInfo** command.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS looks up the symbols for every managed frame and, if successful, displays the corresponding source file name and line number. You can specify the **-n** option to disable this behavior.|
|**VerifyHeap**|Checks the garbage collector heap for signs of corruption and displays any errors found.<br /><br /> Heap corruptions can be caused by platform invoke calls that are constructed incorrectly.|
|**VerifyObj** \<*object address*>|Checks the object that is passed as an argument for signs of corruption.|
|**VMMap**|Traverses the virtual address space and displays the type of protection applied to each region.|
|**VMStat**|Provides a summary view of the virtual address space, ordered by each type of protection applied to that memory (free, reserved, committed, private, mapped, image). The TOTAL column displays the result of the AVERAGE column multiplied by the BLK COUNT column.|
## Remarks
The SOS Debugging Extension lets you view information about code that is running inside the CLR. For example, you can use the SOS Debugging Extension to display information about the managed heap, look for heap corruptions, display internal data types used by the runtime, and view information about all managed code running inside the runtime.
To use the SOS Debugging Extension in Visual Studio, install the [Windows Driver Kit (WDK)](/windows-hardware/drivers/download-the-wdk). For information about the integrated debugging environment in Visual Studio, see [Debugging Environments](/windows-hardware/drivers/debugger/debuggers-in-the-debugging-tools-for-windows-package).
You can also use the SOS Debugging Extension by loading it into the [WinDbg.exe debugger](/windows-hardware/drivers/debugger/debugger-download-tools) and executing commands within WinDbg.exe.
To load the SOS Debugging Extension into the WinDbg.exe debugger, run the following command in the tool:
```
.loadby sos coreclr
```
WinDbg.exe and Visual Studio use a version of SOS.dll that corresponds to the version of Mscorwks.dll currently in use. By default, you should use the version of SOS.dll that matches the current version of Mscorwks.dll.
To use a dump file created on another computer, make sure that the Mscorwks.dll file that came with that installation is in your symbol path, and load the corresponding version of SOS.dll.
To load a specific version of SOS.dll, type the following command into the Windows Debugger:
```
.load <full path to sos.dll>
```
## Examples
The following command displays the contents of an array at the address `00ad28d0`. The display starts from the second element and continues for five elements.
```
!dumparray -start 2 -length 5 -detail 00ad28d0
```
The following command displays the contents of an assembly at the address `1ca248`.
```
!dumpassembly 1ca248
```
The following command displays information about the garbage collector heap.
```
!dumpheap
```
The following command writes the contents of the in-memory stress log to a (default) file called StressLog.txt in the current directory.
```
!DumpLog
```
The following command displays the `MethodDesc` structure at the address `902f40`.
```
!dumpmd 902f40
```
The following command displays information about a module at the address `1caa50`.
```
!dumpmodule 1caa50
```
The following command displays information about an object at the address `a79d40`.
```
!DumpObj a79d40
```
The following command displays the fields of a value class at the address `00a79d9c` using the method table at the address `0090320c`.
```
!DumpVC 0090320c 00a79d9c
```
The following command displays the process memory used by the garbage collector.
```
!eeheap -gc
```
The following command displays all objects scheduled for finalization.
```
!finalizequeue
```
The following command determines the application domain of an object at the address `00a79d98`.
```
!findappdomain 00a79d98
```
The following command displays the GC encoding information (`GCInfo`) for the method at the code address `5b68dbb8`.
```
!gcinfo 5b68dbb8
```
The following command displays the `MethodTable` and `EEClass` structures for the `Main` method in the class `MainClass` in the module `unittest.exe`.
```
!name2ee unittest.exe MainClass.Main
```
The following command displays information about the metadata token at the address `02000003` in the module `unittest.exe`.
```
!token2ee unittest.exe 02000003
```

Просмотреть файл

@@ -1,270 +0,0 @@
---
title: "SOS Debugging Extension"
ms.date: "10/3/2018"
helpviewer_keywords:
- "debugging extensions"
- "SOS debugging extensions"
- "xplat debugging"
ms.assetid: TBD
author: "mikem"
ms.author: "mikem"
---
# SOS debugging extension for xplat
The SOS Debugging Extension lets you view information about code that is running inside the .NET Core runtime. For example, you can use the SOS Debugging Extension to display information about the managed heap, look for heap corruptions, display internal data types used by the runtime, and view information about all managed code running inside the runtime.
## Syntax
```shell
sos [command] [options]
```
Many of the commands have aliases or shortcuts under lldb:
```
clrstack [options]
```
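For illustration, assuming SOS has already been installed for lldb with the `dotnet-sos` global tool (which configures lldb to load the plugin automatically), a session against a core dump might look like the following; the dump file name and host binary are placeholders:
```
lldb --core ./core.1234 dotnet
(lldb) clrstack
(lldb) dumpheap -stat
```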
### Command Summary
SOS is an lldb debugger extension designed to aid in the debugging of managed programs. Functions are listed by category, then roughly in order of importance. Shortcut names for popular functions are listed in parentheses. Type `soshelp <functionname>` for detailed info on that function.
Object Inspection                Examining code and stacks
-----------------------------    -----------------------------
DumpObj (dumpobj)                Threads (clrthreads)
DumpArray                        ThreadState
DumpAsync (dumpasync)            IP2MD (ip2md)
DumpStackObjects (dso)           u (clru)
DumpHeap (dumpheap)              DumpStack (dumpstack)
DumpVC                           EEStack (eestack)
GCRoot (gcroot)                  CLRStack (clrstack)
PrintException (pe)              GCInfo
                                 EHInfo
                                 bpmd (bpmd)
Examining CLR data structures    Diagnostic Utilities
-----------------------------    -----------------------------
DumpDomain (dumpdomain)          VerifyHeap
EEHeap (eeheap)                  FindAppDomain
Name2EE (name2ee)                DumpLog (dumplog)
DumpMT (dumpmt)
DumpClass (dumpclass)
DumpMD (dumpmd)
Token2EE
DumpModule (dumpmodule)
DumpAssembly
DumpRuntimeTypes
DumpIL (dumpil)
DumpSig
DumpSigElem
Examining the GC history         Other
-----------------------------    -----------------------------
HistInit (histinit)              SetHostRuntime (sethostruntime)
HistRoot (histroot)              SetSymbolServer (setsymbolserver, loadsymbols)
HistObj (histobj)                SetClrPath (setclrpath)
HistObjFind (histobjfind)        SOSFlush (sosflush)
HistClear (histclear)            SOSStatus (sosstatus)
                                 FAQ
                                 Help (soshelp)
## Commands
|Command|Description|
|-------------|-----------------|
|**bpmd** [**-nofuturemodule**] [\<*module name*> \<*method name*>] [**-md** <`MethodDesc`>] **-list** **-clear** \<*pending breakpoint number*> **-clearall**|Creates a breakpoint at the specified method in the specified module.<br /><br /> If the specified module and method have not been loaded, this command waits for a notification that the module was loaded and just-in-time (JIT) compiled before creating a breakpoint.<br /><br /> You can manage the list of pending breakpoints by using the **-list**, **-clear**, and **-clearall** options:<br /><br /> The **-list** option generates a list of all the pending breakpoints. If a pending breakpoint has a non-zero module ID, that breakpoint is specific to a function in that particular loaded module. If the pending breakpoint has a zero module ID, that breakpoint applies to modules that have not yet been loaded.<br /><br /> Use the **-clear** or **-clearall** option to remove pending breakpoints from the list.|
|**CLRStack** [**-a**] [**-l**] [**-p**] [**-n**] [**-f**] [**-r**] [**-all**]|Provides a stack trace of managed code only.<br /><br /> The **-p** option shows arguments to the managed function.<br /><br /> The **-l** option shows information on local variables in a frame. The SOS Debugging Extension cannot retrieve local names, so the output for local names is in the format \<*local address*> **=** \<*value*>.<br /><br /> The **-a** option is a shortcut for **-l** and **-p** combined.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS will look up the symbols for every managed frame and if successful will display the corresponding source file name and line number. The **-n** (No line numbers) parameter can be specified to disable this behavior.<br /><br />The **-f** option (full mode) displays the native frames intermixing them with the managed frames and the assembly name and function offset for the managed frames.<br /><br />The **-r** option dumps the registers for each stack frame.<br /><br />The **-all** option dumps all the managed threads' stacks.|
|**DumpArray** [**-start** \<*startIndex*>] [**-length** \<*length*>] [**-details**] [**-nofields**] \<*array object address*><br /><br /> -or-<br /><br /> **DA** [**-start** \<*startIndex*>] [**-length** \<*length*>] [**-detail**] [**-nofields**] *array object address*>|Examines elements of an array object.<br /><br /> The **-start** option specifies the starting index at which to display elements.<br /><br /> The **-length** option specifies how many elements to show.<br /><br /> The **-details** option displays details of the element using the **DumpObj** and **DumpVC** formats.<br /><br /> The **-nofields** option prevents arrays from displaying. This option is available only when the **-detail** option is specified.|
|**DumpAsync** (**dumpasync**) [**-mt** \<*MethodTable address*>] [**-type** \<*partial type name*>] [**-waiting**] [**-roots**]|DumpAsync traverses the garbage collected heap, looking for objects representing async state machines as created when an async method's state is transferred to the heap. This command recognizes async state machines defined as `async void`, `async Task`, `async Task<T>`, `async ValueTask`, and `async ValueTask<T>`.<br /><br />The output includes a block of details for each async state machine object found. These details include:<br />- a line for the type of the async state machine object, including its MethodTable address, its object address, its size, and its type name.<br />- a line for the state machine type name as contained in the object.<br />- a listing of each field on the state machine.<br />- a line for a continuation from this state machine object, if one or more has been registered.<br />- discovered GC roots for this async state machine object.<br />|
|**DumpAssembly** \<*assembly address*>|Displays information about an assembly.<br /><br /> The **DumpAssembly** command lists multiple modules, if they exist.<br /><br /> You can get an assembly address by using the **DumpDomain** command.|
|**DumpClass** \<*EEClass address*>|Displays information about the `EEClass` structure associated with a type.<br /><br /> The **DumpClass** command displays static field values but does not display nonstatic field values.<br /><br /> Use the **DumpMT**, **DumpObj**, **Name2EE**, or **Token2EE** command to get an `EEClass` structure address.|
|**DumpDomain** [\<*domain address*>]|Enumerates each <xref:System.Reflection.Assembly> object that is loaded within the specified <xref:System.AppDomain> object address. When called with no parameters, the **DumpDomain** command lists all <xref:System.AppDomain> objects in a process.|
|**DumpHeap** [**-stat**] [**-strings**] [**-short**] [**-min** \<*size*>] [**-max** \<*size*>] [**-thinlock**] [**-startAtLowerBound**] [**-mt** \<*MethodTable address*>] [**-type** \<*partial type name*>][*start* [*end*]]|Displays information about the garbage-collected heap and collection statistics about objects.<br /><br /> The **DumpHeap** command displays a warning if it detects excessive fragmentation in the garbage collector heap.<br /><br /> The **-stat** option restricts the output to the statistical type summary.<br /><br /> The **-strings** option restricts the output to a statistical string value summary.<br /><br /> The **-short** option limits output to just the address of each object. This lets you easily pipe output from the command to another debugger command for automation.<br /><br /> The **-min** option ignores objects that are less than the `size` parameter, specified in bytes.<br /><br /> The **-max** option ignores objects that are larger than the `size` parameter, specified in bytes.<br /><br /> The **-thinlock** option reports ThinLocks. For more information, see the **SyncBlk** command.<br /><br /> The `-startAtLowerBound` option forces the heap walk to begin at the lower bound of a supplied address range. During the planning phase, the heap is often not walkable because objects are being moved. This option forces **DumpHeap** to begin its walk at the specified lower bound. You must supply the address of a valid object as the lower bound for this option to work. You can display memory at the address of a bad object to manually find the next method table. If the garbage collection is currently in a call to `memcopy`, you may also be able to find the address of the next object by adding the size to the start address, which is supplied as a parameter.<br /><br /> The **-mt** option lists only those objects that correspond to the specified `MethodTable` structure.<br /><br /> The **-type** option lists only those objects whose type name is a substring match of the specified string.<br /><br /> The `start` parameter begins listing from the specified address.<br /><br /> The `end` parameter stops listing at the specified address.|
|**DumpIL** \<*Managed DynamicMethod object*> &#124; \<*DynamicMethodDesc pointer*> &#124; \<*MethodDesc pointer*>|Displays the Microsoft intermediate language (MSIL) that is associated with a managed method.<br /><br /> Note that dynamic MSIL is emitted differently than MSIL that is loaded from an assembly. Dynamic MSIL refers to objects in a managed object array rather than to metadata tokens.|
|**DumpLog** [**-addr** \<*addressOfStressLog*>] [<*Filename*>]|Writes the contents of an in-memory stress log to the specified file. If you do not specify a name, this command creates a file called StressLog.txt in the current directory.<br /><br /> The in-memory stress log helps you diagnose stress failures without using locks or I/O. To enable the stress log, set the following registry keys under HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\\.NETFramework:<br /><br /> (DWORD) StressLog = 1<br /><br /> (DWORD) LogFacility = 0xffffffff<br /><br /> (DWORD) StressLogSize = 65536<br /><br /> The optional `-addr` option lets you specify a stress log other than the default log.|
|**DumpMD** \<*MethodDesc address*>|Displays information about a `MethodDesc` structure at the specified address.<br /><br /> You can use the **IP2MD** command to get the `MethodDesc` structure address from a managed function.|
|**DumpMT** [**-MD**] \<*MethodTable address*>|Displays information about a method table at the specified address. Specifying the **-MD** option displays a list of all methods defined with the object.<br /><br /> Each managed object contains a method table pointer.|
|**DumpModule** [**-mt**] \<*Module address*>|Displays information about a module at the specified address. The **-mt** option displays the types defined in a module and the types referenced by the module<br /><br /> You can use the **DumpDomain** or **DumpAssembly** command to retrieve a module's address.|
|**DumpObj** [**-nofields**] \<*object address*><br /><br /> -or-<br /><br /> **DO** \<*object address*>|Displays information about an object at the specified address. The **DumpObj** command displays the fields, the `EEClass` structure information, the method table, and the size of the object.<br /><br /> You can use the **DumpStackObjects** command to retrieve an object's address.<br /><br /> Note that you can run the **DumpObj** command on fields of type `CLASS` because they are also objects.<br /><br /> The `-`**nofields** option prevents fields of the object being displayed, it is useful for objects like String.|
|**DumpRuntimeTypes**|Displays the runtime type objects in the garbage collector heap and lists their associated type names and method tables.|
|**DumpStack** [**-EE**] [**-n**] [`top` *stack* [`bottom` *stac*`k`]]|Displays a stack trace.<br /><br /> The **-EE** option causes the **DumpStack** command to display only managed functions. Use the `top` and `bottom` parameters to limit the stack frames displayed on x86 platforms.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS will look up the symbols for every managed frame and if successful will display the corresponding source file name and line number. The **-n** (No line numbers) parameter can be specified to disable this behavior.<br /><br /> On x86 and x64 platforms, the **DumpStack** command creates a verbose stack trace.<br /><br /> On IA-64-based platforms, the **DumpStack** command mimics the debugger's **K** command. The `top` and `bottom` parameters are ignored on IA-64-based platforms.|
|**DumpSig** \<*sigaddr*> \<*moduleaddr*>|Displays information about a `Sig` structure at the specified address.|
|**DumpSigElem** \<*sigaddr*> \<*moduleaddr*>|Displays a single element of a signature object. In most cases, you should use **DumpSig** to look at individual signature objects. However, if a signature has been corrupted in some way, you can use **DumpSigElem** to read the valid portions of it.|
|**DumpStackObjects** [**-verify**] [`top` *stack* [`bottom` *stack*]]<br /><br /> -or-<br /><br /> **DSO** [**-verify**] [`top` *stack* [`bottom` *stack*]]|Displays all managed objects found within the bounds of the current stack.<br /><br /> The **-verify** option validates each non-static `CLASS` field of an object field.<br /><br /> Use the **DumpStackObjects** command with stack tracing commands such as the **K** command and the **clrstack** command to determine the values of local variables and parameters.|
|**DumpVC** \<*MethodTable address*> \<*Address*>|Displays information about the fields of a value class at the specified address.<br /><br /> The **MethodTable** parameter allows the **DumpVC** command to correctly interpret fields. Value classes do not have a method table as their first field.|
|**EEHeap** [**-gc**] [**-loader**]|Displays information about process memory consumed by internal runtime data structures.<br /><br /> The **-gc** and **-loader** options limit the output of this command to garbage collector or loader data structures.<br /><br /> The information for the garbage collector lists the ranges of each segment in the managed heap. If the pointer falls within a segment range given by **-gc**, the pointer is an object pointer.|
|**EEStack** [**-short**] [**-EE**]|Runs the **DumpStack** command on all threads in the process.<br /><br /> The **-EE** option is passed directly to the **DumpStack** command. The **-short** parameter limits the output to the following kinds of threads:<br /><br />Threads that have taken a lock.<br /><br />Threads that have been stalled in order to allow a garbage collection.<br /><br /> Threads that are currently in managed code.|
|**EHInfo** [\<*MethodDesc address*>] [\<*Code address*>]|Displays the exception handling blocks in a specified method. This command displays the code addresses and offsets for the clause block (the `try` block) and the handler block (the `catch` block).|
|**FAQ**|Displays frequently asked questions.|
|**FindAppDomain** \<*Object address*>|Determines the application domain of an object at the specified address.|
|**GCInfo** \<*MethodDesc address*> &#124; \<*Code address*>|Displays data that indicates when registers or stack locations contain managed objects. If a garbage collection occurs, the collector must know the locations of references to objects so it can update them with new object pointer values.|
|**GCRoot** [**-nostacks**] [**-all**] \<*Object address*>|Displays information about references (or roots) to an object at the specified address.<br /><br /> The **GCRoot** command examines the entire managed heap and the handle table for handles within other objects and handles on the stack. Each stack is then searched for pointers to objects, and the finalizer queue is also searched.<br /><br /> This command does not determine whether a stack root is valid or is discarded. Use the **clrstack** and **U** commands to disassemble the frame that the local or argument value belongs to in order to determine if the stack root is still in use.<br /><br /> The **-nostacks** option restricts the search to garbage collector handles and reachable objects.<br /><br /> The **-all** option forces all roots to be displayed instead of just the unique roots.|
|**GCWhere** *\<object address>*|Displays the location and size in the garbage collection heap of the argument passed in. When the argument lies in the managed heap but is not a valid object address, the size is displayed as 0 (zero).|
|**Help** (**soshelp**) [\<*command*>] [`faq`]|Displays all available commands when no parameter is specified, or displays detailed help information about the specified command.<br /><br /> The `faq` parameter displays answers to frequently asked questions.|
|**HistClear**|Releases any resources used by the family of `Hist` commands.<br /><br /> Generally, you do not have to explicitly call `HistClear`, because each `HistInit` cleans up the previous resources.|
|**HistInit**|Initializes the SOS structures from the stress log saved in the debuggee.|
|**HistObj** *<obj_address>*|Examines all stress log relocation records and displays the chain of garbage collection relocations that may have led to the address passed in as an argument.|
|**HistObjFind** *<obj_address>*|Displays all the log entries that reference an object at the specified address.|
|**HistRoot** *\<root>*|Displays information related to both promotions and relocations of the specified root.<br /><br /> The root value can be used to track the movement of an object through the garbage collections.|
|**IP2MD** (**ip2md**) \<*Code address*>|Displays the `MethodDesc` structure at the specified address in code that has been JIT-compiled.|
|**Name2EE** (**name2ee**) \<*module name*> \<*type or method name*><br /><br /> -or-<br /><br /> **Name2EE** \<*module name*>**!**\<*type or method name*>|Displays the `MethodTable` structure and `EEClass` structure for the specified type or method in the specified module.<br /><br /> The specified module must be loaded in the process.<br /><br /> To get the proper type name, browse the module by using the [Ildasm.exe (IL Disassembler)](../../../docs/framework/tools/ildasm-exe-il-disassembler.md). You can also pass `*` as the module name parameter to search all loaded managed modules. The *module name* parameter can also be the debugger's name for a module, such as `mscorlib` or `image00400000`.<br /><br /> This command supports the Windows debugger syntax of <`module`>`!`<`type`>. The type must be fully qualified.|
|**PrintException** [**-nested**] [**-lines**] [\<*Exception object address*>]<br /><br /> -or-<br /><br /> **PE** [**-nested**] [\<*Exception object address*>]|Displays and formats fields of any object derived from the <xref:System.Exception> class at the specified address. If you do not specify an address, the **PrintException** command displays the last exception thrown on the current thread.<br /><br /> The **-nested** option displays details about nested exception objects.<br /><br /> The **-lines** option displays source information, if available.<br /><br /> You can use this command to format and view the `_stackTrace` field, which is a binary array.|
|**SyncBlk** [**-all** &#124; \<*syncblk number*>]|Displays the specified `SyncBlock` structure or all `SyncBlock` structures. If you do not pass any arguments, the **SyncBlk** command displays the `SyncBlock` structure corresponding to objects that are owned by a thread.<br /><br /> A `SyncBlock` structure is a container for extra information that does not need to be created for every object. It can hold COM interop data, hash codes, and locking information for thread-safe operations.|
|**SOSFlush**|Flushes an internal SOS cache.|
|**SOSStatus** [**-reset**]|Displays the internal SOS status, or resets the internal cached state when **-reset** is specified.|
|**SetHostRuntime** [\<runtime-directory\>]|Sets the path to the .NET Core runtime to use to host the managed code that runs as part of SOS in the debugger (lldb). The runtime needs to be version 2.1.0 or greater. If there are spaces in the directory path, it needs to be single-quoted (').<br/><br/>Normally, SOS attempts to find an installed .NET Core runtime to run its managed code automatically, but this command is available if that fails. The default is to use the same runtime (libcoreclr) being debugged. Use this command if the runtime being debugged isn't complete enough to run the SOS code or if its version is less than 2.1.0.<br/><br/>If you receive the following error message when running a SOS command, use this command to set the path to a 2.1.0 or greater .NET Core runtime.<br/><br/>`(lldb) clrstack`<br/>`Error: Fail to initialize CoreCLR 80004005 ClrStack failed`<br/><br/>`(lldb) sethostruntime /usr/share/dotnet/shared/Microsoft.NETCore.App/2.1.6`<br/><br/>You can run `dotnet --info` in a command shell to find the path of an installed .NET Core runtime.|
|**SetSymbolServer** [**-ms**] [**-disable**] [**-log**] [**-loadsymbols**] [**-cache** \<cache-path>] [**-directory** \<search-directory>] [**-sympath** \<windows-symbol-path>] [\<symbol-server-URL>]|Enables the symbol server downloading support.<br/><br/>The **-ms** option enables downloading from the public Microsoft symbol server.<br/><br/>The **-disable** option turns off the symbol download support.<br/><br/>The **-cache** \<cache-path> option specifies a symbol cache directory. The default is $HOME/.dotnet/symbolcache if not specified.<br/><br/>The **-directory** option adds a path to search for symbols; it can be specified more than once.<br/><br/>The **-sympath** option adds server, cache, and directory paths in the Windows symbol path format.<br/><br/>The **-log** option enables symbol download logging.<br/><br/>The **-loadsymbols** option attempts to download the native .NET Core symbols for the runtime.|
|**Token2EE** \<*module name*> \<*token*>|Turns the specified metadata token in the specified module into a `MethodTable` structure or `MethodDesc` structure.<br /><br /> You can pass `*` for the module name parameter to find what that token maps to in every loaded managed module. You can also pass the debugger's name for a module, such as `mscorlib` or `image00400000`.|
|**Threads** (**clrthreads**) [**-live**] [**-special**]|Displays all managed threads in the process.<br /><br /> The **Threads** command displays the debugger shorthand ID, the CLR thread ID, and the operating system thread ID. Additionally, the **Threads** command displays a Domain column that indicates the application domain in which a thread is executing, an APT column that displays the COM apartment mode, and an Exception column that displays the last exception thrown in the thread.<br /><br /> The **-live** option limits the output to threads that are still alive.<br /><br /> The **-special** option displays all special threads created by the CLR. Special threads include garbage collection threads (in concurrent and server garbage collection), debugger helper threads, finalizer threads, <xref:System.AppDomain> unload threads, and thread pool timer threads.|
|**ThreadState** \<*State value field*>|Displays the state of the thread. The `value` parameter is the value of the `State` field in the **Threads** report output.|
|**U** [**-gcinfo**] [**-ehinfo**] [**-n**] \<*MethodDesc address*> &#124; \<*Code address*>|Displays an annotated disassembly of a managed method specified either by a `MethodDesc` structure pointer for the method or by a code address within the method body. The **U** command displays the entire method from start to finish, with annotations that convert metadata tokens to names.<br /><br /> The **-gcinfo** option causes the **U** command to display the `GCInfo` structure for the method.<br /><br /> The **-ehinfo** option displays exception information for the method. You can also obtain this information with the **EHInfo** command.<br /><br /> The **-n** option disables the display of source file names and line numbers. If the debugger has the option SYMOPT_LOAD_LINES specified, SOS looks up the symbols for every managed frame and, if successful, displays the corresponding source file name and line number. You can specify the **-n** option to disable this behavior.|
|**VerifyHeap**|Checks the garbage collector heap for signs of corruption and displays any errors found.<br /><br /> Heap corruptions can be caused by platform invoke calls that are constructed incorrectly.|
### Aliases
By default, you can reach all the SOS commands by using _sos [command\_name]_.
However, the common commands have been aliased so that you don't need the `sos` prefix (an example follows the list):
* `bpmd` -- Creates a breakpoint at the specified managed method in the specified module.
* `clrstack` -- Provides a stack trace of managed code only.
* `clrthreads` -- Lists the managed threads running.
* `clru` -- Displays an annotated disassembly of a managed method.
* `dso` -- Displays all managed objects found within the bounds of the current stack.
* `dumpasync` -- Displays information about async state machines on the garbage-collected heap.
* `dumpclass` -- Displays information about an EE class structure at the specified address.
* `dumpdomain` -- Displays information about all the AppDomains and all assemblies within the domains.
* `dumpheap` -- Displays information about the garbage-collected heap and collection statistics about objects.
* `dumpil` -- Displays the Microsoft intermediate language (MSIL) that is associated with a managed method.
* `dumplog` -- Writes the contents of an in-memory stress log to the specified file.
* `dumpmd` -- Displays information about a MethodDesc structure at the specified address.
* `dumpmodule` -- Displays information about an EE module structure at the specified address.
* `dumpmt` -- Displays information about a method table at the specified address.
* `dumpobj` -- Displays information about an object at the specified address.
* `dumpstack` -- Displays a native and managed stack trace.
* `eeheap` -- Displays information about process memory consumed by internal runtime data structures.
* `eestack` -- Runs dumpstack on all threads in the process.
* `gcroot` -- Displays information about references (or roots) to an object at the specified address.
* `histclear` -- Releases any resources used by the family of Hist commands.
* `histinit` -- Initializes the SOS structures from the stress log saved in the debuggee.
* `histobj` -- Examines all stress log relocation records and displays the chain of garbage collection relocations that may have led to the address passed in as an argument.
* `histobjfind` -- Displays all the log entries that reference an object at the specified address.
* `histroot` -- Displays information related to both promotions and relocations of the specified root.
* `ip2md` -- Displays the MethodDesc structure at the specified address in code that has been JIT-compiled.
* `loadsymbols` -- Loads the .NET Core native module symbols.
* `name2ee` -- Displays the MethodTable structure and EEClass structure for the specified type or method in the specified module.
* `pe` -- Displays and formats fields of any object derived from the Exception class at the specified address.
* `setclrpath` -- Sets the path to load the coreclr DAC/DBI files. `setclrpath <path>`
* `sethostruntime` -- Sets or displays the .NET Core runtime directory to use to run managed code in SOS.
* `setsymbolserver` -- Enables the symbol server support.
* `setsostid` -- Sets the current OS tid/thread index instead of using the one lldb provides. `setsostid <tid> <index>`
* `sos` -- Various coreclr debugging commands. See 'soshelp' for more details. `sos <command-name> <args>`
* `soshelp` -- Displays all available commands when no parameter is specified, or displays detailed help information about the specified command. `soshelp <command>`
* `syncblk` -- Displays the SyncBlock holder information.
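For example, the following two invocations are equivalent; the object address is only a placeholder for illustration:
```
(lldb) sos DumpObj 00007f8a4c0408f8
(lldb) dumpobj 00007f8a4c0408f8
```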
## Remarks
TBD
## Examples
The following command displays the contents of an array at the address `00ad28d0`. The display starts from the second element and continues for five elements.
```
sos DumpArray -start 2 -length 5 -detail 00ad28d0
```
The following command displays the contents of an assembly at the address `1ca248`.
```
sos DumpAssembly 1ca248
```
The following command displays information about the garbage collector heap.
```
dumpheap
```
The following command writes the contents of the in-memory stress log to a (default) file called StressLog.txt in the current directory.
```
dumplog
```
The following command displays the `MethodDesc` structure at the address `902f40`.
```
dumpmd 902f40
```
The following command displays information about a module at the address `1caa50`.
```
dumpmodule 1caa50
```
The following command displays information about an object at the address `a79d40`.
```
dumpobj a79d40
```
The following command displays the fields of a value class at the address `00a79d9c` using the method table at the address `0090320c`.
```
sos DumpVC 0090320c 00a79d9c
```
The following command displays the process memory used by the garbage collector.
```
eeheap -gc
```
The following command determines the application domain of an object at the address `00a79d98`.
```
sos FindAppDomain 00a79d98
```
The following command displays the `GCInfo` data for the method at the address `5b68dbb8`.
```
sos GCInfo 5b68dbb8
```
The following command displays the `MethodTable` and `EEClass` structures for the `Main` method in the class `MainClass` in the module `unittest.exe`.
```
name2ee unittest.exe MainClass.Main
```
The following command displays information about the metadata token `02000003` in the module `unittest.exe`.
```
sos Token2EE unittest.exe 02000003
```
The following commands display the managed threads and the thread state for one of them:
```
clrthreads
```
```
ThreadCount: 2
UnstartedThread: 0
BackgroundThread: 1
PendingThread: 0
DeadThread: 0
Hosted Runtime: no
Lock
ID OSID ThreadOBJ State GC Mode GC Alloc Context Domain Count Apt Exception
1 1 12fd 0000000000673A90 20020 Cooperative 00007FFF5801D9A0:00007FFF5801E808 0000000000654CD0 0 Ukn
7 2 1306 0000000000697E90 21220 Preemptive 0000000000000000:0000000000000000 0000000000654CD0 0 Ukn (Finalizer)
(lldb) sos ThreadState 21220
Legal to Join
Background
CLR Owns
Fully initialized
```

@ -1,46 +0,0 @@
SOS
===
SOS is a debugger extension that allows a developer to inspect the managed state of a .NET Core and desktop runtime process. SOS can be loaded by WinDbg/cdb debuggers on Windows and lldb on Linux and MacOS.
## Getting lldb
Getting a version of lldb that works for your platform can sometimes be a problem. The version has to be 3.9 or greater because of a now-fixed bug that affected running SOS against a core dump. On some Linux distros such as Ubuntu, it is as easy as `sudo apt-get install lldb-3.9 python-lldb-3.9`. On other distros, you will need to build lldb. The directions below should give you some guidance.
* [Linux Instructions](lldb/linux-instructions.md)
* [MacOS Instructions](lldb/osx-instructions.md)
* [FreeBSD Instructions](lldb/freebsd-instructions.md)
* [NetBSD Instructions](lldb/netbsd-instructions.md)
## Installing SOS
* [Linux and MacOS Instructions](installing-sos-instructions.md)
* [Windows Instructions](installing-sos-windows-instructions.md)
## Using SOS
* [SOS debugging for Linux/MacOS](sos-debugging-extension.md)
* [SOS debugging for Windows](sos-debugging-extension-windows.md)
* [Debugging a core dump](debugging-coredump.md)
## New SOS Features
The `bpmd` command can now be used before the runtime is loaded. You can load SOS or the sos plugin on Linux and execute bpmd. Always add the module extension for the first parameter.
`bpmd SymbolTestApp.dll SymbolTestApp.Program.Main`
You can set a source file/line number breakpoint like this (the fully qualified source file path is usually not necessary):
`bpmd SymbolTestApp.cs:24`
Symbol server support - The `setsymbolserver` command enables downloading the symbol files (portable PDBs) for managed assemblies during commands like `clrstack`, etc. See `soshelp setsymbolserver` for more details.
`(lldb) setsymbolserver -ms`
To load the native symbols before executing the "bt" command to dump native frames (live debugging only):
`(lldb) loadsymbols`
To add a local directory to search for symbols:
`(lldb) setsymbolserver -directory /tmp/symbols`

@ -1,33 +0,0 @@
# .NET Core Diagnostics Overview
With the .NET Framework running on Windows, we have grown accustomed to a plethora of great diagnostics tools, ranging from dump generation and manual analysis to more sophisticated collection engines such as DebugDiag. As .NET Core picks up (cross-platform) steam, what diagnostics capabilities are available to us when we need to do production diagnostics? It turns out that a lot of work has been done in this area; in particular, .NET Core 3 promises to bring a wide range of diagnostics capabilities.
To learn more about production diagnostics in .NET Core 3, we'll be running through a set of diagnostics scenarios using the built-in runtime/SDK tools. The walkthroughs are all run on Ubuntu 16.04 and use the latest .NET Core preview bits.
Before we jump in head first, let's take a look at some basic methodologies as it relates to production diagnostics. When an outage occurs in production, typically the first and foremost goal is mitigation. Mitigation typically involves getting the app back up and running as quickly as possible. Common mitigation techniques involve restarting the app or sometimes one or more nodes/servers. While restarting is a quick and effective mitigation technique, root cause of the failure is still expected to be understood and appropriate fix(es) made to avoid future downtime. In order to get to root cause, we need to collect as much diagnostics data as we can prior to executing the mitigation strategy. The diagnostics data collected can then be analyzed postmortem to determine root cause and possible fixes. Each of the scenarios we will explore here will outline what capabilities .net core 3 has in terms of diagnostics data collection and analysis.
Below is the list of (growing) scenarios that will be covered.
Most of the scenarios below are implemented using a simple webapi with methods that expose each particular scenario. You can easily create the webapi using the following steps (a consolidated sketch follows this list):
* dotnet new webapi
* add diagscenario.cs to your Controllers folder
* dotnet build
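Put together, a minimal setup might look like this; the project name and the location you copy diagscenario.cs from are illustrative:
```bash
dotnet new webapi -o webapi
cd webapi
cp ../diagscenario.cs Controllers/
dotnet build
```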
Please note that you have to be using at least preview 5 for most of the capabilities to work.
**Note: The tools/APIs that are used are based on preview 5 and are subject to change. The tutorial will be updated to account for later previews and final release**
### [Installing the diagnostics tools](installing_the_diagnostics_tools.md)
### [Scenario - App is leaking memory](app_is_leaking_memory_eventual_crash.md)
### [Scenario - App is running slow](app_running_slow_highcpu.md)
### [Scenario - App is experiencing intermittent memory spikes](intermittent_memory_spike.md)
### [Scenario - App is not responding](hung_app.md)
### Scenario - App is experiencing intermittent exceptions

@ -1,88 +0,0 @@
# App is leaking memory (eventual crash/stops responding)
http://localhost:5000/api/diagscenario/memleak/{kb}
In this scenario, the endpoint will slowly start leaking memory (amount specified by {kb}) and eventually will result in an out of memory exception. In order to diagnose this scenario, we need several key pieces of diagnostics data.
### Memory counters
Before we dig into collecting diagnostics data to help us root cause this scenario, we need to convince ourselves that what we are actually seeing is a memory leak (memory growth). On Windows we could use the myriad of .NET performance counters, but what about on Linux? It turns out .net core has been instrumented to expose metrics from the runtime and we can use the dotnet-counters tool to get at this information (please see 'Installing the diagnostics tools' section).
Next, let's run the webapi (dotnet run) and, before hitting the above URL that will cause the leak, let's check our managed memory counters:
> ```bash
> dotnet-counters monitor --refresh-interval 1 -p 4807
> ```
4807 is the process identifier, which can be found using `dotnet-trace ps`. The refresh interval is the number of seconds between refreshes.
The output should be similar to the below:
![alt text](https://user-images.githubusercontent.com/15442480/57110730-6429fb80-6cee-11e9-8bd1-4f37496c70fe.png)
Here we can see that right after startup, the managed heap memory is 4MB.
Now, let's hit the URL (http://localhost:5000/api/diagscenario/memleak/200000)
Re-run the dotnet-counters command. We should see an increase in memory usage as shown below:
![alt text](https://user-images.githubusercontent.com/15442480/57110722-596f6680-6cee-11e9-9707-954bcbe21312.png)
Memory has now grown to around 265MB.
Note that this shows all the counters. If you want to specify individual counters, use the System.Runtime[counter1, counter2,...] syntax. For example, to display just the GC heap size counter, use:
> ```bash
> dotnet-counters monitor System.Runtime[gc-heap-size] --refresh-interval 1 -p 4923
> ```
At this point, we can safely say that memory is leaking (or at the very least is growing and doesn't seem to come back down once the request is finished). The next step is to run a collection tool that can help us collect the right data for memory analysis.
### Core dump generation
Most commonly when analyzing possible memory leaks, we need access to as much of the app's memory as possible. We can then analyze the memory contents and relationships between objects to create theories on why memory is not being freed. A very common diagnostics data source is a memory dump (on Windows) and the equivalent core dump (on Linux). In order to generate a core dump of a .NET Core application, we can use the dotnet-dump tool (please see 'Installing the diagnostics tools' section). Using the previous webapi run, run the following command to generate a core dump:
> ```bash
> sudo ./dotnet-dump collect -p 4807
> ```
4807 is the process identifier, which can be found using `dotnet-trace ps`. The result is a core dump located in the same folder. Please note that to generate core dumps, dotnet-dump requires sudo.
### Analyzing the core dump
Now that we have a core dump generated, what options do we have to analyze the core dump? On Windows, we would typically use a combination of WinDBG and SOS and the same strategy applies to Linux (albeit with a different tool set). On Linux, there are a couple of different options with some caveats:
* LLDB/SOS. LLDB is the Linux debugger that must be used when debugging using SOS.
* dotnet-dump analyze <dump_path> provides an SOS REPL experience on the specified core file.
In both cases, you have to be careful to roughly match the environment up with the production server. For example, if I am running .NET Core preview 5 on Ubuntu 16.04, the core dump must be analyzed on the same architecture and environment.
For the LLDB/SOS experience, please see - https://github.com/dotnet/runtime/blob/master/docs/workflow/building/coreclr/debugging-instructions.md.
To use the dotnet-dump tool to analyze the dump please run:
> ```bash
> dotnet-dump analyze core_20190430_185145
> ```
(where core_20190430_185145 is the name of the core dump you want to analyze)
Note: If you see an error complaining that libdl.so cannot be found, you may have to install the libc6-dev package.
You will be presented with a prompt where you can enter SOS commands. Commonly, the first thing we want to look at is the overall state of the managed heap by running:
> ```bash
> dumpheap -stat
> ```
The (partial) output can be seen below:
![alt text](https://user-images.githubusercontent.com/15442480/57110756-7d32ac80-6cee-11e9-9b80-2ce700e7a2f1.png)
Here we can see that we have quite a few strings lying around (as well as instances of Customer and Customer[]). We can now use the gcroot command on one of the string instances to see how and why the object is rooted:
![alt text](https://user-images.githubusercontent.com/15442480/57110770-8face600-6cee-11e9-8eea-608b59442058.png)
The string instance appears to be rooted from a top-level Processor object, which in turn references a cache. We can continue dumping out objects to see how much the cache is holding on to:
![alt text](https://user-images.githubusercontent.com/15442480/57110703-4b214a80-6cee-11e9-8887-02c25424a0ad.png)
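The sequence of commands behind these screenshots is roughly the following, where the method table and object addresses are placeholders you would copy from your own output:
> ```bash
> dumpheap -stat
> dumpheap -mt <string_method_table>
> gcroot <string_object_address>
> dumpobj <cache_object_address>
> ```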
From here we can now try to back-track (from code) why the cache seems to be growing in an unbounded fashion.

@ -1,107 +0,0 @@
# App is running slow (due to high CPU)
http://localhost:5000/api/diagscenario/highcpu/{milliseconds}
In this scenario, the endpoint will consume a substantial amount of CPU for a duration specified by {milliseconds}. In order to diagnose this scenario, we need several key pieces of diagnostics data.
### CPU counters
Before we dig into collecting diagnostics data to help us root cause this scenario, we need to convince ourselves that what we are actually seeing is a high CPU condition. On Windows we could use the myriad of .NET performance counters, but what about on Linux? It turns out .net core has been instrumented to expose metrics from the runtime and we can use the dotnet-counters tool to get at this information (please see 'Installing the diagnostics tools' section).
Let's run the webapi (dotnet run) and, before hitting the above URL that will cause the high CPU condition, let's check our CPU counters:
> ```bash
> dotnet-counters monitor --refresh-interval 1 -p 22884
> ```
22884 is the process identifier, which can be found using `dotnet-trace ps`. The refresh interval is the number of seconds between refreshes.
The output should be similar to the below:
![alt text](https://user-images.githubusercontent.com/15442480/57110746-75730800-6cee-11e9-81a8-1c253aef37ce.jpg)
Here we can see that right after startup, the CPU is not being consumed at all (0%).
Now, let's hit the URL (http://localhost:5000/api/diagscenario/highcpu/60000)
Re-run the dotnet-counters command. We should see an increase in CPU usage as shown below:
![alt text](https://user-images.githubusercontent.com/15442480/57110736-6be9a000-6cee-11e9-86b6-6e128318a267.jpg)
Throughout the execution of that request, CPU hovers at around 30%.
Note that this shows all the counters. If you want to specify individual counters, use the System.Runtime[counter1, counter2,...] syntax. For example, to display just the CPU usage counter, use:
> ```bash
> dotnet-counters monitor System.Runtime[cpu-usage] -p 22884 --refresh-interval 1
> ```
At this point, we can safely say that CPU is running a little hotter than we expect. The next step is now to run a collection tool that can help us collect the right data for the CPU analysis.
### Trace generation
Commonly when analyzing slow requests (such as those caused by high CPU), we need a diagnostics tool that can give us insight into what our code is doing at frequent intervals. A very common diagnostics data source is a profiler. There are a few different profiler options, depending on which platform you plan to analyze the trace data on.
#### If you plan on capturing trace data that can be viewed on a Windows machine...
In order to generate profiler traces of a .NET Core application, we can use the dotnet-trace tool (please see 'Installing the diagnostics tools' section). Using the previous webapi, hit the URL (http://localhost:5000/api/diagscenario/highcpu/60000) again and, while it's running within the 1 minute request, run the following:
> ```bash
> dotnet-trace collect -p 2266 --providers Microsoft-DotNETCore-SampleProfiler
> ```
2266 is the process identifier which can be found using dotnet-trace ps. Let dotnet-trace run for about 20-30 seconds and then hit enter to exit the collection. The result is a nettrace file located in the same folder. nettrace files are a great way to use existing analysis tools on Windows (such as PerfView) to diagnose performance problems.
Alternatively, you can get the perf and LTTng trace data in nettrace format by using the perfcollect tool (please see Installing the tools section). Once installed, run the following command:
> ```bash
> sudo ./perfcollect collect sampleTrace
> ```
Reproduce the problem and, when done, hit CTRL-C to exit the perfcollect tool. You will see a sampleTrace.trace.zip file that you can view using PerfView on a Windows machine.
#### If you plan on capturing trace data that can be viewed on a Linux machine...
If you are more familiar with existing performance tools on Linux, .net core is also instrumented to allow you to make use of those tools. Here, we will illustrate how you can use the 'perf' tool to generate traces that can be used on Linux to diagnose performance problems. Exit the previous instance of the webapi and set the following in the terminal:
> ```bash
> export COMPlus_PerfMapEnabled=1
> ```
Next, re-launch the webapi. This step is required to get more legible frames in the traces.
In the same terminal, run the webapi again, hit the URL (http://localhost:5000/api/diagscenario/highcpu/60000) again and, while it's running within the 1 minute request, run the following:
> ```bash
> sudo perf record -p 2266 -g
> ```
This will start the perf collection process. Let it run for about 20-30 seconds and then hit CTRL-C to exit the collection process. The output should tell you how many MBs of perf data was written.
### Analyzing the trace
When it comes to analyzing the profiler trace generated in the previous step, you have two options depending on whether you generated a nettrace file or used the native perf command on Linux.
Starting with the nettrace file, you need to transfer the nettrace file to a Windows machine and use PerfView to analyze the trace as shown below.
![alt text](https://user-images.githubusercontent.com/15442480/57110777-976c8a80-6cee-11e9-9cf7-407a01a08b1d.jpg)
If you generated the traces using the Linux perf command, you can use the same perf command to see the output of the trace.
> ```bash
> sudo perf report -f
> ```
Alternatively, you can also generate a flamegraph by using the following commands:
> ```bash
> git clone --depth=1 https://github.com/BrendanGregg/FlameGraph
> sudo perf script | FlameGraph/stackcollapse-perf.pl | FlameGraph/flamegraph.pl > flamegraph.svg
> ```
This will generate a flamegraph.svg that you can view in the browser to investigate the performance problem:
![alt text](https://user-images.githubusercontent.com/15442480/57110767-87ed4180-6cee-11e9-98d9-9f1c908acfd5.jpg)

@ -1,173 +0,0 @@
using System;
using System.Diagnostics;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using System.Threading;
namespace testwebapi.Controllers
{
[Route("api/[controller]")]
[ApiController]
public class DiagScenarioController : ControllerBase
{
object o1 = new object();
object o2 = new object();
private static Processor p = new Processor();
[HttpGet]
[Route("deadlock/")]
public ActionResult<string> deadlock()
{
(new System.Threading.Thread(() => {
DeadlockFunc();
})).Start();
Thread.Sleep(5000);
Thread[] threads = new Thread[300];
for(int i=0; i<300;i++)
{
(threads[i] = new Thread(() => {
lock (o1) {Thread.Sleep(100);}
})).Start();
}
foreach(Thread thread in threads)
{
thread.Join();
}
return "success:deadlock";
}
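// DeadlockFunc takes lock o1 on the calling thread, then starts a second thread that
// takes lock o2 and blocks trying to enter o1, while the calling thread blocks trying
// to enter o2 -- a classic lock-ordering deadlock.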
private void DeadlockFunc()
{
lock (o1)
{
(new Thread(() => {
lock (o2) { Monitor.Enter(o1); }
})).Start();
Thread.Sleep(2000);
Monitor.Enter(o2);
}
}
[HttpGet]
[Route("memspike/{seconds}")]
public ActionResult<string> memspike(int seconds)
{
Stopwatch watch=new Stopwatch();
watch.Start();
while(true)
{
p = new Processor();
watch.Stop();
if(watch.ElapsedMilliseconds > seconds*1000)
break;
watch.Start();
int it = (200000*1000) / 100;
for(int i=0; i<it; i++)
{
p.ProcessTransaction(new Customer(Guid.NewGuid().ToString()));
}
Thread.Sleep(5000); // Sleep for 5 seconds before cleaning up
// Cleanup
p = null;
GC.Collect();
GC.Collect();
Thread.Sleep(5000); // Sleep for 5 seconds before spiking memory again
}
return "success:memspike";
}
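// memleak: each call adds roughly {kb} KB of Customer instances to the static
// Processor's cache and never removes them, so memory grows with every request.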
[HttpGet]
[Route("memleak/{kb}")]
public ActionResult<string> memleak(int kb)
{
int it = (kb*1000) / 100;
for(int i=0; i<it; i++)
{
p.ProcessTransaction(new Customer(Guid.NewGuid().ToString()));
}
return "success:memleak";
}
[HttpGet]
[Route("exception")]
public ActionResult<string> exception()
{
throw new Exception("bad, bad code");
}
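// highcpu: spins in a tight loop until the requested number of milliseconds has
// elapsed, keeping one CPU core busy for the duration of the request.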
[HttpGet]
[Route("highcpu/{milliseconds}")]
public ActionResult<string> highcpu(int milliseconds)
{
Stopwatch watch=new Stopwatch();
watch.Start();
while (true)
{
watch.Stop();
if(watch.ElapsedMilliseconds > milliseconds)
break;
watch.Start();
}
return "success:highcpu";
}
}
class Customer
{
private string id;
public Customer(string id)
{
this.id = id;
}
}
class CustomerCache
{
private List<Customer> cache = new List<Customer>();
public void AddCustomer(Customer c)
{
cache.Add(c);
}
}
class Processor
{
private CustomerCache cache = new CustomerCache();
public void ProcessTransaction(Customer customer)
{
cache.AddCustomer(customer);
}
}
}

@ -1,353 +0,0 @@
# App stops responding
**IMPORTANT: This tutorial uses API/methods available in dotnet core preview 5. These API/methods are _subject to change._**
http://localhost:5000/api/diagscenario/deadlock
In this scenario, the endpoint will experience unresponsiveness and substantial thread accumulation. We'll show how you can use the existing tools to analyze the problem, as well as how to extend the existing tools to create a more automated debug session.
### Thread counters
As of preview 5, the lock contention and thread counters are not yet available. They are on the backlog to be included in future milestones. The general symptom of this issue is that one or more requests will take a long time to process and ultimately time out.
### Core dump generation
In order to investigate applications that seem to be making no progress, a memory dump is often beneficial, allowing us to inspect the state of all the threads as well as any locks that may have contention issues.
Run the webapi (dotnet run) and navigate to the following URL:
http://localhost:5000/api/diagscenario/deadlock
Let the request run for about 10-15 seconds. The first thing we need in order to generate a core dump is to find the process identifier of our webapi process:
> ```bash
> $ dotnet-trace list-ps
> ...
> 80926 webapi /home/marioh/webapi/bin/Debug/netcoreapp3.0/webapi
> sudo dotnet triggerdump.dll 80926 500
> ```
Once we have the process identifier, we can use the dotnet-dump collect tool (specifying the process identifier) to generate the core dump:
> ```bash
> $ sudo ./dotnet-dump collect -p 80926
> ```
At this point, we have the core dump and the next step is analyzing it to find the root cause of our unresponsive application.
### Analyzing the core dump
To start our investigation, let's open the core dump using dotnet-dump analyze:
> ```bash
> $ ./dotnet-dump analyze ~/.dotnet/tools/core_20190513_143916
> ```
Since we are looking for the root cause of the unresponsiveness, it is often useful to first get an overall feel for the thread state in the process. We can use the threads command as shown below:
> threads
> *0 0x1DBFF (121855)
> 1 0x1DC01 (121857)
> 2 0x1DC02 (121858)
> 3 0x1DC03 (121859)
> 4 0x1DC04 (121860)
> 5 0x1DC05 (121861)
> 6 0x1DC06 (121862)
> 7 0x1DC07 (121863)
> 8 0x1DC08 (121864)
> 9 0x1DC09 (121865)
> 10 0x1DC0A (121866)
> 11 0x1DC0D (121869)
> 12 0x1DC0E (121870)
> 13 0x1DC10 (121872)
> 14 0x1DC11 (121873)
> 15 0x1DC12 (121874)
> 16 0x1DC13 (121875)
> 17 0x1DC14 (121876)
> 18 0x1DC15 (121877)
> 19 0x1DC1C (121884)
> 20 0x1DC1D (121885)
> 21 0x1DC1E (121886)
> 22 0x1DC21 (121889)
> 23 0x1DC22 (121890)
> 24 0x1DC23 (121891)
> 25 0x1DC24 (121892)
> 26 0x1DC25 (121893)
>
> ...
>
> ...
>
> 317 0x1DD48 (122184)
> 318 0x1DD49 (122185)
> 319 0x1DD4A (122186)
> 320 0x1DD4B (122187)
> 321 0x1DD4C (122188)
The output shows all the threads currently running in the process with their associated debugger thread ID and operating system thread ID. Based on the output above, it looks like we have a little over 300 threads.
The next step is to get a better understanding of what the threads are currently doing by getting each callstack. The clrstack command can output the callstack of the currently selected thread, and it also provides a shortcut to show the callstacks for all the threads. To perform the latter, run the following command:
```bash
> clrstack -all
...
...
...
Child SP IP Call Site
00007F2AE37B5680 00007f305abc6360 [GCFrame: 00007f2ae37b5680]
00007F2AE37B5770 00007f305abc6360 [GCFrame: 00007f2ae37b5770]
00007F2AE37B57D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae37b57d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE37B5920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE37B5950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE37B5CA0 00007f30593044af [GCFrame: 00007f2ae37b5ca0]
00007F2AE37B5D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae37b5d70]
OS Thread Id: 0x1dc82
Child SP IP Call Site
00007F2AE2FB4680 00007f305abc6360 [GCFrame: 00007f2ae2fb4680]
00007F2AE2FB4770 00007f305abc6360 [GCFrame: 00007f2ae2fb4770]
00007F2AE2FB47D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae2fb47d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE2FB4920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE2FB4950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE2FB4CA0 00007f30593044af [GCFrame: 00007f2ae2fb4ca0]
00007F2AE2FB4D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae2fb4d70]
OS Thread Id: 0x1dc83
Child SP IP Call Site
00007F2AE27B3680 00007f305abc6360 [GCFrame: 00007f2ae27b3680]
00007F2AE27B3770 00007f305abc6360 [GCFrame: 00007f2ae27b3770]
00007F2AE27B37D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae27b37d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE27B3920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE27B3950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE27B3CA0 00007f30593044af [GCFrame: 00007f2ae27b3ca0]
00007F2AE27B3D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae27b3d70]
OS Thread Id: 0x1dc84
Child SP IP Call Site
00007F2AE1FB2680 00007f305abc6360 [GCFrame: 00007f2ae1fb2680]
00007F2AE1FB2770 00007f305abc6360 [GCFrame: 00007f2ae1fb2770]
00007F2AE1FB27D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae1fb27d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE1FB2920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE1FB2950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE1FB2CA0 00007f30593044af [GCFrame: 00007f2ae1fb2ca0]
00007F2AE1FB2D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae1fb2d70]
OS Thread Id: 0x1dc85
Child SP IP Call Site
00007F2AE17B1680 00007f305abc6360 [GCFrame: 00007f2ae17b1680]
00007F2AE17B1770 00007f305abc6360 [GCFrame: 00007f2ae17b1770]
00007F2AE17B17D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae17b17d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE17B1920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE17B1950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE17B1CA0 00007f30593044af [GCFrame: 00007f2ae17b1ca0]
00007F2AE17B1D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae17b1d70]
OS Thread Id: 0x1dc86
Child SP IP Call Site
00007F2AE0FB0680 00007f305abc6360 [GCFrame: 00007f2ae0fb0680]
00007F2AE0FB0770 00007f305abc6360 [GCFrame: 00007f2ae0fb0770]
00007F2AE0FB07D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae0fb07d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE0FB0920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE0FB0950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE0FB0CA0 00007f30593044af [GCFrame: 00007f2ae0fb0ca0]
00007F2AE0FB0D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae0fb0d70]
OS Thread Id: 0x1dc87
Child SP IP Call Site
00007F2AE07AF680 00007f305abc6360 [GCFrame: 00007f2ae07af680]
00007F2AE07AF770 00007f305abc6360 [GCFrame: 00007f2ae07af770]
00007F2AE07AF7D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2ae07af7d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2AE07AF920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2AE07AF950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2AE07AFCA0 00007f30593044af [GCFrame: 00007f2ae07afca0]
00007F2AE07AFD70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2ae07afd70]
OS Thread Id: 0x1dc88
Child SP IP Call Site
00007F2ADFFAE680 00007f305abc6360 [GCFrame: 00007f2adffae680]
00007F2ADFFAE770 00007f305abc6360 [GCFrame: 00007f2adffae770]
00007F2ADFFAE7D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2adffae7d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2ADFFAE920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2ADFFAE950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2ADFFAECA0 00007f30593044af [GCFrame: 00007f2adffaeca0]
00007F2ADFFAED70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2adffaed70]
...
...
...
```
(output abbreviated)
Eyeballing the callstacks for all 300+ threads shows a pattern where the majority of the threads share a common callstack:
```bash
OS Thread Id: 0x1dc88
Child SP IP Call Site
00007F2ADFFAE680 00007f305abc6360 [GCFrame: 00007f2adffae680]
00007F2ADFFAE770 00007f305abc6360 [GCFrame: 00007f2adffae770]
00007F2ADFFAE7D0 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2adffae7d0] System.Threading.Monitor.ReliableEnter(System.Object, Boolean ByRef)
00007F2ADFFAE920 00007F2FE392B31F testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1() [/home/marioh/webapi/Controllers/diagscenario.cs @ 36]
00007F2ADFFAE950 00007F2FE392B46D System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 201]
00007F2ADFFAECA0 00007f30593044af [GCFrame: 00007f2adffaeca0]
00007F2ADFFAED70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2adffaed70]
```
The callstack seems to show that the request arrived in our deadlock method, which in turn makes a call to Monitor.ReliableEnter. This is a good indication that the majority of threads are trying to enter a monitor lock and are waiting on the availability of the lock (i.e., it is currently locked by a different thread). The next step is to find out which thread is actually holding the monitor lock. Since monitors (by and large) store lock information in the sync block table, we can use the syncblk command to get more information:
```bash
> syncblk
Index SyncBlock MonitorHeld Recursion Owning Thread Info SyncBlock Owner
41 000000000143D038 603 1 00007F2B542D28C0 1dc1d 20 00007f2e90080fb8 System.Object
42 000000000143D080 3 1 00007F2B400016A0 1dc1e 21 00007f2e90080fd0 System.Object
-----------------------------
Total 264
Free 0
```
The two interesting columns are the MonitorHeld column and the Owning Thread Info column. The former shows whether a monitor lock is acquired by a thread (and also number of waiters) and the latter shows which thread currently owns the monitor lock. Please note that the Owning Thread Info shows three different columns. The column of interest is the operating system thread id (second column).
At this point, we know that two different threads (0x1dc1d and 0x1dc1e) each hold a monitor lock. The next step is to take a look at what those threads are doing and if there is any possibility that they are stuck indefinitely holding the lock. Let's use the setthread and clrstack commands to switch to each of the threads and display the callstacks:
```bash
> setthread 0x1dc1d
> clrstack
OS Thread Id: 0x1dc1d (20)
Child SP IP Call Site
00007F2B862B9610 00007f305abc6360 [GCFrame: 00007f2b862b9610]
00007F2B862B9700 00007f305abc6360 [GCFrame: 00007f2b862b9700]
00007F2B862B9760 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2b862b9760] System.Threading.Monitor.Enter(System.Object)
00007F2B862B98B0 00007F2FE392A9E5 testwebapi.Controllers.DiagScenarioController.DeadlockFunc() [/home/marioh/webapi/Controllers/diagscenario.cs @ 57]
00007F2B862B98F0 00007F2FE392A8FC testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_0() [/home/marioh/webapi/Controllers/diagscenario.cs @ 27]
00007F2B862B9910 00007F2FE02B7BA2 System.Threading.ThreadHelper.ThreadStart_Context(System.Object) [/__w/3/s/src/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs @ 51]
00007F2B862B9930 00007F2FE02C1021 System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 172]
00007F2B862B9980 00007F2FE02B7CBE System.Threading.ThreadHelper.ThreadStart() [/__w/3/s/src/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs @ 101]
00007F2B862B9CA0 00007f30593044af [GCFrame: 00007f2b862b9ca0]
00007F2B862B9D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2b862b9d70]
> setthread 0x1dc1e
> clrstack
OS Thread Id: 0x1dc1e (21)
Child SP IP Call Site
00007F2B85AB8640 00007f305abc6360 [GCFrame: 00007f2b85ab8640]
00007F2B85AB8730 00007f305abc6360 [GCFrame: 00007f2b85ab8730]
00007F2B85AB8790 00007f305abc6360 [HelperMethodFrame_1OBJ: 00007f2b85ab8790] System.Threading.Monitor.Enter(System.Object)
00007F2B85AB88E0 00007F2FE392AAAE testwebapi.Controllers.DiagScenarioController.<DeadlockFunc>b__4_0() [/home/marioh/webapi/Controllers/diagscenario.cs @ 53]
00007F2B85AB8910 00007F2FE02B7BA2 System.Threading.ThreadHelper.ThreadStart_Context(System.Object) [/__w/3/s/src/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs @ 51]
00007F2B85AB8930 00007F2FE02C1021 System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object) [/__w/3/s/src/System.Private.CoreLib/shared/System/Threading/ExecutionContext.cs @ 172]
00007F2B85AB8980 00007F2FE02B7CBE System.Threading.ThreadHelper.ThreadStart() [/__w/3/s/src/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs @ 101]
00007F2B85AB8CA0 00007f30593044af [GCFrame: 00007f2b85ab8ca0]
00007F2B85AB8D70 00007f30593044af [DebuggerU2MCatchHandlerFrame: 00007f2b85ab8d70]
```
Starting with the first thread, our deadlock method (which already owns a monitor lock) seems to be trying to acquire yet another lock but is waiting for that lock to become available. Similarly, the second thread (which also owns a monitor lock) is trying to acquire a lock and waiting for it to become available.
To summarize our findings, we have two threads, each of which already own a lock and are stuck waiting for another lock to become available (deadlock). The remaining 300+ threads that are all waiting are most likely also waiting on one of the locks that caused the deadlock.
### Simplifying the analysis process
One of the pain points in our analysis process was having to eyeball hundreds of callstacks to see if we could spot a common pattern. It would be much easier if we had a command that could output just the unique callstacks and all the thread IDs associated with those callstacks. In this section, we'll implement that command as a custom command for dotnet-dump analyze.
Before we get started, you need to do the following (a consolidated sketch follows this list):
1. Clone the dotnet/diagnostics repo (https://github.com/dotnet/diagnostics)
2. Make sure all the pre-requisites are in place to build the repo (https://github.com/dotnet/diagnostics/blob/master/README.md)
3. Add the uniquestacks.cs file to ~/diagnostics/src/Tools/dotnet-dump/Commands folder.
4. Build the repo using ~/diagnostics/build.sh
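Put together, the preparation steps look roughly like this; the clone location and the source of uniquestacks.cs are illustrative:
```bash
git clone https://github.com/dotnet/diagnostics ~/diagnostics
cp uniquestacks.cs ~/diagnostics/src/Tools/dotnet-dump/Commands/
~/diagnostics/build.sh
```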
All the interesting command code is located in the InvokeAsync method (uniquestacks.cs) and uses CLRMD to enumerate all the threads like so:
```c#
foreach (ClrThread thread in AnalyzeContext.Runtime.Threads)
{
if (!thread.IsAlive) continue;
StringBuilder completeStack = new StringBuilder();
foreach (ClrStackFrame frame in thread.StackTrace)
{
switch (frame.Kind)
{
case ClrStackFrameType.ManagedMethod:
completeStack.Append(frame.DisplayString);
break;
case ClrStackFrameType.Runtime:
completeStack.Append("["+ frame.DisplayString +"]");
if (frame.Method != null)
{
completeStack.Append("Special");
completeStack.Append(frame.Method);
}
break;
}
completeStack.Append("\n");
}
// After walking all of the thread's frames, use the complete callstack as the
// dictionary key so threads that share the same callstack are grouped together.
string cStack = completeStack.ToString();
if (threads.ContainsKey(cStack))
{
threads[cStack].Add(thread.OSThreadId);
}
else
{
List<uint> l = new List<uint>();
l.Add(thread.OSThreadId);
threads.Add(cStack, l);
}
}
```
The code simply iterates over all the threads, builds each thread's callstack, and finally adds it to a dictionary. Once the loop has finished, the dictionary contains all the unique callstacks and the associated thread IDs.
Lastly, the code prints out the dictionary, allowing us to see the duplicated callstacks with ease (a sketch of that printing step follows).
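The printing step itself is not shown in the snippet above. A minimal sketch of what it could look like, assuming `threads` is the `Dictionary<string, List<uint>>` built in the loop and using `Console.WriteLine` for illustration (a real command would likely write through dotnet-dump's own output mechanism), is:
```c#
foreach (KeyValuePair<string, List<uint>> entry in threads)
{
    // Print the unique callstack followed by every OS thread id that shares it.
    Console.WriteLine(entry.Key);
    Console.WriteLine(string.Join(",", entry.Value));
    Console.WriteLine();
}
```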
Build the dotnet-dump tool (dotnet build) and run it from the following location on the same dump that we generated earlier:
```bash
~/diagnostics/artifacts/bin/dotnet-dump/Debug/netcoreapp2.1/publish/dotnet dotnet-dump.dll analyze ~/.dotnet/tools/core_20190513_143916
```
You can now use the 'uniquestacks' command and see the following output (partial):
```bash
...
...
...
[GCFrame]
[GCFrame]
[HelperMethodFrame_1OBJ]
testwebapi.Controllers.DiagScenarioController.<deadlock>b__3_1()
System.Threading.ExecutionContext.RunInternal(System.Threading.ExecutionContext, System.Threading.ContextCallback, System.Object)
[GCFrame]
121917,121918,121919,121920,121921,121922,121923,121924,121925,121926,121927,121928,121929,121930,121931,121932,121933,121934,121935,121936,121937,121938,121939,121940,121941,121942,121943,121944,121945,121946,121947,121948,121949,121950,121951,121952,121953,121954,121955,121956,121957,121958,121959,121960,121961,121962,121963,121964,121965,121966,121967,121968,121969,121970,121971,121972,121973,121974,121975,121976,121977,121978,121979,121980,121981,121982,121983,121984,121985,121986,121987,121988,121989,121990,121991,121992,121993,121994,121995,121996,121997,121998,121999,122000,122001,122002,122003,122004,122005,122006,122007,122008,122009,122010,122011,122012,122013,122014,122015,122016,122017,122018,122019,122020,122021,122022,122023,122024,122025,122026,122027,122028,122029,122030,122031,122032,122033,122034,122035,122036,122037,122038,122039,122040,122041,122042,122043,122044,122045,122046,122047,122048,122049,122050,122051,122052,122053,122054,122055,122056,122057,122058,122059,122060,122061,122062,122063,122064,122065,122066,122067,122068,122069,122070,122071,122072,122073,122074,122075,122076,122077,122078,122079,122080,122081,122082,122083,122084,122085,122086,122087,122088,122089,122090,122091,122092,122093,122094,122095,122096,122097,122098,122099,122100,122101,122102,122103,122104,122105,122106,122107,122108,122109,122110,122111,122112,122113,122114,122115,122116,122117,122118,122119,122120,122121,122122,122123,122124,122125,122126,122127,122128,122129,122130,122131,122132,122133,122134,122135,122136,122137,122138,122139,122140,122141,122142,122143,122144,122145,122146,122147,122148,122149,122150,122151,122152,122153,122154,122155,122156,122157,122158,122159,122160,122161,122162,122163,122164,122165,122166,122167,122168,122169,122170,122171,122172,122173,122174,122175,122176,122177,122178,122179,122180,122181,122182,122183,122184,122185,122186,122187,122188
```
The output shows a large number of threads (with their associated thread IDs) sharing the same call stack, which is commonly a good indicator that a threading issue is taking place.

Просмотреть файл

@ -1,44 +0,0 @@
# Installing the diagnostics tools
Depending on the diagnostics scenario, you will use one or more of the tools below to get to the root cause. By default, these tools are installed to ~/.dotnet/tools.
### dotnet-counters
In the .NET full/Windows world, we have a myriad of performance counters that can be used to triage and diagnose production issues. For .NET Core we have a similar, cross-platform story centered around a tool called dotnet-counters. To install the tool, run the following command:
> ```bash
> dotnet tool install --global dotnet-counters
> ```
### dotnet-trace
.NET Core includes what is called the 'EventPipe' through which diagnostics data is exposed. The dotnet-trace tool allows you to consume interesting profiling data from your app that can help in scenarios where you need to root cause a slow-running app. To install the tool, run the following command:
> ```bash
> dotnet tool install --global dotnet-trace
> ```
### dotnet-dump
In order to generate core dumps for .NET Core apps, you can use the dotnet-dump tool. To install the tool, run the following command:
> ```bash
> dotnet tool install --global dotnet-dump
> ```
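Once installed, a typical workflow is to collect a dump from a running process and then open it for analysis. A sketch (the pid and dump path are placeholders):
> ```bash
> dotnet-dump collect -p <pid>
> dotnet-dump analyze <path-to-dump>
> ```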
### dotnet-symbol
In order to debug core dumps, the correct symbols need to be available. The dotnet-symbol tool allows you to point to a core dump and it will automatically download the symbols for you. To install the tool, run:
> ```bash
> dotnet tool install -g dotnet-symbol
> ```
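After installation, you typically point the tool at the core dump you want to debug. A sketch (the dump path is a placeholder):
> ```bash
> dotnet-symbol <path-to-core-dump>
> ```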
### perfcollect
The .NET Core runtime is instrumented for both perf and LTTng. To facilitate easier collection with both tracing technologies, there is a tool called perfcollect. Perfcollect will output the joint trace data into a trace file that can be analyzed using PerfView on Windows. To install the tool, run the following commands:
> ```
> curl -OL http://aka.ms/perfcollect
> chmod +x perfcollect
> sudo ./perfcollect install
> ```
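Once installed, a collection run looks roughly like the following (`sampleTrace` is just an example name for the resulting trace):
> ```
> sudo ./perfcollect collect sampleTrace
> ```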

Просмотреть файл

@ -1,182 +0,0 @@
# App is experiencing intermittent memory spikes
**IMPORTANT: This tutorial uses API/methods available in dotnet core preview 5. These API/methods are _subject to change._**
http://localhost:5000/api/diagscenario/memspike/{seconds}
In this scenario, the endpoint will experience intermittent memory spikes over the specified number of seconds. Memory will go from baseline to spike and back to baseline several times. What makes this scenario different from the memory leak scenario is that we have to figure out a way to automatically trigger the collection of a dump when the memory spikes.
### Memory counters
Before we dig into collecting diagnostics data to help us root cause this scenario, we need to convince ourselves that what we are seeing is an intermittent memory spike. To help with this we can use the dotnet-counters tool which allows us to watch the memory usage for a selected dotnet process (please see [Installing the diagnostics tools](installing_the_diagnostics_tools.md)).
Let's run the webapi (dotnet run) and, before navigating to the above URL (specifying 300 seconds), check our managed memory counters:
> ```bash
> dotnet-counters monitor --refresh-interval 1 -p 4807
> ```
4807 is the process identifier, which can be found using dotnet-trace ps. The refresh-interval is the number of seconds between refreshes.
The output should be similar to the below:
![alt text](https://user-images.githubusercontent.com/15442480/57110730-6429fb80-6cee-11e9-8bd1-4f37496c70fe.png)
Here we can see that right after startup, the managed heap memory is 4MB.
Now, let's navigate to the URL (http://localhost:5000/api/diagscenario/memspike/300), which will run for 5 minutes, giving us ample time to experiment.
Re-run the dotnet-counters command. We should see an alternating heap size with a baseline of roughly 250MB and the highest spike around 630MB. The memory usage will alternate between baseline and spike every 5 seconds or so.
Baseline:
![alt text](https://user-images.githubusercontent.com/15442480/57338185-463f0b00-70e1-11e9-8d52-0305d3158dd5.jpg)
High:
![alt text](https://user-images.githubusercontent.com/15442480/57338164-36272b80-70e1-11e9-843a-604af1ddfd5f.jpg)
At this point, we can safely say that memory is spiking to a high that is not normal and the next step is to run a collection tool that can help us collect the right data at the right time.
### Core dump generation
Let's step back a bit and revisit the high memory scenario earlier in the tutorial. In that scenario, memory grew high and stayed high, allowing us the opportunity to run the dotnet-dump command without restriction. However, in our current scenario we have a short memory spike that only lasts about 5 seconds. This makes it difficult to get set up to run the dotnet-dump tool manually. What we would prefer is a tool that monitors the .NET Core counters and automatically creates a core dump once a threshold has been breached. This is a perfect opportunity to start exploring how we can write our own diagnostics tools to cater to our diagnostics needs.
What we would like this tool to do is allow the user to specify the pid of the target process as well as the memory consumption threshold (in MBs). It would then continuously monitor the process and create a dump if the threshold is breached:
> ```bash
> sudo ./triggerdump <pid> <memory threshold in MBs>
> ```
#### Some background before we start writing the tool...
The .NET Core runtime contains a mechanism known as the EventPipe that carries events to interested consumers. There are several different kinds of events that flow through the EventPipe, including diagnostics information such as counters. The EventPipe is exposed as a Unix domain socket on Linux machines and as named pipes on Windows. EventPipe is set to duplex mode, which means that clients can both read from and write to the pipe. A diagnostics application can register to consume these events from the EventPipe and create new diagnostics experiences. Rather than communicating with the EventPipe directly, you can use the client library implemented in Microsoft.Diagnostics.Tools.RuntimeClient.dll.
Events that are written to the EventPipe can come from multiple sources (or providers) and as such, clients that receive events over EventPipe can filter those events based on specific providers.
#### Writing the tool...
We have two requirements in order to implement a tool that will create a dump file based on memory consumption:
* Read the dotnet memory counter to know if the current memory consumption goes above the specified threshold
* Generate the actual core dump
Let's start with the first requirement, reading dotnet counters. As explained earlier, we can use the EventPipe mechanism to read counters from the runtime. In this case, the provider that writes counter events is System.Runtime.
[Full code](src/triggerdump/Program.cs)
Below is the relevant code snippet that is required to consume the counters:
```csharp
Task monitorTask = new Task(() =>
{
var prov = new List<Provider>();
prov.Add(new Provider("System.Runtime", filterData:"EventCounterIntervalSec=1"));
var configuration = new SessionConfiguration(
circularBufferSizeMB: 1000,
outputPath: "",
providers: prov);
var binaryReader = EventPipeClient.CollectTracing(Int32.Parse(args[0]), configuration, out _sessionId);
EventPipeEventSource source = new EventPipeEventSource(binaryReader);
source.Dynamic.All += Dynamic_All;
source.Process();
});
```
The above code first creates the configuration, specifying the buffer size, the output path, and finally the System.Runtime provider that we are interested in. Next, it calls the CollectTracing method, specifying the process identifier we are interested in tracing, the configuration, and an out session ID. Once that is completed, we create an EventPipeEventSource from the reader created in the previous step and attach a callback that will be invoked as the events are delivered over EventPipe. Last, we call the Process method to start processing the events. At this point, the Dynamic_All method will be invoked anytime an event comes through from the System.Runtime provider.
Now that we have the events flowing through our callback, let's turn our attention to the callback itself and how we can get the counter information:
```csharp
private static void Dynamic_All(TraceEvent obj)
{
if (obj.EventName.Equals("EventCounters"))
{
IDictionary<string, object> payloadVal = (IDictionary<string, object>)(obj.PayloadValue(0));
IDictionary<string, object> payloadFields = (IDictionary<string, object>)(payloadVal["Payload"]);
ICounterPayload payload = payloadFields.Count == 6 ? (ICounterPayload)new IncrementingCounterPayload(payloadFields) : (ICounterPayload)new CounterPayload(payloadFields);
string displayName = payload.GetDisplay();
if (string.IsNullOrEmpty(displayName))
{
displayName = payload.GetName();
}
if(string.Compare(displayName, "GC Heap Size") == 0 && Convert.ToInt32(payload.GetValue())>threshold)
{
// Generate dump and exit
}
}
}
```
Every time the callback is invoked, a TraceEvent is received. The TraceEvent contains information about the event that was delivered. In our case, the first thing we do is make sure the event corresponds to EventCounters. If so, we get the GC Heap Size counter from the event payload and compare it to the threshold that the user set as part of the command line invocation. If the threshold was breached, we are ready to generate a dump.
The last step of the puzzle is to generate the dump. For brevity, we will focus only on core dump generation on Linux. In preview 5, the way to generate a core dump is to invoke the createdump tool that ships with the runtime. Add the following code to the Dynamic_All method (replacing the Generate dump and exit comment):
```csharp
Console.WriteLine("Memory threshold has been breached....");
System.Diagnostics.Process process = System.Diagnostics.Process.GetProcessById(pid);
System.Diagnostics.ProcessModule coreclr = process.Modules.Cast<System.Diagnostics.ProcessModule>().FirstOrDefault(m => string.Equals(m.ModuleName, "libcoreclr.so"));
if (coreclr == null)
{
Console.WriteLine("Unable to locate .NET runtime associated with this process!");
Environment.Exit(1);
}
else
{
string runtimeDirectory = Path.GetDirectoryName(coreclr.FileName);
string createDumpPath = Path.Combine(runtimeDirectory, "createdump");
if (!File.Exists(createDumpPath))
{
Console.WriteLine("Unable to locate 'createdump' tool in '{runtimeDirectory}'");
Environment.Exit(1);
}
var createdump = new System.Diagnostics.Process()
{
StartInfo = new System.Diagnostics.ProcessStartInfo()
{
FileName = createDumpPath,
Arguments = $"--name coredump --withheap {pid}",
},
EnableRaisingEvents = true,
};
createdump.Start();
createdump.WaitForExit();
Environment.Exit(0);
}
```
We can now build the tool (dotnet build) and run it against our application. Once built, restart the test webapi, get the process id from dotnet-trace and run triggerdump:
> ```bash
> dotnet-trace ps
> ...
> 80926 webapi /home/marioh/webapi/bin/Debug/netcoreapp3.0/webapi
> sudo dotnet triggerdump.dll 80926 500
> ```
Please note that triggerdump has to be run with sudo. The invocation tells triggerdump to monitor the process with process id 80926 and to create a dump when memory grows above 500MB.
At this point, you should see triggerdump waiting/monitoring the memory counter. Let's trigger the intermittent memory spike by navigating to:
http://localhost:5000/api/diagscenario/memspike/300
After 10-15 seconds, you will notice that triggerdump outputs the following and then exits.
> ```bash
> ...
> Writing minidump with heap to file coredump
> Written 820518912 bytes (200322 pages) to core file
> ```
That's it for creating your own diagnostics tool to solve the intermittent memory spike scenario!
### Analyzing the core dump
Since the core dump that our tool generates is a standard core dump, we can use the same techniques illustrated in [Scenario - App is leaking memory](app_is_leaking_memory_eventual_crash.md) to analyze the dump and find out why we're seeing intermittent high memory conditions.
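As a quick sketch of that workflow (assuming the dump was written to a file named `coredump` in the current directory, as in the triggerdump run above), you could open the dump with dotnet-dump:
> ```bash
> dotnet-dump analyze ./coredump
> ```
From the analyze prompt, SOS commands such as `dumpheap -stat` and `gcroot <address>` can then show which objects account for the memory and what is keeping them alive.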

Просмотреть файл

@ -1,76 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using System.Text;
namespace Microsoft.Diagnostics.Tools.Counters
{
public interface ICounterPayload
{
string GetName();
string GetValue();
string GetDisplay();
}
class CounterPayload : ICounterPayload
{
public string m_Name;
public string m_Value;
public string m_DisplayName;
public CounterPayload(IDictionary<string, object> payloadFields)
{
m_Name = payloadFields["Name"].ToString();
m_Value = payloadFields["Mean"].ToString();
m_DisplayName = payloadFields["DisplayName"].ToString();
}
public string GetName()
{
return m_Name;
}
public string GetValue()
{
return m_Value;
}
public string GetDisplay()
{
return m_DisplayName;
}
}
class IncrementingCounterPayload : ICounterPayload
{
public string m_Name;
public string m_Value;
public string m_DisplayName;
public string m_DisplayRateTimeScale;
public IncrementingCounterPayload(IDictionary<string, object> payloadFields)
{
m_Name = payloadFields["Name"].ToString();
m_Value = payloadFields["Increment"].ToString();
m_DisplayName = payloadFields["DisplayName"].ToString();
m_DisplayRateTimeScale = TimeSpan.Parse(payloadFields["DisplayRateTimeScale"].ToString()).ToString("%s' sec'");
}
public string GetName()
{
return m_Name;
}
public string GetValue()
{
return m_Value;
}
public string GetDisplay()
{
return $"{m_DisplayName} / {m_DisplayRateTimeScale}";
}
}
}

Просмотреть файл

@ -1,133 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Diagnostics;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Text;
using System.IO;
using Microsoft.Diagnostics.Tools.RuntimeClient;
using Microsoft.Diagnostics.Tracing;
namespace Microsoft.Diagnostics.Tools.Counters
{
internal class Program
{
private static ulong _sessionId;
private static int threshold;
private static int pid;
private static void Main(string[] args)
{
if(args.Length<2)
{
Console.WriteLine("triggerdump <pid> <mem threshold in MB>");
}
else
{
pid = Convert.ToInt32(args[0]);
threshold = Convert.ToInt32(args[1]);
Task monitorTask = new Task(() =>
{
var prov = new List<Provider>();
prov.Add(new Provider("System.Runtime", filterData:"EventCounterIntervalSec=1"));
var configuration = new SessionConfiguration(
circularBufferSizeMB: 1000,
outputPath: "",
providers: prov);
var binaryReader = EventPipeClient.CollectTracing(Int32.Parse(args[0]), configuration, out _sessionId);
EventPipeEventSource source = new EventPipeEventSource(binaryReader);
source.Dynamic.All += Dynamic_All;
source.Process();
});
Task commandTask = new Task(() =>
{
while(true)
{
while (!Console.KeyAvailable) { }
ConsoleKey cmd = Console.ReadKey(true).Key;
if (cmd == ConsoleKey.Q)
{
break;
}
}
});
monitorTask.Start();
commandTask.Start();
commandTask.Wait();
try
{
EventPipeClient.StopTracing(Int32.Parse(args[0]), _sessionId);
}
catch (System.IO.EndOfStreamException) {}
}
}
private static void Dynamic_All(TraceEvent obj)
{
if (obj.EventName.Equals("EventCounters"))
{
IDictionary<string, object> payloadVal = (IDictionary<string, object>)(obj.PayloadValue(0));
IDictionary<string, object> payloadFields = (IDictionary<string, object>)(payloadVal["Payload"]);
ICounterPayload payload = payloadFields.Count == 6 ? (ICounterPayload)new IncrementingCounterPayload(payloadFields) : (ICounterPayload)new CounterPayload(payloadFields);
string displayName = payload.GetDisplay();
if (string.IsNullOrEmpty(displayName))
{
displayName = payload.GetName();
}
if(string.Compare(displayName, "GC Heap Size") == 0 && Convert.ToInt32(payload.GetValue())>threshold)
{
Console.WriteLine("Memory threshold has been breached....");
System.Diagnostics.Process process = System.Diagnostics.Process.GetProcessById(pid);
System.Diagnostics.ProcessModule coreclr = process.Modules.Cast<System.Diagnostics.ProcessModule>().FirstOrDefault(m => string.Equals(m.ModuleName, "libcoreclr.so"));
if (coreclr == null)
{
Console.WriteLine("Unable to locate .NET runtime associated with this process!");
Environment.Exit(1);
}
else
{
string runtimeDirectory = Path.GetDirectoryName(coreclr.FileName);
string createDumpPath = Path.Combine(runtimeDirectory, "createdump");
if (!File.Exists(createDumpPath))
{
Console.WriteLine("Unable to locate 'createdump' tool in '{runtimeDirectory}'");
Environment.Exit(1);
}
var createdump = new System.Diagnostics.Process()
{
StartInfo = new System.Diagnostics.ProcessStartInfo()
{
FileName = createDumpPath,
Arguments = $"--name coredump --withheap {pid}",
},
EnableRaisingEvents = true,
};
createdump.Start();
createdump.WaitForExit();
Environment.Exit(0);
}
}
}
}
}
}

Просмотреть файл

@ -1,3 +0,0 @@
{
"rollForwardOnNoCandidateFx": 2
}

Просмотреть файл

@ -1,26 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<ToolCommandName>triggerdump</ToolCommandName>
<RootNamespace>Diagnosticv.TriggerDump</RootNamespace>
<Description>Trigger dump</Description>
<PackageTags>Diagnostic</PackageTags>
<PackageReleaseNotes>$(Description)</PackageReleaseNotes>
<TargetName>triggerdump</TargetName>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildThisFileDirectory)..\..\..\..\src\Tools\dotnet-trace\Extensions.cs" Link="Extensions.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(MSBuildThisFileDirectory)..\..\..\..\src\Microsoft.Diagnostics.Tools.RuntimeClient\Microsoft.Diagnostics.Tools.RuntimeClient.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Diagnostics.Tracing.TraceEvent" Version="2.0.41" />
</ItemGroup>
</Project>

Просмотреть файл

@ -1,65 +0,0 @@
using Microsoft.Diagnostic.Repl;
using Microsoft.Diagnostics.Runtime;
using System.CommandLine;
using System.Linq;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Text;
namespace Microsoft.Diagnostic.Tools.Dump
{
internal class Stack
{
public List<uint> threadIds = new List<uint>();
}
[Command(Name = "uniquestacks", Help = "Displays the unique managed stacks.")]
[CommandAlias(Name = "us")]
public class UniqueStacksCommand : CommandBase
{
[Option(Name = "--verbose", Help = "Displays more details.")]
[OptionAlias(Name = "-v")]
public bool Verbose { get; set; }
public AnalyzeContext AnalyzeContext { get; set; }
public override Task InvokeAsync()
{
Dictionary<string, Stack> threads = new Dictionary<string, Stack>();
foreach (ClrThread thread in AnalyzeContext.Runtime.Threads)
{
if (!thread.IsAlive) continue;
StringBuilder completeStack = new StringBuilder();
foreach (ClrStackFrame frame in thread.StackTrace)
{
//Console.WriteLine("{0,16:X} {1,16:X} {2}", frame.StackPointer, frame.InstructionPointer, frame.DisplayString);
completeStack.Append(frame.StackPointer + " " + frame.InstructionPointer + " " + frame.DisplayString + "\n");
}
string cStack = completeStack.ToString();
if(threads.ContainsKey(cStack)==true)
{
threads[cStack].threadIds.Add(thread.OSThreadId);
}
else
{
Stack s = new Stack();
s.threadIds.Add(thread.OSThreadId);
threads.Add(cStack, s);
}
}
foreach (KeyValuePair<string, Stack> item in threads)
{
System.Console.WriteLine(item.Key);
System.Console.WriteLine("\n\n");
}
return Task.CompletedTask;
}
}
}

Просмотреть файл

@ -1,33 +0,0 @@
# Using your own SOS private build
This document is written for people who need to use the bleeding edge SOS freshly built from the enlistment. You may need to do it because you want the latest feature, or you are developing your own SOS command and you want to try it out. Regardless of the reason, here is how you can do it.
Before we can use the private build, of course, we must build it first. [Here](https://github.com/dotnet/diagnostics/tree/master/documentation/building) is the documentation on how to do that.
## Windows
On Windows, [Debugging Tools for Windows](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-download-tools) (aka WinDBG) automatically loads a shipped version of sos.dll from the extension gallery whenever it notices a managed runtime is loaded. In order to avoid that behavior, we need to make sure our sos is loaded before the debugger encounters the managed runtime. For the launch scenario, we can do this before running anything.
```
0:000> .load <reporoot>\artifacts\bin\Windows_NT.x64.Debug\sos.dll
```
In the attach scenario, we need to do things differently. We can't stop WinDBG from loading the shipped sos, but we can replace it.
```
0:000> .unload sos
0:000> .load <reporoot>\artifacts\bin\Windows_NT.x64.Debug\sos.dll
```
This will ensure you are using your own sos.dll; of course, your full path to `sos.dll` may differ.
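If you want to double-check which sos.dll the debugger actually has loaded (an optional sanity check), you can list the extension chain:
```
0:000> .chain
```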
## Linux
On Linux, we have to load `libsosplugin.so` manually, so we can simply load it from the build output ourselves.
```
(lldb) plugin load <reporoot>/artifacts/bin/Linux.x64.Debug/libsosplugin.so
```
Then we are using our own sos and we can do whatever we want with it!
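As a quick sanity check (any SOS command will do), you can run one of the commands the plugin registers, for example:
```
(lldb) soshelp
(lldb) clrthreads
```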

136
dotnet-monitor.sln Normal file
Просмотреть файл

@ -0,0 +1,136 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.29019.234
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{19FAB78C-3351-4911-8F0C-8C6056401740}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tools", "Tools", "{B62728C8-1267-4043-B46F-5537BBAEC692}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "dotnet-monitor", "src\Tools\dotnet-monitor\dotnet-monitor.csproj", "{C57F7656-6663-4A3C-BE38-B75C6C57E77D}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Diagnostics.Monitoring.RestServer", "src\Microsoft.Diagnostics.Monitoring.RestServer\Microsoft.Diagnostics.Monitoring.RestServer.csproj", "{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Common", "Common", "{B24CD8F2-D809-4DB8-89A1-D45FA9218020}"
ProjectSection(SolutionItems) = preProject
src\Tools\Common\CommandExtensions.cs = src\Tools\Common\CommandExtensions.cs
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Checked|Any CPU = Checked|Any CPU
Checked|ARM = Checked|ARM
Checked|ARM64 = Checked|ARM64
Checked|x64 = Checked|x64
Checked|x86 = Checked|x86
Debug|Any CPU = Debug|Any CPU
Debug|ARM = Debug|ARM
Debug|ARM64 = Debug|ARM64
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|Any CPU = Release|Any CPU
Release|ARM = Release|ARM
Release|ARM64 = Release|ARM64
Release|x64 = Release|x64
Release|x86 = Release|x86
RelWithDebInfo|Any CPU = RelWithDebInfo|Any CPU
RelWithDebInfo|ARM = RelWithDebInfo|ARM
RelWithDebInfo|ARM64 = RelWithDebInfo|ARM64
RelWithDebInfo|x64 = RelWithDebInfo|x64
RelWithDebInfo|x86 = RelWithDebInfo|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|Any CPU.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|ARM.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|ARM.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|ARM64.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|x64.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|x64.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|x86.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Checked|x86.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|ARM.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|ARM.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|ARM64.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|x64.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|x64.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|x86.ActiveCfg = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Debug|x86.Build.0 = Debug|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|Any CPU.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|ARM.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|ARM.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|ARM64.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|ARM64.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|x64.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|x64.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|x86.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.Release|x86.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{C57F7656-6663-4A3C-BE38-B75C6C57E77D}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|Any CPU.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|Any CPU.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|ARM.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|ARM.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|ARM64.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|ARM64.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|x64.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|x64.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|x86.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Checked|x86.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|ARM.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|ARM.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|ARM64.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|ARM64.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|x64.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|x64.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|x86.ActiveCfg = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Debug|x86.Build.0 = Debug|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|Any CPU.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|ARM.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|ARM.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|ARM64.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|ARM64.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|x64.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|x64.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|x86.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.Release|x86.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|Any CPU.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|Any CPU.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|ARM.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|ARM.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|ARM64.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|ARM64.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|x64.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|x64.Build.0 = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|x86.ActiveCfg = Release|Any CPU
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C}.RelWithDebInfo|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{B62728C8-1267-4043-B46F-5537BBAEC692} = {19FAB78C-3351-4911-8F0C-8C6056401740}
{C57F7656-6663-4A3C-BE38-B75C6C57E77D} = {B62728C8-1267-4043-B46F-5537BBAEC692}
{B54DE8DD-6591-45C2-B9F7-22C4A23A384C} = {19FAB78C-3351-4911-8F0C-8C6056401740}
{B24CD8F2-D809-4DB8-89A1-D45FA9218020} = {19FAB78C-3351-4911-8F0C-8C6056401740}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {46465737-C938-44FC-BE1A-4CE139EBB5E0}
EndGlobalSection
EndGlobal

74
dotnet-monitor.yml Normal file
Просмотреть файл

@ -0,0 +1,74 @@
trigger: none
pr:
autoCancel: true
branches:
include:
- master
- release/*
- internal/release/*
paths:
exclude:
- documentation/*
- '*.md'
- THIRD-PARTY-NOTICES.TXT
- LICENSE.TXT
variables:
- name: _TeamName
value: DotNetCore
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
# DotNet-Diagnostics-SDL-Params provides Tsa* variables for SDL checks.
- group: DotNet-Diagnostics-SDL-Params
stages:
- stage: build
displayName: Build dotnet-monitor
jobs:
- template: /eng/build.yml
parameters:
name: windows
displayName: Build
osGroup: Windows
configuration: Release
platform: x64
# This registers the build with BAR.
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- template: /eng/common/templates/job/publish-build-assets.yml
parameters:
configuration: Release
dependsOn:
- Windows
publishUsingPipelines: true
pool:
name: NetCoreInternal-Pool
queue: buildpool.windows.10.amd64.vs2017
# These are the stages that perform validation of several SDL requirements and publish the bits required to the designated feed.
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- template: /eng/common/templates/post-build/post-build.yml
parameters:
# This is to enable SDL runs part of Post-Build Validation Stage.
# as well as NuGet, SourceLink, and signing validation.
# The variables get imported from group dotnet-diagnostics-sdl-params
publishingInfraVersion: 3
enableSourceLinkValidation: true
enableSigningValidation: true
enableSymbolValidation: false
enableNugetValidation: true
publishInstallersAndChecksums: true
SDLValidationParameters:
enable: true
continueOnError: true
params: ' -SourceToolsList @("policheck","credscan")
-TsaInstanceURL $(_TsaInstanceURL)
-TsaProjectName $(_TsaProjectName)
-TsaNotificationEmail $(_TsaNotificationEmail)
-TsaCodebaseAdmin $(_TsaCodebaseAdmin)
-TsaBugAreaPath $(_TsaBugAreaPath)
-TsaIterationPath $(_TsaIterationPath)
-TsaRepositoryName "dotnet-monitor"
-TsaCodebaseName "dotnet-monitor"
-TsaPublish $True'
artifactNames:
- 'Packages'

Просмотреть файл

@ -1,23 +0,0 @@
@echo off
setlocal
powershell -ExecutionPolicy ByPass -NoProfile -Command "& { . '%~dp0eng\common\tools.ps1'; InitializeDotNetCli $true $true }"
if NOT [%ERRORLEVEL%] == [0] (
echo Failed to install or invoke dotnet... 1>&2
exit /b %ERRORLEVEL%
)
set /p dotnetPath=<%~dp0artifacts\toolset\sdk.txt
:: Clear the 'Platform' env variable for this session, as it's a per-project setting within the build, and
:: a misleading value (such as 'MCD' on HP PCs) may lead to build breakage (issue: #69).
set Platform=
:: Don't resolve runtime, shared framework, or SDK from other locations to ensure build determinism
set DOTNET_MULTILEVEL_LOOKUP=0
:: Disable first run since we want to control all package sources
set DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
call "%dotnetPath%\dotnet.exe" %*

Просмотреть файл

@ -1,27 +0,0 @@
#!/usr/bin/env bash
source="${BASH_SOURCE[0]}"
# resolve $SOURCE until the file is no longer a symlink
while [[ -h $source ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where the
# symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
# Don't resolve runtime, shared framework, or SDK from other locations to ensure build determinism
export DOTNET_MULTILEVEL_LOOKUP=0
# Disable first run since we want to control all package sources
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
source $scriptroot/eng/common/tools.sh
InitializeDotNetCli true # Install
__dotnetDir=${_InitializeDotNetCli}
dotnetPath=${__dotnetDir}/dotnet
${dotnetPath} "$@"

Просмотреть файл

@ -1,354 +0,0 @@
@if not defined _echo @echo off
setlocal EnableDelayedExpansion EnableExtensions
:: Define a prefix for most output progress messages that come from this script. That makes
:: it easier to see where these are coming from. Note that there is a trailing space here.
set "__MsgPrefix=BUILD: "
echo %__MsgPrefix%Starting Build at %TIME%
set __ThisScriptFull="%~f0"
set __ThisScriptDir="%~dp0"
call "%__ThisScriptDir%"\setup-vs-tools.cmd
if NOT '%ERRORLEVEL%' == '0' goto ExitWithError
if defined VS160COMNTOOLS (
set "__VSToolsRoot=%VS160COMNTOOLS%"
set "__VCToolsRoot=%VS160COMNTOOLS%\..\..\VC\Auxiliary\Build"
set __VSVersion=vs2019
) else if defined VS150COMNTOOLS (
set "__VSToolsRoot=%VS150COMNTOOLS%"
set "__VCToolsRoot=%VS150COMNTOOLS%\..\..\VC\Auxiliary\Build"
set __VSVersion=vs2017
)
:: Set the default arguments for build
set __BuildArch=x64
if /i "%PROCESSOR_ARCHITECTURE%" == "amd64" set __BuildArch=x64
if /i "%PROCESSOR_ARCHITECTURE%" == "x86" set __BuildArch=x86
set __BuildType=Debug
set __BuildOS=Windows_NT
set __Build=1
set __CI=0
set __Verbosity=minimal
set __BuildCrossArch=0
set __CrossArch=
:: Set the various build properties here so that CMake and MSBuild can pick them up
set "__ProjectDir=%~dp0"
:: remove trailing slash
if %__ProjectDir:~-1%==\ set "__ProjectDir=%__ProjectDir:~0,-1%"
set "__ProjectDir=%__ProjectDir%\.."
set "__SourceDir=%__ProjectDir%\src"
:: __UnprocessedBuildArgs are args that we pass to msbuild (e.g. /p:OfficialBuildId=xxxxxx)
set "__args=%*"
set processedArgs=
set __UnprocessedBuildArgs=
:Arg_Loop
if "%1" == "" goto ArgsDone
if /i "%1" == "-?" goto Usage
if /i "%1" == "-h" goto Usage
if /i "%1" == "-help" goto Usage
if /i "%1" == "--help" goto Usage
if /i "%1" == "-configuration" (set __BuildType=%2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
if /i "%1" == "-architecture" (set __BuildArch=%2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
if /i "%1" == "-verbosity" (set __Verbosity=%2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
if /i "%1" == "-ci" (set __CI=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
:: These options are ignored for a native build
if /i "%1" == "-clean" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-build" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-rebuild" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-test" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-sign" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-restore" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-pack" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-publish" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-preparemachine" (set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
if /i "%1" == "-projects" (set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
if [!processedArgs!] == [] (
set __UnprocessedBuildArgs=%__args%
) else (
set __UnprocessedBuildArgs=%__args%
for %%t in (!processedArgs!) do (
set __UnprocessedBuildArgs=!__UnprocessedBuildArgs:*%%t=!
)
)
:ArgsDone
:: Determine if this is a cross-arch build
if /i "%__BuildArch%" == "arm64" (
set __BuildCrossArch=%__Build%
set __CrossArch=x64
)
if /i "%__BuildArch%" == "arm" (
set __BuildCrossArch=%__Build%
set __CrossArch=x86
)
if /i "%__BuildType%" == "debug" set __BuildType=Debug
if /i "%__BuildType%" == "release" set __BuildType=Release
if "%NUGET_PACKAGES%" == "" (
if %__CI% EQU 1 (
set "NUGET_PACKAGES=%__ProjectDir%\.packages"
) else (
set "NUGET_PACKAGES=%UserProfile%\.nuget\packages"
)
)
echo %NUGET_PACKAGES%
:: Set the remaining variables based upon the determined build configuration
set "__RootBinDir=%__ProjectDir%\artifacts"
set "__BinDir=%__RootBinDir%\bin\%__BuildOS%.%__BuildArch%.%__BuildType%"
set "__LogDir=%__RootBinDir%\log\%__BuildOS%.%__BuildArch%.%__BuildType%"
set "__IntermediatesDir=%__RootBinDir%\obj\%__BuildOS%.%__BuildArch%.%__BuildType%"
set "__PackagesBinDir=%__RootBinDir%\packages\%__BuildType%\Shipping"
set "__CrossComponentBinDir=%__BinDir%"
set "__CrossCompIntermediatesDir=%__IntermediatesDir%\crossgen"
if NOT "%__CrossArch%" == "" set __CrossComponentBinDir=%__CrossComponentBinDir%\%__CrossArch%
:: Generate path to be set for CMAKE_INSTALL_PREFIX to contain forward slash
set "__CMakeBinDir=%__BinDir%"
set "__CMakeBinDir=%__CMakeBinDir:\=/%"
:: Common msbuild arguments
set "__CommonBuildArgs=/v:!__Verbosity! /p:Configuration=%__BuildType% /p:BuildArch=%__BuildArch% %__UnprocessedBuildArgs%"
if not exist "%__BinDir%" md "%__BinDir%"
if not exist "%__IntermediatesDir%" md "%__IntermediatesDir%"
if not exist "%__LogDir%" md "%__LogDir%"
echo %__MsgPrefix%Commencing diagnostics repo build
:: Set the remaining variables based upon the determined build configuration
echo %__MsgPrefix%Checking prerequisites
:: Eval the output from probe-win1.ps1
for /f "delims=" %%a in ('powershell -NoProfile -ExecutionPolicy ByPass "& ""%__ProjectDir%\eng\set-cmake-path.ps1"""') do %%a
REM =========================================================================================
REM ===
REM === Start the build steps
REM ===
REM =========================================================================================
@if defined _echo @echo on
:: Parse the optdata package versions out of msbuild so that we can pass them on to CMake
set __DotNetCli=%__ProjectDir%\.dotnet\dotnet.exe
if not exist "%__DotNetCli%" (
echo %__MsgPrefix%Assertion failed: dotnet cli not found at path "%__DotNetCli%"
goto ExitWithError
)
REM =========================================================================================
REM ===
REM === Build Cross-Architecture Native Components (if applicable)
REM ===
REM =========================================================================================
if /i %__BuildCrossArch% EQU 1 (
rem Scope environment changes start {
setlocal
echo %__MsgPrefix%Commencing build of cross architecture native components for %__BuildOS%.%__BuildArch%.%__BuildType%
:: Set the environment for the native build
set __VCBuildArch=x86_amd64
if /i "%__CrossArch%" == "x86" ( set __VCBuildArch=x86 )
echo %__MsgPrefix%Using environment: "%__VCToolsRoot%\vcvarsall.bat" !__VCBuildArch!
call "%__VCToolsRoot%\vcvarsall.bat" !__VCBuildArch!
@if defined _echo @echo on
if not exist "%__CrossCompIntermediatesDir%" md "%__CrossCompIntermediatesDir%"
echo Generating Version Header
set __GenerateVersionLog="%__LogDir%\GenerateVersion.binlog"
powershell -NoProfile -ExecutionPolicy ByPass -NoLogo -File "%__ProjectDir%\eng\common\msbuild.ps1" "%__ProjectDir%\eng\CreateVersionFile.csproj" /bl:!__GenerateVersionLog! /t:GenerateVersionFiles /restore /p:FileVersionFile=%__RootBinDir%\bin\FileVersion.txt /p:GenerateVersionHeader=true /p:NativeVersionHeaderFile=%__CrossCompIntermediatesDir%\_version.h %__CommonBuildArgs%
if not !errorlevel! == 0 (
echo Generate Version Header FAILED
goto ExitWithError
)
if defined __SkipConfigure goto SkipConfigureCrossBuild
set __CMakeBinDir=%__CrossComponentBinDir%
set "__CMakeBinDir=!__CMakeBinDir:\=/!"
set "__ManagedBinaryDir=%__RootBinDir%\bin"
set "__ManagedBinaryDir=!__ManagedBinaryDir:\=/!"
set __ExtraCmakeArgs="-DCLR_MANAGED_BINARY_DIR=!__ManagedBinaryDir!" "-DCLR_BUILD_TYPE=%__BuildType%" "-DCLR_CMAKE_TARGET_ARCH=%__BuildArch%" "-DCMAKE_SYSTEM_VERSION=10.0" "-DNUGET_PACKAGES=%NUGET_PACKAGES:\=/%"
pushd "%__CrossCompIntermediatesDir%"
call "%__ProjectDir%\eng\gen-buildsys-win.bat" "%__ProjectDir%" %__VSVersion% %__CrossArch% !__ExtraCmakeArgs!
@if defined _echo @echo on
popd
:SkipConfigureCrossBuild
if not exist "%__CrossCompIntermediatesDir%\install.vcxproj" (
echo %__MsgPrefix%Error: failed to generate cross-arch components build project!
goto ExitWithError
)
if defined __ConfigureOnly goto SkipCrossCompBuild
set __BuildLog="%__LogDir%\Cross.Build.binlog"
:: MSBuild.exe is the only one that has the C++ targets. "%__DotNetCli% msbuild" fails because VCTargetsPath isn't defined.
msbuild.exe %__CrossCompIntermediatesDir%\install.vcxproj /bl:!__BuildLog! %__CommonBuildArgs%
if not !ERRORLEVEL! == 0 (
echo %__MsgPrefix%Error: cross-arch components build failed. Refer to the build log files for details:
echo !__BuildLog!
goto ExitWithError
)
:SkipCrossCompBuild
rem } Scope environment changes end
endlocal
)
REM =========================================================================================
REM ===
REM === Build the native code
REM ===
REM =========================================================================================
if %__Build% EQU 1 (
rem Scope environment changes start {
setlocal
echo %__MsgPrefix%Commencing build of native components for %__BuildOS%.%__BuildArch%.%__BuildType%
set __VCBuildArch=x86_amd64
if /i "%__BuildArch%" == "x86" ( set __VCBuildArch=x86 )
if /i "%__BuildArch%" == "arm" (
set __VCBuildArch=x86_arm
:: Make CMake pick the highest installed version in the 10.0.* range
set ___SDKVersion="-DCMAKE_SYSTEM_VERSION=10.0"
)
if /i "%__BuildArch%" == "arm64" (
set __VCBuildArch=x86_arm64
:: Make CMake pick the highest installed version in the 10.0.* range
set ___SDKVersion="-DCMAKE_SYSTEM_VERSION=10.0"
)
echo %__MsgPrefix%Using environment: "%__VCToolsRoot%\vcvarsall.bat" !__VCBuildArch!
call "%__VCToolsRoot%\vcvarsall.bat" !__VCBuildArch!
@if defined _echo @echo on
if not defined VSINSTALLDIR (
echo %__MsgPrefix%Error: VSINSTALLDIR variable not defined.
goto ExitWithError
)
echo Generating Version Header
set __GenerateVersionLog="%__LogDir%\GenerateVersion.binlog"
powershell -NoProfile -ExecutionPolicy ByPass -NoLogo -File "%__ProjectDir%\eng\common\msbuild.ps1" "%__ProjectDir%\eng\CreateVersionFile.csproj" /bl:!__GenerateVersionLog! /t:GenerateVersionFiles /restore /p:FileVersionFile=%__RootBinDir%\bin\FileVersion.txt /p:GenerateVersionHeader=true /p:NativeVersionHeaderFile=%__IntermediatesDir%\_version.h %__CommonBuildArgs%
if not !errorlevel! == 0 (
echo Generate Version Header FAILED
goto ExitWithError
)
if defined __SkipConfigure goto SkipConfigure
echo %__MsgPrefix%Regenerating the Visual Studio solution
set "__ManagedBinaryDir=%__RootBinDir%\bin"
set "__ManagedBinaryDir=!__ManagedBinaryDir:\=/!"
set __ExtraCmakeArgs=!___SDKVersion! "-DCLR_MANAGED_BINARY_DIR=!__ManagedBinaryDir!" "-DCLR_BUILD_TYPE=%__BuildType%" "-DCLR_CMAKE_TARGET_ARCH=%__BuildArch%" "-DNUGET_PACKAGES=%NUGET_PACKAGES:\=/%"
pushd "%__IntermediatesDir%"
call "%__ProjectDir%\eng\gen-buildsys-win.bat" "%__ProjectDir%" %__VSVersion% %__BuildArch% !__ExtraCmakeArgs!
@if defined _echo @echo on
popd
:SkipConfigure
if defined __ConfigureOnly goto SkipNativeBuild
if not exist "%__IntermediatesDir%\install.vcxproj" (
echo %__MsgPrefix%Error: failed to generate native component build project!
goto ExitWithError
)
set __BuildLog="%__LogDir%\Native.Build.binlog"
:: MSBuild.exe is the only one that has the C++ targets. "%__DotNetCli% msbuild" fails because VCTargetsPath isn't defined.
msbuild.exe %__IntermediatesDir%\install.vcxproj /bl:!__BuildLog! %__CommonBuildArgs%
if not !ERRORLEVEL! == 0 (
echo %__MsgPrefix%Error: native component build failed. Refer to the build log files for details:
echo !__BuildLog!
goto ExitWithError
)
:SkipNativeBuild
rem } Scope environment changes end
endlocal
)
REM Copy the native SOS binaries to where these tools expect for CI & VS testing
set "__dotnet_sos=%__RootBinDir%\bin\dotnet-sos\%__BuildType%\netcoreapp2.1"
set "__dotnet_dump=%__RootBinDir%\bin\dotnet-dump\%__BuildType%\netcoreapp2.1"
mkdir %__dotnet_sos%\win-%__BuildArch%
mkdir %__dotnet_sos%\publish\win-%__BuildArch%
mkdir %__dotnet_dump%\win-%__BuildArch%
mkdir %__dotnet_dump%\publish\win-%__BuildArch%
xcopy /y /q /i %__BinDir% %__dotnet_sos%\win-%__BuildArch%
xcopy /y /q /i %__BinDir% %__dotnet_sos%\publish\win-%__BuildArch%
xcopy /y /q /i %__BinDir% %__dotnet_dump%\win-%__BuildArch%
xcopy /y /q /i %__BinDir% %__dotnet_dump%\publish\win-%__BuildArch%
REM =========================================================================================
REM ===
REM === All builds complete!
REM ===
REM =========================================================================================
echo %__MsgPrefix%Repo successfully built. Finished at %TIME%
echo %__MsgPrefix%Product binaries are available at !__BinDir!
exit /b 0
REM =========================================================================================
REM === These two routines are intended for the exit code to propagate to the parent process,
REM === like MSBuild or PowerShell. If we directly goto ExitWithError from within an if statement in
REM === any of the routines, the exit code is not propagated due to quirks of nested conditionals
REM === in delayed expansion scripts.
REM =========================================================================================
:ExitWithError
exit /b 1
:ExitWithCode
exit /b !__exitCode!
REM =========================================================================================
REM ===
REM === Helper routines
REM ===
REM =========================================================================================
:Usage
echo.
echo Build the Diagnostics repo.
echo.
echo Usage:
echo build-native.cmd [option1] [option2]
echo.
echo All arguments are optional. The options are:
echo.
echo.-? -h -help --help: view this message.
echo -architecture ^<x64^|x86^|arm^|arm64^>.
echo -configuration ^<debug^|release^>
echo -verbosity ^<q[uiet]^|m[inimal]^|n[ormal]^|d[etailed]^|diag[nostic]^>
goto ExitWithError

Просмотреть файл

@ -1,5 +0,0 @@
<Project>
<ItemGroup Condition=" '$(BundleTools)' == 'true' ">
<ProjectToBuild Include="$(RepoRoot)src/singlefile-tools.proj" />
</ItemGroup>
</Project>

Просмотреть файл

@ -1,3 +0,0 @@
@echo off
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0build.ps1""" -restore -test -ci %*"
exit /b %ErrorLevel%

Просмотреть файл

@ -1,8 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
</PropertyGroup>
<Import Project="$(RepositoryEngineeringDir)\InstallRuntimes.proj" />
</Project>

Просмотреть файл

@ -1,92 +0,0 @@
<!-- All Rights Reserved. Licensed to the .NET Foundation under one or more agreements. The .NET Foundation licenses this file to you under the MIT license. See the LICENSE file in the project root for more information. -->
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
</PropertyGroup>
<Target Name="GenerateVersionFiles" DependsOnTargets="GenerateVersionHeader;GenerateVersionSourceFile" />
<Target Name="GenerateVersionHeader" DependsOnTargets="GetAssemblyVersion;AddSourceRevisionToInformationalVersion" Condition="'$(NativeVersionHeaderFile)' != '' and '$(GenerateVersionHeader)' == 'true' and !Exists($(NativeVersionHeaderFile))">
<Error Message="VersionSuffixDateStamp is missing" Condition="'$(VersionSuffixDateStamp)' == ''" />
<PropertyGroup>
<ProductVersion>$(FileVersion.Replace(".", ","))</ProductVersion>
</PropertyGroup>
<PropertyGroup>
<NativeVersionLines>
<![CDATA[
#ifndef VER_COMPANYNAME_STR
#define VER_COMPANYNAME_STR "Microsoft Corporation"
#endif
#ifndef VER_PRODUCTNAME_STR
#define VER_PRODUCTNAME_STR "Microsoft\xae .NET Framework"
#endif
#undef VER_PRODUCTVERSION
#define VER_PRODUCTVERSION $(ProductVersion)
#undef VER_PRODUCTVERSION_STR
#define VER_PRODUCTVERSION_STR "$(InformationalVersion)"
#undef VER_FILEVERSION
#define VER_FILEVERSION $(ProductVersion)
#undef VER_FILEVERSION_STR
#define VER_FILEVERSION_STR "$(FileVersion)"
#ifndef VER_LEGALCOPYRIGHT_STR
#define VER_LEGALCOPYRIGHT_STR "\xa9 Microsoft Corporation. All rights reserved."
#endif
]]>
</NativeVersionLines>
</PropertyGroup>
<WriteLinesToFile File="$(NativeVersionHeaderFile)" Lines="$(NativeVersionLines.Replace(';', '%3B'))" Overwrite="true" />
<ItemGroup>
<FileWrites Include="$(NativeVersionHeaderFile)" />
</ItemGroup>
<Message Importance="High" Text="Created version file $(NativeVersionHeaderFile)" />
</Target>
<!-- Non Windows versioning requires to generate a source file and include it on the compilation. -->
<Target Name="GenerateVersionSourceFile" DependsOnTargets="GetAssemblyVersion;AddSourceRevisionToInformationalVersion" Condition="'$(NativeVersionSourceFile)' != '' and '$(GenerateVersionSourceFile)' == 'true' and !Exists($(NativeVersionHeaderFile))">
<!-- Get Username -->
<PropertyGroup>
<VersionUserName Condition="'$(VersionUserName)' == ''">$(USERNAME)</VersionUserName>
</PropertyGroup>
<Exec Command="whoami" Condition="'$(VersionUserName)' == ''" StandardOutputImportance="Low" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" ConsoleToMSBuild="true">
<Output TaskParameter="ConsoleOutput" PropertyName="VersionUserName" />
</Exec>
<!-- Get Hostname -->
<PropertyGroup>
<VersionHostName Condition="'$(VersionHostName)' == ''">$(COMPUTERNAME)</VersionHostName>
</PropertyGroup>
<Exec Command="hostname" Condition="'$(RunningOnUnix)'=='true' AND '$(VersionHostName)'==''" StandardOutputImportance="Low" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" ConsoleToMSBuild="true">
<Output TaskParameter="ConsoleOutput" PropertyName="VersionHostName" />
</Exec>
<PropertyGroup>
<BuiltByString Condition="'$(VersionUserName)' != '' AND '$(VersionHostName)' != ''">$(BuiltByString) %40BuiltBy: $(VersionUserName)-$(VersionHostName)</BuiltByString>
</PropertyGroup>
<PropertyGroup>
<!-- SOS (strike.cpp) has a dependency on this variable being public and that it begins with the @(#)Version prefix -->
<NativeVersionLines>
<![CDATA[
char sccsid[] __attribute__((used)) = "@(#)Version $(InformationalVersion)$(BuiltByString)";
]]>
</NativeVersionLines>
</PropertyGroup>
<WriteLinesToFile File="$(NativeVersionSourceFile)" Lines="$(NativeVersionLines.Replace(';', '%3B'))" Overwrite="true" />
<ItemGroup>
<FileWrites Include="$(NativeVersionSourceFile)" />
</ItemGroup>
<Message Importance="High" Text="Created version file $(NativeVersionSourceFile)" />
</Target>
</Project>

Просмотреть файл

@ -1,4 +0,0 @@
<Project>
<Import Project="$(MSBuildThisFileDirectory)..\Directory.Build.props"/>
<Import Project="Sdk.props" Sdk="Microsoft.DotNet.Arcade.Sdk" />
</Project>

Просмотреть файл

@ -1,4 +0,0 @@
<Project>
<Import Project="Sdk.targets" Sdk="Microsoft.DotNet.Arcade.Sdk" />
<Target Name="Build" />
</Project>

Просмотреть файл

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<ExtensionPackage>
<Name>SOS</Name>
<Version>X.X.X.X</Version>
<Description>Debugging aid for .NET Core programs and runtimes</Description>
<Components>
<BinaryComponent Name="sos" Type="Engine">
<Files>
<File Architecture="amd64" Module="x64\sos.dll" />
<File Architecture="x86" Module="x86\sos.dll" />
<File Architecture="arm32" Module="arm32\sos.dll" />
</Files>
<LoadTriggers>
<TriggerSet>
<ModuleTrigger Name="coreclr.dll" />
</TriggerSet>
</LoadTriggers>
<EngineCommands>
<EngineCommand Name="clrstack">
<EngineCommandItem>
<Syntax>!clrstack</Syntax>
<Description>Provides a stack trace of managed code only</Description>
</EngineCommandItem>
</EngineCommand>
<EngineCommand Name="clrthreads">
<EngineCommandItem>
<Syntax>!clrthreads</Syntax>
<Description>List the managed threads running</Description>
</EngineCommandItem>
</EngineCommand>
<EngineCommand Name="soshelp">
<EngineCommandItem>
<Syntax>!soshelp</Syntax>
<Description>Displays all available SOS commands or details about the command</Description>
</EngineCommandItem>
</EngineCommand>
</EngineCommands>
</BinaryComponent>
</Components>
</ExtensionPackage>

Просмотреть файл

@ -1,301 +0,0 @@
<!-- All Rights Reserved. Licensed to the .NET Foundation under one or more agreements. The .NET Foundation licenses this file to you under the MIT license. See the LICENSE file in the project root for more information. -->
<Project>
<!--
$(BuildArch) - architecture to test (x64, x86, arm, arm64). Defaults to x64.
$(PrivateBuildPath) - if non-empty, path to private runtime build to copy/test
Internal service release testing:
$(DotnetRuntimeVersion) - the service release version to test against (fx-version option value) i.e. 2.1.17, 3.1.3 or "default"
$(DotnetRuntimeDownloadVersion) - the service release package version i.e. 2.1.17, 3.1.3-servicing.20128.1 or "default"
$(RuntimeSourceFeed) - the service release internal blob storage link
$(RuntimeSourceFeedKey) - the service release blob feed token
From Versions.props:
$(MicrosoftDotnetSdkInternalVersion) - .NET SDK to use for testing
$(VSRedistCommonNetCoreSharedFrameworkx6460Version) - latest dotnet runtime package version (the version to install)
$(MicrosoftNETCoreAppRuntimewinx64Version) - latest dotnet runtime stable version (the version that actually is installed)
$(MicrosoftAspNetCoreAppRefInternalVersion) - latest dotnet aspnetcore package version (the version to install)
$(MicrosoftAspNetCoreAppRefVersion) - latest dotnet aspnetcore stable version (the version that actually is installed)
$(MicrosoftNETCoreApp50Version) $(MicrosoftAspNetCoreApp50Version) - 5.0 version
$(MicrosoftNETCoreApp31Version) $(MicrosoftAspNetCoreApp31Version) - 3.1 version
$(MicrosoftNETCoreApp21Version) $(MicrosoftAspNetCoreApp21Version) - 2.1 version
From Arcade:
$(RepoRoot) - the root of the diagnostics repo
$(RepositoryEngineeringDir) - the "eng" directory
$(VersionsPropsPath) - path of Versions.props
-->
<PropertyGroup>
<BuildArch Condition="'$(BuildArch)' == ''">$(Platform)</BuildArch>
<BuildArch Condition="'$(BuildArch)' == ''">x64</BuildArch>
<PrivateBuildTesting>false</PrivateBuildTesting>
<PrivateBuildTesting Condition="'$(PrivateBuildPath)' != ''">true</PrivateBuildTesting>
<InternalReleaseTesting>false</InternalReleaseTesting>
<InternalReleaseTesting Condition="'$(DotnetRuntimeVersion)' != 'default'">true</InternalReleaseTesting>
<ExtraInstallArgs>-runtimesourcefeed '$(RuntimeSourceFeed)' -runtimesourcefeedkey '$(RuntimeSourceFeedKey)'</ExtraInstallArgs>
</PropertyGroup>
<PropertyGroup Condition="'$(BuildArch)' != 'x86'">
<DotNetInstallRoot>$(RepoRoot).dotnet-test\</DotNetInstallRoot>
<RegistryRoot>HKEY_LOCAL_MACHINE\SOFTWARE</RegistryRoot>
</PropertyGroup>
<PropertyGroup Condition="'$(BuildArch)' == 'x86'">
<DotNetInstallRoot>$(RepoRoot).dotnet-test\x86\</DotNetInstallRoot>
<RegistryRoot>HKEY_LOCAL_MACHINE\SOFTWARE\WOW6432Node</RegistryRoot>
</PropertyGroup>
<PropertyGroup>
<CommonInstallArgs>-NoPath -SkipNonVersionedFiles -Architecture $(BuildArch) -InstallDir $(DotNetInstallRoot)</CommonInstallArgs>
<DotNetInstallDir>$([MSBuild]::NormalizeDirectory('$(DotNetInstallRoot)', 'shared', 'Microsoft.NETCore.App', '$(MicrosoftNETCoreAppRuntimewinx64Version)'))</DotNetInstallDir>
<TestConfigFileName>$(DotNetInstallRoot)Debugger.Tests.Versions.txt</TestConfigFileName>
<AddRegeditFileName>$(DotNetInstallRoot)AddPrivateTesting.reg</AddRegeditFileName>
<RemoveRegeditFileName>$(DotNetInstallRoot)RemovePrivateTesting.reg</RemoveRegeditFileName>
<RegeditCommand>regedit.exe</RegeditCommand>
</PropertyGroup>
<Choose>
<When Condition="$([MSBuild]::IsOsPlatform(Windows))">
<PropertyGroup>
<PowershellWrapper>powershell -NonInteractive -ExecutionPolicy ByPass -NoProfile -command</PowershellWrapper>
<DotnetInstallScriptCmd>'$(RepositoryEngineeringDir)\dotnet-install.ps1'</DotnetInstallScriptCmd>
</PropertyGroup>
</When>
<Otherwise>
<PropertyGroup>
<DotnetInstallScriptCmd>$(RepositoryEngineeringDir)/dotnet-install.sh</DotnetInstallScriptCmd>
</PropertyGroup>
</Otherwise>
</Choose>
<ItemGroup Condition="!$(InternalReleaseTesting) and !$(PrivateBuildTesting)">
<TestVersions Include="Latest" RuntimeVersion="$(VSRedistCommonNetCoreSharedFrameworkx6460Version)" AspNetVersion="$(MicrosoftAspNetCoreAppRefInternalVersion)" />
<TestVersions Include="50" RuntimeVersion="$(MicrosoftNETCoreApp50Version)" AspNetVersion="$(MicrosoftAspNetCoreApp50Version)" />
<TestVersions Include="31" RuntimeVersion="$(MicrosoftNETCoreApp31Version)" AspNetVersion="$(MicrosoftAspNetCoreApp31Version)" />
<TestVersions Condition="'$(BuildArch)' != 'arm' and '$(BuildArch)' != 'arm64'" Include="21" RuntimeVersion="$(MicrosoftNETCoreApp21Version)" AspNetVersion="$(MicrosoftAspNetCoreApp21Version)" />
</ItemGroup>
<!-- Local private build testing -->
<ItemGroup Condition="$(PrivateBuildTesting)">
<TestVersions Include="Latest" RuntimeVersion="$(VSRedistCommonNetCoreSharedFrameworkx6460Version)" AspNetVersion="$(MicrosoftAspNetCoreAppRefInternalVersion)" />
</ItemGroup>
<!-- Internal service release testing -->
<ItemGroup Condition="$(InternalReleaseTesting)">
<TestVersions Include="Internal" RuntimeVersion="$(DotnetRuntimeDownloadVersion)" ExtraInstallArgs="$(ExtraInstallArgs)" Condition="'$(DotnetRuntimeDownloadVersion)' != 'default'"/>
<TestVersions Include="Internal" RuntimeVersion="$(DotnetRuntimeVersion)" ExtraInstallArgs="$(ExtraInstallArgs)" Condition="'$(DotnetRuntimeDownloadVersion)' == 'default'"/>
</ItemGroup>
<!--
Installs the runtimes for the SOS tests, handles private runtime build support or cleans up the private build registry keys
-->
<Target Name="InstallTestRuntimes"
BeforeTargets="RunTests"
DependsOnTargets="CleanupVersionManifest;InstallRuntimesWindows;InstallRuntimesUnix;CopyPrivateBuild;WriteTestVersionManifest;" />
<!--
Installs the test runtimes on Windows
-->
<Target Name="InstallRuntimesWindows"
Condition="$([MSBuild]::IsOsPlatform(Windows))"
Inputs="$(VersionsPropsPath)" Outputs="$(TestConfigFileName)">
<Exec Command="$(PowershellWrapper) &quot;&amp; { &amp;$(DotnetInstallScriptCmd) $(CommonInstallArgs) -Version $(MicrosoftDotnetSdkInternalVersion) }&quot;" />
<Exec Command="$(PowershellWrapper) &quot;&amp; { &amp;$(DotnetInstallScriptCmd) $(CommonInstallArgs) %(TestVersions.ExtraInstallArgs) -Version %(TestVersions.RuntimeVersion) -Runtime dotnet }&quot;"
Condition="'%(TestVersions.RuntimeVersion)' != ''" />
<Exec Command="$(PowershellWrapper) &quot;&amp; { &amp;$(DotnetInstallScriptCmd) $(CommonInstallArgs) %(TestVersions.ExtraInstallArgs) -Version %(TestVersions.AspNetVersion) -Runtime aspnetcore }&quot;"
Condition="'%(TestVersions.AspNetVersion)' != ''" />
</Target>
<!--
Installs the test runtimes on Linux/MacOS
-->
<Target Name="InstallRuntimesUnix"
Condition="!$([MSBuild]::IsOsPlatform(Windows))"
Inputs="$(VersionsPropsPath)" Outputs="$(TestConfigFileName)">
<Exec Command="bash $(DotnetInstallScriptCmd) $(CommonInstallArgs) -Version $(MicrosoftDotnetSdkInternalVersion)"
IgnoreStandardErrorWarningFormat="true" />
<Exec Command="bash $(DotnetInstallScriptCmd) $(CommonInstallArgs) %(TestVersions.ExtraInstallArgs) -Version %(TestVersions.RuntimeVersion) -Runtime dotnet"
IgnoreStandardErrorWarningFormat="true"
Condition="'%(TestVersions.RuntimeVersion)' != ''" />
<Exec Command="bash $(DotnetInstallScriptCmd) $(CommonInstallArgs) %(TestVersions.ExtraInstallArgs) -Version %(TestVersions.AspNetVersion) -Runtime aspnetcore"
IgnoreStandardErrorWarningFormat="true"
Condition="'%(TestVersions.AspNetVersion)' != ''" />
</Target>
<!--
Writes the Debugger.Tests.Versions.txt file used by the SOS test harness
-->
<Target Name="WriteTestVersionManifest"
Inputs="$(VersionsPropsPath)"
Outputs="$(TestConfigFileName)">
<PropertyGroup Condition="'$(PrivateBuildTesting)' != 'true' AND '$(InternalReleaseTesting)' != 'true'">
<RuntimeVersion21>$(MicrosoftNETCoreApp21Version)</RuntimeVersion21>
<AspNetCoreVersion21>$(MicrosoftAspNetCoreApp21Version)</AspNetCoreVersion21>
<RuntimeVersion31>$(MicrosoftNETCoreApp31Version)</RuntimeVersion31>
<AspNetCoreVersion31>$(MicrosoftAspNetCoreApp31Version)</AspNetCoreVersion31>
<RuntimeVersion50>$(MicrosoftNETCoreApp50Version)</RuntimeVersion50>
<AspNetCoreVersion50>$(MicrosoftAspNetCoreApp50Version)</AspNetCoreVersion50>
</PropertyGroup>
<PropertyGroup>
<RuntimeVersionLatest>$(MicrosoftNETCoreAppRuntimewinx64Version)</RuntimeVersionLatest>
<RuntimeVersionLatest Condition="$(InternalReleaseTesting)">$(DotnetRuntimeVersion)</RuntimeVersionLatest>
<AspNetCoreVersionLatest>$(MicrosoftAspNetCoreAppRefVersion)</AspNetCoreVersionLatest>
</PropertyGroup>
<PropertyGroup>
<TestConfigFileLines>
<![CDATA[
<Configuration>
<PrivateBuildTesting>$(PrivateBuildTesting)</PrivateBuildTesting>
<InternalReleaseTesting>$(InternalReleaseTesting)</InternalReleaseTesting>
<RuntimeVersion21>$(RuntimeVersion21)</RuntimeVersion21>
<AspNetCoreVersion21>$(AspNetCoreVersion21)</AspNetCoreVersion21>
<RuntimeVersion31>$(RuntimeVersion31)</RuntimeVersion31>
<AspNetCoreVersion31>$(AspNetCoreVersion31)</AspNetCoreVersion31>
<RuntimeVersion50>$(RuntimeVersion50)</RuntimeVersion50>
<AspNetCoreVersion50>$(AspNetCoreVersion50)</AspNetCoreVersion50>
<RuntimeVersionLatest>$(RuntimeVersionLatest)</RuntimeVersionLatest>
<AspNetCoreVersionLatest>$(AspNetCoreVersionLatest)</AspNetCoreVersionLatest>
</Configuration>
]]>
</TestConfigFileLines>
</PropertyGroup>
<WriteLinesToFile File="$(TestConfigFileName)" Lines="$(TestConfigFileLines)" Overwrite="true" WriteOnlyWhenDifferent="true" />
<Message Importance="High" Text="Created config file $(TestConfigFileName)" />
<ItemGroup>
<FileWrites Include="$(TestConfigFileName)" />
</ItemGroup>
</Target>
<!--
Removes the test config file if internal service release or private build testing
-->
<Target Name="CleanupVersionManifest"
Condition="$(InternalReleaseTesting) or $(PrivateBuildTesting)">
<!-- Make sure the config file gets regenerated in the WriteTestVersionManifest target -->
<Delete Files="$(TestConfigFileName)" />
</Target>
<!--
Copies the private runtime build binaries and on Windows adds registry keys
-->
<Target Name="CopyPrivateBuild"
Condition="'$(PrivateBuildPath)' != ''"
DependsOnTargets="ModifyRegistry">
<ItemGroup>
<PrivateBuildFiles Include="$(PrivateBuildPath)\*" />
</ItemGroup>
<Message Importance="High" Text="Copying private build binaries from $(PrivateBuildPath) to $(DotNetInstallDir.Replace('\\', '\'))" />
<Copy SourceFiles="@(PrivateBuildFiles)" DestinationFolder="$(DotNetInstallDir.Replace('\\', '\'))" />
</Target>
<!--
Removes the private build registry keys
-->
<Target Name="CleanupPrivateBuild"
Condition="Exists($(RemoveRegeditFileName))">
<Exec Command="$(RegeditCommand) $(RemoveRegeditFileName)" />
<!--
Delete only the AddRegeditFileName so the ModifyRegistry target will run on the next
build. Leave the remove reg key file so this target can be run multiple times.
-->
<Delete Files="$(AddRegeditFileName)" />
</Target>
<!--
On Windows adds the registry keys to allow the unsigned private build DAC to generate dumps
-->
<Target Name="ModifyRegistry"
Condition="$([MSBuild]::IsOsPlatform(Windows))"
DependsOnTargets="CreateRemoveRegFile"
Inputs="$(VersionsPropsPath)"
Outputs="$(AddRegeditFileName)">
<PropertyGroup>
<AddRegeditFileLines>
<![CDATA[
Windows Registry Editor Version 5.00
[$(RegistryRoot)\Microsoft\Windows NT\CurrentVersion\KnownManagedDebuggingDlls]
"$(DotNetInstallDir)mscordaccore.dll"=dword:0
[$(RegistryRoot)\Microsoft\Windows NT\CurrentVersion\MiniDumpAuxiliaryDlls]
"$(DotNetInstallDir)coreclr.dll"="$(DotNetInstallDir)mscordaccore.dll"
]]>
</AddRegeditFileLines>
</PropertyGroup>
<WriteLinesToFile File="$(AddRegeditFileName)" Lines="$(AddRegeditFileLines)" Overwrite="true" WriteOnlyWhenDifferent="true" />
<ItemGroup>
<FileWrites Include="$(AddRegeditFileName)" />
</ItemGroup>
<Exec Command="$(RegeditCommand) $(AddRegeditFileName)" />
</Target>
<!--
Creates the reg file to remove the registry keys added in ModifyRegistry
-->
<Target Name="CreateRemoveRegFile">
<PropertyGroup>
<RemoveRegeditFileLines>
<![CDATA[
Windows Registry Editor Version 5.00
[$(RegistryRoot)\Microsoft\Windows NT\CurrentVersion\KnownManagedDebuggingDlls]
"$(DotNetInstallDir)mscordaccore.dll"=-
[$(RegistryRoot)\Microsoft\Windows NT\CurrentVersion\MiniDumpAuxiliaryDlls]
"$(DotNetInstallDir)coreclr.dll"=-
]]>
</RemoveRegeditFileLines>
</PropertyGroup>
<WriteLinesToFile File="$(RemoveRegeditFileName)" Lines="$(RemoveRegeditFileLines)" Overwrite="true" WriteOnlyWhenDifferent="true" />
<ItemGroup>
<FileWrites Include="$(RemoveRegeditFileName)" />
</ItemGroup>
</Target>
</Project>
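None of these targets is meant to be invoked by hand; the properties documented at the top flow in from the repo's build scripts when tests are run. As a rough sketch only (assuming the PowerShell entry point lives at eng\Build.ps1, with the version numbers reused from the comment above as placeholders), an internal service-release test pass would be driven along these lines:

    .\eng\Build.ps1 -test -dotnetruntimeversion 3.1.3 -dotnetruntimedownloadversion 3.1.3-servicing.20128.1 -runtimesourcefeed <internal blob url> -runtimesourcefeedkey <token>

The script then forwards these values as the DotnetRuntimeVersion, DotnetRuntimeDownloadVersion, RuntimeSourceFeed and RuntimeSourceFeedKey properties consumed by the targets in this file.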

Просмотреть файл

@@ -8,47 +8,14 @@
</PropertyGroup>
<PropertyGroup>
<BundleOutputDir>$(ArtifactsDir)bundledtools/</BundleOutputDir>
<PublishDependsOnTargets>$(PublishDependsOnTargets);CollectBundledToolsArchives;CollectPackageArtifactFiles</PublishDependsOnTargets>
<PublishDependsOnTargets>$(PublishDependsOnTargets);CollectPackageArtifactFiles</PublishDependsOnTargets>
</PropertyGroup>
<ItemGroup>
<PackageFile Include="$(ArtifactsShippingPackagesDir)**/*.nupkg" IsShipping="true" />
<PackageFile Include="$(ArtifactsNonShippingPackagesDir)**/*.nupkg" IsShipping="false" />
<FilesToPublishToSymbolServer Include="$(BundleOutputDir)**/*.pdb" />
<BundledToolsArchives Include="$(BundleOutputDir)**/*.zip" />
</ItemGroup>
<Target Name="CollectBundledToolsArchives">
<!-- Disambiguate the blob links generated by subsequent builds
by using a build version, preferably one related to the tool being published.
Publish.proj - which imports this file - doesn't import the files in
Microsoft.DotNet.Arcade.SDK needed to get a version, so this queries a project
for it; dotnet-trace is an arbitrary choice since all tools share the same version.
Once dotnet-monitor stabilizes, we might need to start calling this target
on every tool that needs publishing. -->
<MSBuild Projects="$(RepoRoot)src/Tools/dotnet-trace/dotnet-trace.csproj"
Targets="_GetPackageVersionInfo"
SkipNonexistentProjects="false">
<Output TaskParameter="TargetOutputs" ItemName="_ResolvedPackageVersionInfo" />
</MSBuild>
<PropertyGroup>
<_PackageVersion>@(_ResolvedPackageVersionInfo->'%(PackageVersion)')</_PackageVersion>
</PropertyGroup>
<ItemGroup>
<ItemsToPushToBlobFeed Include="@(BundledToolsArchives)">
<IsShipping>true</IsShipping>
<PublishFlatContainer>true</PublishFlatContainer>
<RelativeBlobPath>diagnostics/bundledtools/$(_PackageVersion)/%(FileName)%(Extension)</RelativeBlobPath>
<ManifestArtifactData>Category=OTHER</ManifestArtifactData>
</ItemsToPushToBlobFeed>
</ItemGroup>
</Target>
<Target Name="GenerateChecksumsForBlobgroups">
<ItemGroup>
<GenerateChecksumItems Include="%(PackageFile.Identity)"

Просмотреть файл

@@ -1,18 +1,5 @@
<Project>
<ItemGroup Condition="'$(SignFilesToBundle)' != 'true'">
<ItemsToSign Include="$(ArtifactsPackagesDir)**/*.zip"/>
<ItemsToSign Include="$(ArtifactsDir)bundledtools/**/*.zip" />
</ItemGroup>
<ItemGroup Condition="'$(SignFilesToBundle)' == 'true'">
<ItemsToSign Remove="@(ItemsToSign)"/>
</ItemGroup>
<Import Project="$(SignatureManifest)"
Condition="'$(SignFilesToBundle)' == 'true'" />
<ItemGroup>
<FileSignInfo Include="ParallelStacks.Runtime.dll" CertificateName="3PartySHA2" />
<FileSignInfo Include="Newtonsoft.Json.dll" CertificateName="3PartySHA2" />
<FileSignInfo Include="Newtonsoft.Json.Bson.dll" CertificateName="3PartySHA2" />
</ItemGroup>

Просмотреть файл

@@ -4,25 +4,13 @@
<Uri>https://github.com/dotnet/command-line-api</Uri>
<Sha>166610c56ff732093f0145a2911d4f6c40b786da</Sha>
</Dependency>
<Dependency Name="Microsoft.DotNet.RemoteExecutor" Version="6.0.0-beta.21105.5">
<Uri>https://github.com/dotnet/arcade</Uri>
<Sha>fc83e59329203724d4a63c4f6c843be62983a35e</Sha>
<Dependency Name="Microsoft.Diagnostics.Monitoring" Version="5.0.0-preview.21109.1">
<Uri>https://github.com/dotnet/diagnostics</Uri>
<Sha>8816aa8b9beb76548a915eda8db8e28297ae69b2</Sha>
</Dependency>
<Dependency Name="Microsoft.SymbolStore" Version="1.0.211201">
<Uri>https://github.com/dotnet/symstore</Uri>
<Sha>9f0a417f2d96085783b9ef8564fb5f11c3bd8b2f</Sha>
</Dependency>
<Dependency Name="Microsoft.Diagnostics.Runtime" Version="2.0.156101">
<Uri>https://github.com/Microsoft/clrmd</Uri>
<Sha>8b1eadaa0dd50fdd05419764f5606914da56ac9e</Sha>
</Dependency>
<Dependency Name="Microsoft.Diagnostics.Runtime.Utilities" Version="2.0.156101">
<Uri>https://github.com/Microsoft/clrmd</Uri>
<Sha>8b1eadaa0dd50fdd05419764f5606914da56ac9e</Sha>
</Dependency>
<Dependency Name="Microsoft.Dotnet.Sdk.Internal" Version="5.0.100-rc.2.20480.7">
<Uri>https://github.com/dotnet/installer</Uri>
<Sha>53e0c8c7f9c65a13c17f58135557665a5a0c15b1</Sha>
<Dependency Name="Microsoft.Diagnostics.Monitoring.EventPipe" Version="5.0.0-preview.21109.1">
<Uri>https://github.com/dotnet/diagnostics</Uri>
<Sha>8816aa8b9beb76548a915eda8db8e28297ae69b2</Sha>
</Dependency>
</ProductDependencies>
<ToolsetDependencies>
@@ -30,21 +18,5 @@
<Uri>https://github.com/dotnet/arcade</Uri>
<Sha>fc83e59329203724d4a63c4f6c843be62983a35e</Sha>
</Dependency>
<Dependency Name="Microsoft.AspNetCore.App.Ref.Internal" Version="6.0.0-preview.2.21110.12">
<Uri>https://github.com/dotnet/aspnetcore</Uri>
<Sha>be34780484ad29674b9685d1b0352a2e2b6d0e1c</Sha>
</Dependency>
<Dependency Name="Microsoft.AspNetCore.App.Ref" Version="6.0.0-preview.2.21110.12">
<Uri>https://github.com/dotnet/aspnetcore</Uri>
<Sha>be34780484ad29674b9685d1b0352a2e2b6d0e1c</Sha>
</Dependency>
<Dependency Name="Microsoft.NETCore.App.Runtime.win-x64" Version="6.0.0-preview.2.21114.2">
<Uri>https://github.com/dotnet/runtime</Uri>
<Sha>c03776bf46f53da3dfc4b35473f1f9a53d5e0264</Sha>
</Dependency>
<Dependency Name="VS.Redist.Common.NetCore.SharedFramework.x64.6.0" Version="6.0.0-preview.2.21114.2">
<Uri>https://github.com/dotnet/runtime</Uri>
<Sha>c03776bf46f53da3dfc4b35473f1f9a53d5e0264</Sha>
</Dependency>
</ToolsetDependencies>
</Dependencies>

Просмотреть файл

@@ -1,40 +1,14 @@
<Project>
<PropertyGroup>
<RepositoryUrl>https://github.com/dotnet/diagnostics</RepositoryUrl>
<RepositoryUrl>https://github.com/dotnet/dotnet-monitor</RepositoryUrl>
<PreReleaseVersionLabel>preview</PreReleaseVersionLabel>
<VersionPrefix>5.0.0</VersionPrefix>
<DotNetUseShippingVersions>true</DotNetUseShippingVersions>
<AutoGenerateAssemblyVersion>true</AutoGenerateAssemblyVersion>
</PropertyGroup>
<PropertyGroup>
<!-- Latest symstore version updated by darc -->
<MicrosoftSymbolStoreVersion>1.0.211201</MicrosoftSymbolStoreVersion>
<!-- Runtime versions to test -->
<MicrosoftNETCoreApp21Version>2.1.23</MicrosoftNETCoreApp21Version>
<MicrosoftAspNetCoreApp21Version>$(MicrosoftNETCoreApp21Version)</MicrosoftAspNetCoreApp21Version>
<MicrosoftNETCoreApp31Version>3.1.10</MicrosoftNETCoreApp31Version>
<MicrosoftAspNetCoreApp31Version>$(MicrosoftNETCoreApp31Version)</MicrosoftAspNetCoreApp31Version>
<MicrosoftNETCoreApp50Version>5.0.0</MicrosoftNETCoreApp50Version>
<MicrosoftAspNetCoreApp50Version>$(MicrosoftNETCoreApp50Version)</MicrosoftAspNetCoreApp50Version>
<!-- Latest shared runtime version updated by darc -->
<VSRedistCommonNetCoreSharedFrameworkx6460Version>6.0.0-preview.2.21114.2</VSRedistCommonNetCoreSharedFrameworkx6460Version>
<MicrosoftNETCoreAppRuntimewinx64Version>6.0.0-preview.2.21114.2</MicrosoftNETCoreAppRuntimewinx64Version>
<!-- Latest shared aspnetcore version updated by darc -->
<MicrosoftAspNetCoreAppRefInternalVersion>6.0.0-preview.2.21110.12</MicrosoftAspNetCoreAppRefInternalVersion>
<MicrosoftAspNetCoreAppRefVersion>6.0.0-preview.2.21110.12</MicrosoftAspNetCoreAppRefVersion>
<!-- dotnet/installer: Testing version of the SDK. Needed for the signed & entitled host. -->
<MicrosoftDotnetSdkInternalVersion>5.0.100</MicrosoftDotnetSdkInternalVersion>
</PropertyGroup>
<PropertyGroup>
<!-- Opt-in/out repo features -->
<UsingToolXliff>false</UsingToolXliff>
<UsingToolNetFrameworkReferenceAssemblies>true</UsingToolNetFrameworkReferenceAssemblies>
<!-- Build tools -->
<MicrosoftNetCompilersVersion>3.0.0</MicrosoftNetCompilersVersion>
<!-- CoreFX -->
<SystemReflectionMetadataVersion>1.8.1</SystemReflectionMetadataVersion>
<SystemCollectionsImmutableVersion>1.7.1</SystemCollectionsImmutableVersion>
<MicrosoftWin32PrimitivesVersion>4.3.0</MicrosoftWin32PrimitivesVersion>
<UsingToolNetFrameworkReferenceAssemblies>false</UsingToolNetFrameworkReferenceAssemblies>
<!-- Other libs -->
<AzureStorageBlobsVersion>12.6.0</AzureStorageBlobsVersion>
<MicrosoftAspNetCoreVersion>2.1.7</MicrosoftAspNetCoreVersion>
@@ -43,29 +17,11 @@
<MicrosoftAspNetCoreMvcVersion>2.1.3</MicrosoftAspNetCoreMvcVersion>
<MicrosoftAspNetCoreResponseCompressionVersion>2.1.1</MicrosoftAspNetCoreResponseCompressionVersion>
<MicrosoftBclHashCodeVersion>1.1.0</MicrosoftBclHashCodeVersion>
<MicrosoftBclAsyncInterfacesVersion>1.1.0</MicrosoftBclAsyncInterfacesVersion>
<MicrosoftDiagnosticsRuntimeVersion>2.0.161401</MicrosoftDiagnosticsRuntimeVersion>
<MicrosoftDiagnosticsRuntimeUtilitiesVersion>2.0.156101</MicrosoftDiagnosticsRuntimeUtilitiesVersion>
<ParallelStacksRuntimeVersion>2.0.1</ParallelStacksRuntimeVersion>
<MicrosoftDiaSymReaderNativePackageVersion>1.7.0</MicrosoftDiaSymReaderNativePackageVersion>
<MicrosoftDiagnosticsTracingTraceEventVersion>2.0.64</MicrosoftDiagnosticsTracingTraceEventVersion>
<MicrosoftExtensionsConfigurationJsonVersion>2.1.1</MicrosoftExtensionsConfigurationJsonVersion>
<MicrosoftExtensionsConfigurationKeyPerFileVersion>5.0.2</MicrosoftExtensionsConfigurationKeyPerFileVersion>
<MicrosoftExtensionsDependencyInjectionVersion>2.1.1</MicrosoftExtensionsDependencyInjectionVersion>
<MicrosoftExtensionsHostingAbstractionsVersion>2.1.1</MicrosoftExtensionsHostingAbstractionsVersion>
<MicrosoftExtensionsLoggingVersion>2.1.1</MicrosoftExtensionsLoggingVersion>
<MicrosoftExtensionsLoggingConsoleVersion>5.0.0</MicrosoftExtensionsLoggingConsoleVersion>
<!-- We use a newer version of LoggingEventSource due to a bug in an older version -->
<MicrosoftExtensionsLoggingEventSourceVersion>3.1.4</MicrosoftExtensionsLoggingEventSourceVersion>
<SystemCommandLineVersion>2.0.0-beta1.20468.1</SystemCommandLineVersion>
<SystemCommandLineRenderingVersion>2.0.0-beta1.20074.1</SystemCommandLineRenderingVersion>
<SystemMemoryVersion>4.5.4</SystemMemoryVersion>
<SystemTextJsonVersion>4.7.1</SystemTextJsonVersion>
<SystemThreadingChannelsVersion>4.7.0</SystemThreadingChannelsVersion>
<XUnitVersion>2.4.1</XUnitVersion>
<XUnitAbstractionsVersion>2.0.3</XUnitAbstractionsVersion>
<MicrosoftDotNetRemoteExecutorVersion>6.0.0-beta.21105.5</MicrosoftDotNetRemoteExecutorVersion>
<cdbsosversion>10.0.18362</cdbsosversion>
<!-- dotnet-monitor references -->
<MicrosoftDiagnosticsMonitoringVersion>5.0.0-preview.21109.1</MicrosoftDiagnosticsMonitoringVersion>
<MicrosoftDiagnosticsMonitoringEventPipeVersion>5.0.0-preview.21109.1</MicrosoftDiagnosticsMonitoringEventPipeVersion>

Просмотреть файл

@@ -1,97 +0,0 @@
[CmdletBinding(PositionalBinding=$false)]
Param(
[ValidateSet("x86","x64","arm","arm64")][string][Alias('a', "platform")]$architecture = [System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture.ToString().ToLowerInvariant(),
[ValidateSet("Debug","Release")][string][Alias('c')] $configuration = "Debug",
[string][Alias('v')] $verbosity = "minimal",
[switch][Alias('t')] $test,
[switch] $ci,
[switch] $skipmanaged,
[switch] $skipnative,
[switch] $bundletools,
[string] $privatebuildpath = "",
[switch] $cleanupprivatebuild,
[ValidatePattern("(default|\d+\.\d+.\d+(-[a-z0-9\.]+)?)")][string] $dotnetruntimeversion = 'default',
[ValidatePattern("(default|\d+\.\d+.\d+(-[a-z0-9\.]+)?)")][string] $dotnetruntimedownloadversion= 'default',
[string] $runtimesourcefeed = '',
[string] $runtimesourcefeedkey = '',
[Parameter(ValueFromRemainingArguments=$true)][String[]] $remainingargs
)
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"
$crossbuild = $false
if (($architecture -eq "arm") -or ($architecture -eq "arm64")) {
$processor = @([System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture.ToString().ToLowerInvariant())
if ($architecture -ne $processor) {
$crossbuild = $true
}
}
switch ($configuration.ToLower()) {
{ $_ -eq "debug" } { $configuration = "Debug" }
{ $_ -eq "release" } { $configuration = "Release" }
}
$reporoot = Join-Path $PSScriptRoot ".."
$engroot = Join-Path $reporoot "eng"
$artifactsdir = Join-Path $reporoot "artifacts"
$logdir = Join-Path $artifactsdir "log"
$logdir = Join-Path $logdir Windows_NT.$architecture.$configuration
if ($ci) {
$remainingargs = "-ci " + $remainingargs
}
if ($bundletools) {
$remainingargs = "/p:BundleTools=true " + $remainingargs
$remainingargs = '/bl:"$logdir\BundleTools.binlog" ' + $remainingargs
$remainingargs = '-noBl ' + $remainingargs
$skipnative = $True
$test = $False
}
# Remove the private build registry keys
if ($cleanupprivatebuild) {
Invoke-Expression "& `"$engroot\common\msbuild.ps1`" $engroot\CleanupPrivateBuild.csproj /v:$verbosity /t:CleanupPrivateBuild /p:BuildArch=$architecture /p:TestArchitectures=$architecture"
exit $lastExitCode
}
# Install sdk for building, restore and build managed components.
if (-not $skipmanaged) {
Invoke-Expression "& `"$engroot\common\build.ps1`" -build -configuration $configuration -verbosity $verbosity /p:BuildArch=$architecture /p:TestArchitectures=$architecture $remainingargs"
if ($lastExitCode -ne 0) {
exit $lastExitCode
}
}
# Build native components
if (-not $skipnative) {
Invoke-Expression "& `"$engroot\Build-Native.cmd`" -architecture $architecture -configuration $configuration -verbosity $verbosity $remainingargs"
if ($lastExitCode -ne 0) {
exit $lastExitCode
}
}
# Run the xunit tests
if ($test) {
if (-not $crossbuild) {
& "$engroot\common\build.ps1" `
-test `
-configuration $configuration `
-verbosity $verbosity `
-ci:$ci `
/bl:$logdir\Test.binlog `
/p:BuildArch=$architecture `
/p:TestArchitectures=$architecture `
/p:PrivateBuildPath="$privatebuildpath" `
/p:DotnetRuntimeVersion="$dotnetruntimeversion" `
/p:DotnetRuntimeDownloadVersion="$dotnetruntimedownloadversion" `
/p:RuntimeSourceFeed="$runtimesourcefeed" `
/p:RuntimeSourceFeedKey="$runtimesourcefeedkey"
if ($lastExitCode -ne 0) {
exit $lastExitCode
}
}
}
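Taken together, the parameters above make a local Windows build-and-test run a single invocation. A minimal sketch, assuming the script lives at eng\Build.ps1 as the rest of this commit suggests:

    # Build the managed and native components for x64 Release and run the xunit tests
    .\eng\Build.ps1 -c Release -a x64 -test

    # Remove the private-build registry keys added by an earlier -privatebuildpath run
    .\eng\Build.ps1 -cleanupprivatebuild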

Просмотреть файл

@@ -1,564 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
# Obtain the location of the bash script to figure out where the root of the repo is.
source="${BASH_SOURCE[0]}"
# Resolve $source until the file is no longer a symlink
while [[ -h "$source" ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where the
# symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
__ProjectRoot="$( cd -P "$( dirname "$source" )/.." && pwd )"
__BuildOS=Linux
__HostOS=Linux
__BuildArch=x64
__HostArch=x64
__BuildType=Debug
__PortableBuild=1
__ExtraCmakeArgs=""
__ClangMajorVersion=0
__ClangMinorVersion=0
__NumProc=1
__ManagedBuild=true
__NativeBuild=true
__CrossBuild=false
__Test=false
__PrivateBuildPath=""
__CI=false
__Verbosity=minimal
__ManagedBuildArgs=
__TestArgs=
__UnprocessedBuildArgs=
__DotnetRuntimeVersion='default'
__DotnetRuntimeDownloadVersion='default'
__RuntimeSourceFeed=''
__RuntimeSourceFeedKey=''
usage()
{
echo "Usage: $0 [options]"
echo "--skipmanaged- Skip building managed components"
echo "--skipnative - Skip building native components"
echo "--test - run xunit tests"
echo "--privatebuildpath - path to local private runtime build to test"
echo "--architecture <x64|x86|arm|armel|arm64>"
echo "--configuration <debug|release>"
echo "--rootfs <ROOTFS_DIR>"
echo "--stripsymbols - strip symbols into .dbg files"
echo "--clangx.y - optional argument to build using clang version x.y"
echo "--ci - CI lab build"
echo "--verbosity <q[uiet]|m[inimal]|n[ormal]|d[etailed]|diag[nostic]>"
echo "--help - this help message"
exit 1
}
to_lowercase() {
#eval $invocation
echo "$1" | tr '[:upper:]' '[:lower:]'
return 0
}
# Argument types supported by this script:
#
# Build architecture - valid values are: x64, x86, arm, armel, arm64
# Build Type - valid values are: debug, release
#
# Set the default arguments for build
OSName=$(uname -s)
if [ "$OSName" = "Darwin" ]; then
# On OSX, universal binaries make uname -m unreliable: the response changes
# based on what hardware is being emulated.
# Use sysctl instead
if [ "$(sysctl -q -n hw.optional.arm64)" = "1" ]; then
CPUName=arm64
elif [ "$(sysctl -q -n hw.optional.x86_64)" = "1" ]; then
CPUName=x86_64
else
CPUName=$(uname -m)
fi
else
# Use uname to determine what the CPU is.
CPUName=$(uname -p)
# Some Linux platforms report unknown for platform, but the arch for machine.
if [ "$CPUName" == "unknown" ]; then
CPUName=$(uname -m)
fi
fi
case $CPUName in
i686|i386)
echo "Unsupported CPU $CPUName detected, build might not succeed!"
__BuildArch=x86
__HostArch=x86
;;
x86_64)
__BuildArch=x64
__HostArch=x64
;;
armv7l)
echo "Unsupported CPU $CPUName detected, build might not succeed!"
__BuildArch=arm
__HostArch=arm
;;
aarch64|arm64)
__BuildArch=arm64
__HostArch=arm64
;;
*)
echo "Unknown CPU $CPUName detected, configuring as if for x64"
__BuildArch=x64
__HostArch=x64
;;
esac
# Use uname to determine what the OS is.
OSName=$(uname -s)
case $OSName in
Linux)
__BuildOS=Linux
__HostOS=Linux
;;
Darwin)
__BuildOS=OSX
__HostOS=OSX
;;
FreeBSD)
__BuildOS=FreeBSD
__HostOS=FreeBSD
;;
OpenBSD)
__BuildOS=OpenBSD
__HostOS=OpenBSD
;;
NetBSD)
__BuildOS=NetBSD
__HostOS=NetBSD
;;
SunOS)
__BuildOS=SunOS
__HostOS=SunOS
;;
*)
echo "Unsupported OS $OSName detected, configuring as if for Linux"
__BuildOS=Linux
__HostOS=Linux
;;
esac
while :; do
if [ $# -le 0 ]; then
break
fi
# support both "--" and "-" options
opt="$(echo "${1/#--/-}" | awk '{print tolower($0)}')"
case $opt in
-\?|-h|-help)
usage
exit 1
;;
-skipmanaged)
__ManagedBuild=false
;;
-skipnative)
__NativeBuild=false
;;
-build|-b)
__ManagedBuild=true
;;
-test|-t)
__Test=true
;;
-privatebuildpath)
__PrivateBuildPath="$2"
shift
;;
-dotnetruntimeversion)
__DotnetRuntimeVersion="$2"
shift
;;
-dotnetruntimedownloadversion)
__DotnetRuntimeDownloadVersion="$2"
shift
;;
-runtimesourcefeed)
__RuntimeSourceFeed="$2"
shift
;;
-runtimesourcefeedkey)
__RuntimeSourceFeedKey="$2"
shift
;;
-ci)
__CI=true
__ManagedBuildArgs="$__ManagedBuildArgs $1"
__TestArgs="$__TestArgs $1"
;;
-projects)
__ManagedBuildArgs="$__ManagedBuildArgs $1 $2"
__TestArgs="$__TestArgs $1 $2"
shift
;;
-verbosity)
__Verbosity=$2
shift
;;
-configuration|-c)
__BuildType="$(to_lowercase "$2")"
shift
;;
-architecture|-a|-platform)
__BuildArch="$(to_lowercase "$2")"
shift
;;
-rootfs)
export ROOTFS_DIR="$2"
shift
;;
-portablebuild=false)
__PortableBuild=0
;;
-stripsymbols)
__ExtraCmakeArgs="$__ExtraCmakeArgs -DSTRIP_SYMBOLS=true"
;;
-clang*)
__Compiler=clang
# clangx.y or clang-x.y
version="$(echo "$lowerI" | tr -d '[:alpha:]-=')"
parts=(${version//./ })
__ClangMajorVersion="${parts[0]}"
__ClangMinorVersion="${parts[1]}"
if [[ -z "$__ClangMinorVersion" && "$__ClangMajorVersion" -le 6 ]]; then
__ClangMinorVersion=0;
fi
;;
-clean|-binarylog|-bl|-pipelineslog|-pl|-restore|-r|-rebuild|-pack|-integrationtest|-performancetest|-sign|-publish|-preparemachine)
__ManagedBuildArgs="$__ManagedBuildArgs $1"
;;
-warnaserror|-nodereuse)
__ManagedBuildArgs="$__ManagedBuildArgs $1 $2"
;;
*)
__UnprocessedBuildArgs="$__UnprocessedBuildArgs $1"
;;
esac
shift
done
if [ "$__BuildType" == "release" ]; then
__BuildType=Release
fi
if [ "$__BuildType" == "debug" ]; then
__BuildType=Debug
fi
# Needs to be set for generating the version source file / msbuild
if [[ -z $NUGET_PACKAGES ]]; then
if [[ $__CI == true ]]; then
export NUGET_PACKAGES="$__ProjectRoot/.packages"
else
export NUGET_PACKAGES="$HOME/.nuget/packages"
fi
fi
echo $NUGET_PACKAGES
__RootBinDir=$__ProjectRoot/artifacts
__BinDir=$__RootBinDir/bin/$__BuildOS.$__BuildArch.$__BuildType
__LogDir=$__RootBinDir/log/$__BuildOS.$__BuildArch.$__BuildType
__IntermediatesDir=$__RootBinDir/obj/$__BuildOS.$__BuildArch.$__BuildType
__ExtraCmakeArgs="$__ExtraCmakeArgs -DCLR_MANAGED_BINARY_DIR=$__RootBinDir/bin -DCLR_BUILD_TYPE=$__BuildType"
__DotNetCli=$__ProjectRoot/.dotnet/dotnet
# Specify path to be set for CMAKE_INSTALL_PREFIX.
# This is where all built native libraries will be copied to.
export __CMakeBinDir="$__BinDir"
if [[ "$__BuildArch" == "armel" ]]; then
# Armel cross build is Tizen specific and does not support Portable RID build
__PortableBuild=0
fi
# Configure environment if we are doing a cross compile.
if [ "${__BuildArch}" != "${__HostArch}" ]; then
__CrossBuild=true
export CROSSCOMPILE=1
if [ "${__BuildOS}" != "OSX" ]; then
if ! [[ -n "$ROOTFS_DIR" ]]; then
echo "ERROR: ROOTFS_DIR not set for cross build"
exit 1
fi
echo "ROOTFS_DIR: $ROOTFS_DIR"
fi
fi
mkdir -p "$__IntermediatesDir"
mkdir -p "$__LogDir"
mkdir -p "$__CMakeBinDir"
build_native()
{
platformArch="$1"
intermediatesForBuild="$2"
extraCmakeArguments="$3"
# All set to commence the build
echo "Commencing $__DistroRid build for $__BuildOS.$__BuildArch.$__BuildType in $intermediatesForBuild"
generator=""
buildFile="Makefile"
buildTool="make"
scriptDir="$__ProjectRoot/eng"
pushd "$intermediatesForBuild"
echo "Invoking \"$scriptDir/gen-buildsys-clang.sh\" \"$__ProjectRoot\" $__ClangMajorVersion \"$__ClangMinorVersion\" $platformArch "$scriptDir" $__BuildType $generator $extraCmakeArguments"
"$scriptDir/gen-buildsys-clang.sh" "$__ProjectRoot" $__ClangMajorVersion "$__ClangMinorVersion" $platformArch "$scriptDir" $__BuildType $generator "$extraCmakeArguments"
popd
if [ ! -f "$intermediatesForBuild/$buildFile" ]; then
echo "Failed to generate build project!"
exit 1
fi
# Check that the makefiles were created.
pushd "$intermediatesForBuild"
echo "Executing $buildTool install -j $__NumProc"
$buildTool install -j $__NumProc | tee $__LogDir/make.log
if [ $? != 0 ]; then
echo "Failed to build."
exit 1
fi
popd
}
initTargetDistroRid()
{
source "$__ProjectRoot/eng/init-distro-rid.sh"
local passedRootfsDir=""
# Only pass ROOTFS_DIR when cross-building and the current platform is not OSX, which doesn't use a rootfs
if [ $__CrossBuild == true -a "$__HostOS" != "OSX" ]; then
passedRootfsDir=${ROOTFS_DIR}
fi
initDistroRidGlobal ${__BuildOS} ${__BuildArch} ${__PortableBuild} ${passedRootfsDir}
}
#
# Managed build
#
if [ $__ManagedBuild == true ]; then
echo "Commencing managed build for $__BuildType in $__RootBinDir/bin"
"$__ProjectRoot/eng/common/build.sh" --build --configuration "$__BuildType" --verbosity "$__Verbosity" $__ManagedBuildArgs $__UnprocessedBuildArgs
if [ $? != 0 ]; then
exit 1
fi
fi
#
# Initialize the target distro name
#
initTargetDistroRid
echo "RID: $__DistroRid"
# Set default clang version
if [[ $__ClangMajorVersion == 0 && $__ClangMinorVersion == 0 ]]; then
if [[ "$__BuildArch" == "arm" || "$__BuildArch" == "armel" ]]; then
__ClangMajorVersion=5
__ClangMinorVersion=0
elif [[ "$__BuildArch" == "arm64" && "$__DistroRid" == "linux-musl-arm64" ]]; then
__ClangMajorVersion=9
__ClangMinorVersion=
else
__ClangMajorVersion=3
__ClangMinorVersion=9
fi
fi
#
# Setup LLDB paths for native build
#
if [ "$__HostOS" == "OSX" ]; then
export LLDB_H=$__ProjectRoot/src/SOS/lldbplugin/swift-4.0
export LLDB_LIB=$(xcode-select -p)/../SharedFrameworks/LLDB.framework/LLDB
export LLDB_PATH=$(xcode-select -p)/usr/bin/lldb
export MACOSX_DEPLOYMENT_TARGET=10.12
# If Xcode 9.2 exists (like on the CI/build machines), use that. Xcode 9.3 or
# greater (swift 4.1 lldb) doesn't work that well (seg faults on exit).
if [ -f "/Applications/Xcode_9.2.app/Contents/Developer/usr/bin/lldb" ]; then
if [ -f "/Applications/Xcode_9.2.app/Contents/SharedFrameworks/LLDB.framework/LLDB" ]; then
export LLDB_PATH=/Applications/Xcode_9.2.app/Contents/Developer/usr/bin/lldb
export LLDB_LIB=/Applications/Xcode_9.2.app/Contents/SharedFrameworks/LLDB.framework/LLDB
fi
fi
if [ ! -f $LLDB_LIB ]; then
echo "Cannot find the lldb library. Try installing Xcode."
exit 1
fi
# Workaround bad python version in /usr/local/bin/python2.7 on lab machines
export PATH=/usr/bin:$PATH
which python
python --version
if [[ "$__BuildArch" == x64 ]]; then
__ExtraCmakeArgs="-DCMAKE_OSX_ARCHITECTURES=\"x86_64\" $__ExtraCmakeArgs"
elif [[ "$__BuildArch" == arm64 ]]; then
__ExtraCmakeArgs="-DCMAKE_OSX_ARCHITECTURES=\"arm64\" $__ExtraCmakeArgs"
else
echo "Error: Unknown OSX architecture $__BuildArch."
exit 1
fi
fi
#
# Build native components
#
if [ ! -e $__DotNetCli ]; then
echo "dotnet cli not installed $__DotNetCli"
exit 1
fi
if [ $__NativeBuild == true ]; then
echo "Generating Version Source File"
__GenerateVersionLog="$__LogDir/GenerateVersion.binlog"
"$__ProjectRoot/eng/common/msbuild.sh" \
$__ProjectRoot/eng/CreateVersionFile.csproj \
/v:$__Verbosity \
/bl:$__GenerateVersionLog \
/t:GenerateVersionFiles \
/restore \
/p:GenerateVersionSourceFile=true \
/p:NativeVersionSourceFile="$__IntermediatesDir/version.cpp" \
/p:Configuration="$__BuildType" \
/p:Platform="$__BuildArch" \
$__UnprocessedBuildArgs
if [ $? != 0 ]; then
echo "Generating Version Source File FAILED"
exit 1
fi
build_native "$__BuildArch" "$__IntermediatesDir" "$__ExtraCmakeArgs"
fi
#
# Copy the native SOS binaries to where these tools expect them for testing
#
if [[ $__NativeBuild == true || $__Test == true ]]; then
__dotnet_sos=$__RootBinDir/bin/dotnet-sos/$__BuildType/netcoreapp2.1/publish/$__DistroRid
__dotnet_dump=$__RootBinDir/bin/dotnet-dump/$__BuildType/netcoreapp2.1/publish/$__DistroRid
mkdir -p "$__dotnet_sos"
mkdir -p "$__dotnet_dump"
cp "$__BinDir"/* "$__dotnet_sos"
echo "Copied SOS to $__dotnet_sos"
cp "$__BinDir"/* "$__dotnet_dump"
echo "Copied SOS to $__dotnet_dump"
fi
#
# Run xunit tests
#
if [ $__Test == true ]; then
if [ $__CrossBuild != true ]; then
if [ "$LLDB_PATH" == "" ]; then
export LLDB_PATH="$(which lldb-3.9.1 2> /dev/null)"
if [ "$LLDB_PATH" == "" ]; then
export LLDB_PATH="$(which lldb-3.9 2> /dev/null)"
if [ "$LLDB_PATH" == "" ]; then
export LLDB_PATH="$(which lldb-4.0 2> /dev/null)"
if [ "$LLDB_PATH" == "" ]; then
export LLDB_PATH="$(which lldb-5.0 2> /dev/null)"
if [ "$LLDB_PATH" == "" ]; then
export LLDB_PATH="$(which lldb 2> /dev/null)"
fi
fi
fi
fi
fi
if [ "$GDB_PATH" == "" ]; then
export GDB_PATH="$(which gdb 2> /dev/null)"
fi
echo "lldb: '$LLDB_PATH' gdb: '$GDB_PATH'"
"$__ProjectRoot/eng/common/build.sh" \
--test \
--configuration "$__BuildType" \
--verbosity "$__Verbosity" \
/bl:$__LogDir/Test.binlog \
/p:BuildArch="$__BuildArch" \
/p:PrivateBuildPath="$__PrivateBuildPath" \
/p:DotnetRuntimeVersion="$__DotnetRuntimeVersion" \
/p:DotnetRuntimeDownloadVersion="$__DotnetRuntimeDownloadVersion" \
/p:RuntimeSourceFeed="$__RuntimeSourceFeed" \
/p:RuntimeSourceFeedKey="$__RuntimeSourceFeedKey" \
$__TestArgs
if [ $? != 0 ]; then
exit 1
fi
fi
fi
echo "BUILD: Repo sucessfully built."
echo "BUILD: Product binaries are available at $__CMakeBinDir"

Просмотреть файл

@@ -1,28 +1,14 @@
parameters:
# Job name
name: ''
# Agent OS (Windows_NT, Linux, MacOS, FreeBSD)
osGroup: Windows_NT
displayName: ''
osGroup: Windows
configuration: Release
platform: x64
# Additional variables
variables: {}
# Build strategy - matrix
strategy: ''
# Optional: Job timeout
timeoutInMinutes: 180
# Optional: Docker image to use
dockerImage: ''
# Optional: ROOTFS_DIR to use
crossrootfsDir: ''
# Optional: test only job if true
testOnly: false
# Depends on
dependsOn: ''
@@ -30,42 +16,45 @@ jobs:
- template: /eng/common/templates/job/job.yml
parameters:
name: ${{ parameters.name }}
displayName: ${{ coalesce(parameters.displayName, parameters.name) }}
timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
enableMicrobuild: true
enableMicrobuild: false
enableTelemetry: true
helixRepo: dotnet/diagnostics
helixRepo: dotnet/dotnet-monitor
artifacts:
publish:
logs: true
manifests: true
pool:
# Public Linux Build Pool
${{ if and(eq(parameters.osGroup, 'Linux'), eq(variables['System.TeamProject'], 'public')) }}:
name: NetCorePublic-Pool
queue: BuildPool.Ubuntu.1604.Amd64.Open
${{ if in(parameters.osGroup, 'Linux', 'Linux-musl') }}:
${{ if eq(variables['System.TeamProject'], 'public') }}:
vmImage: ubuntu-16.04
# Official Build Linux Pool
${{ if and(eq(parameters.osGroup, 'Linux'), ne(variables['System.TeamProject'], 'public')) }}:
name: NetCoreInternal-Pool
queue: BuildPool.Ubuntu.1604.Amd64
# Official Build Linux Pool
${{ if ne(variables['System.TeamProject'], 'public') }}:
name: NetCoreInternal-Pool
queue: BuildPool.Ubuntu.1604.Amd64
# FreeBSD builds only in the internal project
${{ if and(eq(parameters.osGroup, 'FreeBSD'), ne(variables['System.TeamProject'], 'public')) }}:
name: dnceng-freebsd-internal
# Build OSX Pool (we don't have on-prem OSX BuildPool)
${{ if in(parameters.osGroup, 'MacOS', 'MacOS_cross') }}:
# Build OSX Pool
${{ if in(parameters.osGroup, 'MacOS') }}:
vmImage: macOS-10.15
# Official Build Windows Pool
${{ if and(eq(parameters.osGroup, 'Windows_NT'), ne(variables['System.TeamProject'], 'public')) }}:
name: NetCoreInternal-Pool
queue: BuildPool.Windows.10.Amd64.VS2017
# Public Windows Build Pool
${{ if and(eq(parameters.osGroup, 'Windows_NT'), eq(variables['System.TeamProject'], 'public')) }}:
name: NetCorePublic-Pool
queue: BuildPool.Windows.10.Amd64.VS2017.Open
${{ if eq(parameters.osGroup, 'Windows') }}:
${{ if eq(variables['System.TeamProject'], 'public') }}:
name: Hosted VS2019
${{ if ne(parameters.strategy, '') }}:
strategy: ${{ parameters.strategy }}
${{ if ne(variables['System.TeamProject'], 'public') }}:
name: NetCoreInternal-Pool
queue: BuildPool.Windows.10.Amd64.VS2019
${{ if eq(parameters.osGroup, 'Linux') }}:
container: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7-359e48e-20200313130914
${{ if eq(parameters.osGroup, 'Linux-musl') }}:
container: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.9-WithNode-20200602002639-0fc54a3
${{ if ne(parameters.dependsOn, '') }}:
dependsOn: ${{ parameters.dependsOn }}
@@ -75,160 +64,56 @@ jobs:
variables:
- ${{ insert }}: ${{ parameters.variables }}
- _PortableLinuxBuild: CentOS_7_$(_BuildArch)_$(_BuildConfig)
- _DockerImageName: ${{ parameters.dockerImage }}
- _PhaseName : ${{ parameters.name }}
- _HelixType: build/product
- _HelixBuildConfig: $(_BuildConfig)
- _Pipeline_StreamDumpDir: $(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/streams
- _InternalInstallArgs: ''
# For testing msrc's and service releases. The RuntimeSourceVersion is either "default" or the service release version to test
- _HelixBuildConfig: ${{ parameters.configuration }}
- _SignType: test
- _InternalBuildArgs: ''
- _InternalPublishArgs: ''
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- _InternalInstallArgs:
-dotnetruntimeversion '$(DotnetRuntimeVersion)'
-dotnetruntimedownloadversion '$(DotnetRuntimeDownloadVersion)'
-runtimesourcefeed '$(RuntimeFeedUrl)'
-runtimesourcefeedkey '$(RuntimeFeedBase64SasToken)'
- _SignType: real
- _InternalBuildArgs: >-
-pack -sign
/p:DotNetSignType=$(_SignType)
/p:TeamName=$(_TeamName)
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
- _InternalPublishArgs: -publish /p:DotNetPublishUsingPipelines=true
# Only enable publishing in non-public, non PR scenarios.
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- _HelixSource: official/dotnet/arcade/$(Build.SourceBranch)
- _HelixSource: official/dotnet/dotnet-monitor/$(Build.SourceBranch)
- ${{ if and(ne(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'PullRequest')) }}:
- _HelixSource: pr-internal/dotnet/dotnet-monitor/$(Build.SourceBranch)
- ${{ if or(eq(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'PullRequest')) }}:
- _HelixSource: pr/dotnet/arcade/$(Build.SourceBranch)
- _HelixSource: pr/dotnet/dotnet-monitor/$(Build.SourceBranch)
- ${{ if eq(parameters.testOnly, 'true') }}:
- _LinuxScript: $(Build.SourcesDirectory)/eng/citest.sh
- ${{ if ne(parameters.testOnly, 'true') }}:
- _LinuxScript: $(Build.SourcesDirectory)/eng/cibuild.sh
# This is only required for cross builds.
- ${{ if eq(parameters.crossrootfsDir, '') }}:
- _RootFs: ''
- ${{ if ne(parameters.crossrootfsDir, '') }}:
- _RootFs: --rootfs ${{ parameters.crossrootfsDir }}
- ${{ if eq(parameters.osGroup, 'Windows') }}:
- scriptExt: '.cmd'
- ${{ if ne(parameters.osGroup, 'Windows') }}:
- scriptExt: '.sh'
steps:
- ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
- script: $(Build.SourcesDirectory)\eng\cibuild.cmd
-configuration $(_BuildConfig)
-architecture $(_BuildArch)
-prepareMachine
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
$(_InternalInstallArgs)
displayName: Build / Test
condition: succeeded()
- script: >-
$(Build.SourcesDirectory)/build$(scriptExt)
-ci
-c ${{ parameters.configuration }}
-platform ${{ parameters.platform }}
-prepareMachine
$(_InternalBuildArgs)
$(_InternalPublishArgs)
displayName: Build
condition: succeeded()
- ${{ if eq(parameters.osGroup, 'Linux') }}:
- ${{ if eq(parameters.testOnly, 'true') }}:
- task: DownloadPipelineArtifact@2
displayName: Download Linux Artifacts
inputs:
artifactName: $(_PortableLinuxBuild)
targetPath: '$(Build.SourcesDirectory)/artifacts/bin/Linux.$(_BuildArch).$(_BuildConfig)'
condition: succeeded()
- script: $(Build.SourcesDirectory)/eng/docker-build.sh
--docker-image $(_DockerImageName)
--source-directory $(Build.SourcesDirectory)
--container-name diagnostics-$(Build.BuildId)
$(_LinuxScript) $(_RootFs)
--configuration $(_BuildConfig)
--architecture $(_BuildArch)
--prepareMachine
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
/p:BUILD_BUILDNUMBER=$(BUILD.BUILDNUMBER)
$(_InternalInstallArgs)
displayName: Docker Build / Test
condition: succeeded()
- ${{ if eq(parameters.osGroup, 'MacOS') }}:
- script: $(Build.SourcesDirectory)/eng/cibuild.sh
--configuration $(_BuildConfig)
--architecture $(_BuildArch)
--prepareMachine
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
$(_InternalInstallArgs)
displayName: Build / Test
condition: succeeded()
- ${{ if eq(parameters.osGroup, 'MacOS_cross') }}:
- script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_12.2.app/Contents/Developer"
- script: $(Build.SourcesDirectory)/eng/build.sh
--restore
--ci
--stripsymbols
--configuration $(_BuildConfig)
--architecture $(_BuildArch)
--prepareMachine
/p:OfficialBuildId=$(BUILD.BUILDNUMBER)
$(_InternalInstallArgs)
displayName: Build / Test
condition: succeeded()
- ${{ if ne(variables['System.TeamProject'], 'public') }}:
- task: PublishBuildArtifacts@1
displayName: Publish Build Artifacts
inputs:
pathtoPublish: '$(Build.SourcesDirectory)/artifacts/$(_PublishArtifacts)'
artifactName: $(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
condition: ne(variables['_PublishArtifacts'], '')
- task: PublishBuildArtifacts@1
displayName: Publish Artifacts on failure
inputs:
PathtoPublish: '$(Build.SourcesDirectory)/artifacts/bin'
PublishLocation: Container
ArtifactName: Artifacts_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
continueOnError: true
condition: failed()
- task: PublishBuildArtifacts@1
displayName: Publish Dump Artifacts on failure
inputs:
PathtoPublish: '$(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/dumps'
PublishLocation: Container
ArtifactName: Dumps_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
continueOnError: true
condition: failed()
- task: PublishBuildArtifacts@1
displayName: Publish Stream Artifacts on failure
inputs:
PathtoPublish: $(_Pipeline_StreamDumpDir)
PublishLocation: Container
ArtifactName: Streams_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
continueOnError: true
condition: failed()
- task: CopyFiles@2
displayName: Gather Logs
inputs:
sourceFolder: '$(Build.SourcesDirectory)/artifacts'
contents: '?(log|TestResults)/**'
targetFolder: '$(Build.StagingDirectory)/BuildLogs'
continueOnError: true
condition: always()
- task: PublishBuildArtifacts@1
displayName: Publish Logs
inputs:
PathtoPublish: '$(Build.StagingDirectory)/BuildLogs'
PublishLocation: Container
ArtifactName: Logs_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
continueOnError: true
condition: always()
# Publish test results to Azure Pipelines
- task: PublishTestResults@2
inputs:
testResultsFormat: xUnit
testResultsFiles: '**/*UnitTests*.xml'
searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults'
failTaskOnFailedTests: true
testRunTitle: 'Tests $(_PhaseName) $(_BuildArch) $(_BuildConfig)'
publishRunAttachments: true
mergeTestResults: true
buildConfiguration: ${{ parameters.name }}
continueOnError: true
condition: ne(variables['_BuildOnly'], 'true')
# TODO: When there are actual tests to run, add -test to the parameters above;
# a better idea might be to run these in Helix.
# # Publish test results to Azure Pipelines
# - task: PublishTestResults@2
# inputs:
# testResultsFormat: xUnit
# testResultsFiles: '**/*UnitTests*.xml'
# searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults'
# failTaskOnFailedTests: true
# testRunTitle: 'Tests ${{ parameters.name }}'
# publishRunAttachments: true
# mergeTestResults: true
# buildConfiguration: ${{ parameters.name }}
# continueOnError: true

Просмотреть файл

@@ -1,22 +0,0 @@
@echo off
setlocal
set "_commonArgs=-restore -ci -prepareMachine -verbosity minimal -configuration Release"
set "_logDir=%~dp0..\artifacts\log\Release\"
echo Creating packages
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0common\Build.ps1""" %_commonArgs% -pack -noBl /bl:'%_logDir%Pack.binlog' %*"
if NOT '%ERRORLEVEL%' == '0' goto ExitWithCode
echo Creating bundles
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0Build.ps1""" %_commonArgs% -bundletools %*"
if NOT '%ERRORLEVEL%' == '0' goto ExitWithCode
echo Signing and publishing manifest
powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0common\Build.ps1""" %_commonArgs% -sign -publish -noBl /bl:'%_logDir%SignPublish.binlog' %*"
if NOT '%ERRORLEVEL%' == '0' goto ExitWithCode
exit /b 0
:ExitWithCode
exit /b !__exitCode!

Просмотреть файл

@@ -1,43 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
source="${BASH_SOURCE[0]}"
# resolve $SOURCE until the file is no longer a symlink
while [[ -h $source ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where
# the symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
# Fix any CI lab docker image problems
__osname=$(uname -s)
if [ "$__osname" == "Linux" ]; then
if [ -e /etc/os-release ]; then
source /etc/os-release
if [[ $ID == "ubuntu" ]]; then
if [[ $VERSION_ID == "18.04" ]]; then
# Fix the CI lab's ubuntu 18.04 docker image: install curl.
sudo apt-get update
sudo apt-get install -y curl
fi
fi
elif [ -e /etc/redhat-release ]; then
__redhatRelease=$(</etc/redhat-release)
if [[ $__redhatRelease == "CentOS release 6."* || $__redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]; then
source scl_source enable python27 devtoolset-2
fi
fi
fi
"$scriptroot/build.sh" --restore --test --ci --stripsymbols $@
if [[ $? != 0 ]]; then
exit 1
fi

Просмотреть файл

@@ -1,44 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
source="${BASH_SOURCE[0]}"
# resolve $SOURCE until the file is no longer a symlink
while [[ -h $source ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where
# the symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
# Fix any CI lab docker image problems
__osname=$(uname -s)
if [ "$__osname" == "Linux" ]; then
if [ -e /etc/os-release ]; then
source /etc/os-release
if [[ $ID == "ubuntu" ]]; then
if [[ $VERSION_ID == "18.04" ]]; then
# Fix the CI lab's ubuntu 18.04 docker image: install curl.
sudo apt-get update
sudo apt-get install -y curl
fi
fi
elif [ -e /etc/redhat-release ]; then
__redhatRelease=$(</etc/redhat-release)
if [[ $__redhatRelease == "CentOS release 6."* || $__redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]; then
source scl_source enable python27 devtoolset-2
fi
fi
fi
# Restore and build just the managed components (test infrastructure)
"$scriptroot/build.sh" --restore --skipnative --test --ci $@
if [[ $? != 0 ]]; then
exit 1
fi

Просмотреть файл

@@ -1,20 +0,0 @@
SET (CMAKE_C_FLAGS_INIT "-Wall -std=c11")
SET (CMAKE_C_FLAGS_DEBUG_INIT "-g -O0")
SET (CLR_C_FLAGS_CHECKED_INIT "-g -O1")
# Refer to the below instruction to support __thread with -O2/-O3 on Linux/ARM
# https://github.com/dotnet/runtime/blob/master/docs/workflow/building/coreclr/linux-instructions.md
SET (CMAKE_C_FLAGS_RELEASE_INIT "-g -O1")
SET (CMAKE_C_FLAGS_RELWITHDEBINFO_INIT "-g -O1")
SET (CMAKE_CXX_FLAGS_INIT "-Wall -Wno-null-conversion -std=c++11")
SET (CMAKE_CXX_FLAGS_DEBUG_INIT "-g -O0")
SET (CLR_CXX_FLAGS_CHECKED_INIT "-g -O1")
SET (CMAKE_CXX_FLAGS_RELEASE_INIT "-g -O1")
SET (CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT "-g -O1")
SET (CLR_DEFINES_DEBUG_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
SET (CLR_DEFINES_CHECKED_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
SET (CLR_DEFINES_RELEASE_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
SET (CLR_DEFINES_RELWITHDEBINFO_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
SET (CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})

Просмотреть файл

@@ -1,20 +0,0 @@
SET (CMAKE_C_FLAGS_INIT "-Wall -std=c11")
SET (CMAKE_C_FLAGS_DEBUG_INIT "-g -O0")
SET (CLR_C_FLAGS_CHECKED_INIT "-g -O2")
# Refer to the below instruction to support __thread with -O2/-O3 on Linux/ARM
# https://github.com/dotnet/runtime/blob/master/docs/workflow/building/coreclr/linux-instructions.md
SET (CMAKE_C_FLAGS_RELEASE_INIT "-g -O3")
SET (CMAKE_C_FLAGS_RELWITHDEBINFO_INIT "-g -O2")
SET (CMAKE_CXX_FLAGS_INIT "-Wall -Wno-null-conversion -std=c++11")
SET (CMAKE_CXX_FLAGS_DEBUG_INIT "-g -O0")
SET (CLR_CXX_FLAGS_CHECKED_INIT "-g -O2")
SET (CMAKE_CXX_FLAGS_RELEASE_INIT "-g -O3")
SET (CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT "-g -O2")
SET (CLR_DEFINES_DEBUG_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
SET (CLR_DEFINES_CHECKED_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
SET (CLR_DEFINES_RELEASE_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
SET (CLR_DEFINES_RELWITHDEBINFO_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
SET (CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})

Просмотреть файл

@@ -1,157 +0,0 @@
set(CROSS_ROOTFS $ENV{ROOTFS_DIR})
set(TARGET_ARCH_NAME $ENV{TARGET_BUILD_ARCH})
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_VERSION 1)
if(TARGET_ARCH_NAME STREQUAL "armel")
set(CMAKE_SYSTEM_PROCESSOR armv7l)
set(TOOLCHAIN "arm-linux-gnueabi")
if("$ENV{__DistroRid}" MATCHES "tizen.*")
set(TIZEN_TOOLCHAIN "armv7l-tizen-linux-gnueabi/6.2.1")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "arm")
set(CMAKE_SYSTEM_PROCESSOR armv7l)
if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf)
set(TOOLCHAIN "armv6-alpine-linux-musleabihf")
else()
set(TOOLCHAIN "arm-linux-gnueabihf")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "arm64")
set(CMAKE_SYSTEM_PROCESSOR aarch64)
if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/aarch64-alpine-linux-musl)
set(TOOLCHAIN "aarch64-alpine-linux-musl")
else()
set(TOOLCHAIN "aarch64-linux-gnu")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
set(CMAKE_SYSTEM_PROCESSOR i686)
set(TOOLCHAIN "i686-linux-gnu")
elseif(TARGET_ARCH_NAME STREQUAL "mips64")
set(CMAKE_SYSTEM_PROCESSOR mips64)
set(TOOLCHAIN "mips64el-linux-gnuabi64")
else()
message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. Only armel, arm, arm64, mips64 and x86 are supported!")
endif()
if(DEFINED ENV{TOOLCHAIN})
set(TOOLCHAIN $ENV{TOOLCHAIN})
endif()
# This gets called with CLR_CMAKE_COMPILER set on the first invocation
# but the cmake variable does not carry over to subsequent invocations.
# Use an environment variable to keep CLR_CMAKE_COMPILER around.
if (NOT DEFINED CLR_CMAKE_COMPILER)
set(CLR_CMAKE_COMPILER $ENV{CLR_CMAKE_COMPILER})
else()
set(ENV{CLR_CMAKE_COMPILER} ${CLR_CMAKE_COMPILER})
endif()
# Specify include paths
if(TARGET_ARCH_NAME STREQUAL "armel")
if(DEFINED TIZEN_TOOLCHAIN)
include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/)
include_directories(SYSTEM ${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7l-tizen-linux-gnueabi)
endif()
endif()
# add_compile_param - adds only new options without duplicates.
# arg0 - list with result options, arg1 - list with new options.
# arg2 - optional argument, a quick summary string for optionally using CACHE FORCE mode.
macro(add_compile_param)
if(NOT ${ARGC} MATCHES "^(2|3)$")
message(FATAL_ERROR "Wrong using add_compile_param! Two or three parameters must be given! See add_compile_param description.")
endif()
foreach(OPTION ${ARGV1})
if(NOT ${ARGV0} MATCHES "${OPTION}($| )")
set(${ARGV0} "${${ARGV0}} ${OPTION}")
if(${ARGC} EQUAL "3") # CACHE FORCE mode
set(${ARGV0} "${${ARGV0}}" CACHE STRING "${ARGV2}" FORCE)
endif()
endif()
endforeach()
endmacro()
# Specify link flags
add_compile_param(CROSS_LINK_FLAGS "--sysroot=${CROSS_ROOTFS}")
if (CLR_CMAKE_COMPILER STREQUAL "Clang")
add_compile_param(CROSS_LINK_FLAGS "--gcc-toolchain=${CROSS_ROOTFS}/usr")
add_compile_param(CROSS_LINK_FLAGS "--target=${TOOLCHAIN}")
endif()
if(TARGET_ARCH_NAME STREQUAL "armel")
if(DEFINED TIZEN_TOOLCHAIN) # For Tizen only
add_compile_param(CROSS_LINK_FLAGS "-B${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
add_compile_param(CROSS_LINK_FLAGS "-L${CROSS_ROOTFS}/lib")
add_compile_param(CROSS_LINK_FLAGS "-L${CROSS_ROOTFS}/usr/lib")
add_compile_param(CROSS_LINK_FLAGS "-L${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
add_compile_param(CROSS_LINK_FLAGS "-m32")
endif()
add_compile_param(CMAKE_EXE_LINKER_FLAGS "${CROSS_LINK_FLAGS}" "TOOLCHAIN_EXE_LINKER_FLAGS")
add_compile_param(CMAKE_SHARED_LINKER_FLAGS "${CROSS_LINK_FLAGS}" "TOOLCHAIN_EXE_LINKER_FLAGS")
add_compile_param(CMAKE_MODULE_LINKER_FLAGS "${CROSS_LINK_FLAGS}" "TOOLCHAIN_EXE_LINKER_FLAGS")
# Specify compile options
add_compile_options("--sysroot=${CROSS_ROOTFS}")
if (CLR_CMAKE_COMPILER STREQUAL "Clang")
add_compile_options("--target=${TOOLCHAIN}")
add_compile_options("--gcc-toolchain=${CROSS_ROOTFS}/usr")
endif()
if(TARGET_ARCH_NAME MATCHES "^(arm|armel|arm64)$")
set(CMAKE_C_COMPILER_TARGET ${TOOLCHAIN})
set(CMAKE_CXX_COMPILER_TARGET ${TOOLCHAIN})
set(CMAKE_ASM_COMPILER_TARGET ${TOOLCHAIN})
endif()
if(TARGET_ARCH_NAME MATCHES "^(arm|armel)$")
add_compile_options(-mthumb)
add_compile_options(-mfpu=vfpv3)
if(TARGET_ARCH_NAME STREQUAL "armel")
add_compile_options(-mfloat-abi=softfp)
if(DEFINED TIZEN_TOOLCHAIN)
add_compile_options(-Wno-deprecated-declarations) # compile-time option
add_compile_options(-D__extern_always_inline=inline) # compile-time option
endif()
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
add_compile_options(-m32)
add_compile_options(-Wno-error=unused-command-line-argument)
endif()
# Set LLDB include and library paths
if(TARGET_ARCH_NAME MATCHES "^(arm|armel|x86)$")
if(TARGET_ARCH_NAME STREQUAL "x86")
set(LLVM_CROSS_DIR "$ENV{LLVM_CROSS_HOME}")
else() # arm/armel case
set(LLVM_CROSS_DIR "$ENV{LLVM_ARM_HOME}")
endif()
if(LLVM_CROSS_DIR)
set(WITH_LLDB_LIBS "${LLVM_CROSS_DIR}/lib/" CACHE STRING "")
set(WITH_LLDB_INCLUDES "${LLVM_CROSS_DIR}/include" CACHE STRING "")
set(LLDB_H "${WITH_LLDB_INCLUDES}" CACHE STRING "")
set(LLDB "${LLVM_CROSS_DIR}/lib/liblldb.so" CACHE STRING "")
else()
if(TARGET_ARCH_NAME STREQUAL "x86")
set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/i386-linux-gnu" CACHE STRING "")
set(CHECK_LLVM_DIR "${CROSS_ROOTFS}/usr/lib/llvm-3.8/include")
if(EXISTS "${CHECK_LLVM_DIR}" AND IS_DIRECTORY "${CHECK_LLVM_DIR}")
set(WITH_LLDB_INCLUDES "${CHECK_LLVM_DIR}")
else()
set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include")
endif()
else() # arm/armel case
set(WITH_LLDB_LIBS "${CROSS_ROOTFS}/usr/lib/${TOOLCHAIN}" CACHE STRING "")
set(WITH_LLDB_INCLUDES "${CROSS_ROOTFS}/usr/lib/llvm-3.6/include" CACHE STRING "")
endif()
endif()
endif()
set(CMAKE_FIND_ROOT_PATH "${CROSS_ROOTFS}")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
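For orientation, a hedged sketch of how a cross build might consume this toolchain file follows; it assumes the file lives at eng/cross/toolchain.cmake (as gen-buildsys-clang.sh further down implies) and that ROOTFS_DIR feeds CROSS_ROOTFS the same way tryrun.cmake does. The rootfs path and target architecture are placeholders.

# Illustrative cross-compile configure step; paths and arch are placeholders.
export ROOTFS_DIR=/crossrootfs/arm          # sysroot used as CROSS_ROOTFS
export TARGET_BUILD_ARCH=arm                # read by tryrun.cmake below
cmake -G "Unix Makefiles" \
    -DCMAKE_TOOLCHAIN_FILE=eng/cross/toolchain.cmake \
    -C eng/cross/tryrun.cmake \
    -DCLR_CMAKE_COMPILER=Clang \
    "$(pwd)"                                # directory containing the top-level CMakeLists.txt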


@@ -1,111 +0,0 @@
set(CROSS_ROOTFS $ENV{ROOTFS_DIR})
set(TARGET_ARCH_NAME $ENV{TARGET_BUILD_ARCH})
macro(set_cache_value)
set(${ARGV0} ${ARGV1} CACHE STRING "Result from TRY_RUN" FORCE)
set(${ARGV0}__TRYRUN_OUTPUT "dummy output" CACHE STRING "Output from TRY_RUN" FORCE)
endmacro()
if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv7-alpine-linux-musleabihf OR
EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/armv6-alpine-linux-musleabihf OR
EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/aarch64-alpine-linux-musl)
SET(ALPINE_LINUX 1)
elseif(EXISTS /System/Library/CoreServices)
set(DARWIN 1)
endif()
if(DARWIN)
if(TARGET_ARCH_NAME MATCHES "^(arm64|x64)$")
set_cache_value(FILE_OPS_CHECK_FERROR_OF_PREVIOUS_CALL_EXITCODE 1)
set_cache_value(GETPWUID_R_SETS_ERRNO_EXITCODE 1)
set_cache_value(HAS_POSIX_SEMAPHORES_EXITCODE 1)
set_cache_value(HAVE_BROKEN_FIFO_KEVENT_EXITCODE 1)
set_cache_value(HAVE_BROKEN_FIFO_SELECT_EXITCODE 1)
set_cache_value(HAVE_CLOCK_MONOTONIC_COARSE_EXITCODE 1)
set_cache_value(HAVE_CLOCK_MONOTONIC_EXITCODE 0)
set_cache_value(HAVE_CLOCK_THREAD_CPUTIME_EXITCODE 0)
set_cache_value(HAVE_CLOCK_GETTIME_NSEC_NP_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ACOS_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ASIN_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ATAN2_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_EXP_EXITCODE 1)
set_cache_value(HAVE_COMPATIBLE_ILOGB0_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ILOGBNAN_EXITCODE 1)
set_cache_value(HAVE_COMPATIBLE_LOG10_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_LOG_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_POW_EXITCODE 0)
set_cache_value(HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES_EXITCODE 1)
set_cache_value(HAVE_LARGE_SNPRINTF_SUPPORT_EXITCODE 0)
set_cache_value(HAVE_MMAP_DEV_ZERO_EXITCODE 1)
set_cache_value(HAVE_PROCFS_CTL_EXITCODE 1)
set_cache_value(HAVE_PROCFS_MAPS_EXITCODE 1)
set_cache_value(HAVE_PROCFS_STATUS_EXITCODE 1)
set_cache_value(HAVE_PROCFS_STAT_EXITCODE 1)
set_cache_value(HAVE_SCHED_GETCPU_EXITCODE 1)
set_cache_value(HAVE_SCHED_GET_PRIORITY_EXITCODE 0)
set_cache_value(HAVE_VALID_NEGATIVE_INF_POW_EXITCODE 0)
set_cache_value(HAVE_VALID_POSITIVE_INF_POW_EXITCODE 0)
set_cache_value(HAVE_WORKING_CLOCK_GETTIME_EXITCODE 0)
set_cache_value(HAVE_WORKING_GETTIMEOFDAY_EXITCODE 0)
set_cache_value(MMAP_ANON_IGNORES_PROTECTION_EXITCODE 1)
set_cache_value(ONE_SHARED_MAPPING_PER_FILEREGION_PER_PROCESS_EXITCODE 1)
set_cache_value(PTHREAD_CREATE_MODIFIES_ERRNO_EXITCODE 1)
set_cache_value(REALPATH_SUPPORTS_NONEXISTENT_FILES_EXITCODE 1)
set_cache_value(SEM_INIT_MODIFIES_ERRNO_EXITCODE 1)
set_cache_value(SSCANF_CANNOT_HANDLE_MISSING_EXPONENT_EXITCODE 1)
set_cache_value(SSCANF_SUPPORT_ll_EXITCODE 0)
set_cache_value(UNGETC_NOT_RETURN_EOF_EXITCODE 1)
else()
message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. Only arm64 or x64 is supported for OSX cross build!")
endif()
elseif(TARGET_ARCH_NAME MATCHES "^(armel|arm|arm64|mips64|x86)$")
set_cache_value(FILE_OPS_CHECK_FERROR_OF_PREVIOUS_CALL_EXITCODE 1)
set_cache_value(GETPWUID_R_SETS_ERRNO_EXITCODE 0)
set_cache_value(HAS_POSIX_SEMAPHORES_EXITCODE 0)
set_cache_value(HAVE_CLOCK_MONOTONIC_COARSE_EXITCODE 0)
set_cache_value(HAVE_CLOCK_MONOTONIC_EXITCODE 0)
set_cache_value(HAVE_CLOCK_THREAD_CPUTIME_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ACOS_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ASIN_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ATAN2_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_ILOGB0_EXITCODE 1)
set_cache_value(HAVE_COMPATIBLE_ILOGBNAN_EXITCODE 1)
set_cache_value(HAVE_COMPATIBLE_LOG10_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_LOG_EXITCODE 0)
set_cache_value(HAVE_COMPATIBLE_POW_EXITCODE 0)
set_cache_value(HAVE_LARGE_SNPRINTF_SUPPORT_EXITCODE 0)
set_cache_value(HAVE_MMAP_DEV_ZERO_EXITCODE 0)
set_cache_value(HAVE_PROCFS_CTL_EXITCODE 1)
set_cache_value(HAVE_PROCFS_MAPS_EXITCODE 0)
set_cache_value(HAVE_PROCFS_STATUS_EXITCODE 0)
set_cache_value(HAVE_PROCFS_STAT_EXITCODE 0)
set_cache_value(HAVE_SCHED_GETCPU_EXITCODE 0)
set_cache_value(HAVE_SCHED_GET_PRIORITY_EXITCODE 0)
set_cache_value(HAVE_VALID_NEGATIVE_INF_POW_EXITCODE 0)
set_cache_value(HAVE_VALID_POSITIVE_INF_POW_EXITCODE 0)
set_cache_value(HAVE_WORKING_CLOCK_GETTIME_EXITCODE 0)
set_cache_value(HAVE_WORKING_GETTIMEOFDAY_EXITCODE 0)
set_cache_value(ONE_SHARED_MAPPING_PER_FILEREGION_PER_PROCESS_EXITCODE 1)
set_cache_value(PTHREAD_CREATE_MODIFIES_ERRNO_EXITCODE 1)
set_cache_value(REALPATH_SUPPORTS_NONEXISTENT_FILES_EXITCODE 1)
set_cache_value(SEM_INIT_MODIFIES_ERRNO_EXITCODE 1)
if(ALPINE_LINUX)
set_cache_value(SSCANF_CANNOT_HANDLE_MISSING_EXPONENT_EXITCODE 0)
set_cache_value(SSCANF_SUPPORT_ll_EXITCODE 1)
set_cache_value(UNGETC_NOT_RETURN_EOF_EXITCODE 1)
else()
set_cache_value(SSCANF_CANNOT_HANDLE_MISSING_EXPONENT_EXITCODE 1)
set_cache_value(SSCANF_SUPPORT_ll_EXITCODE 0)
set_cache_value(UNGETC_NOT_RETURN_EOF_EXITCODE 0)
endif()
else()
message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. Only armel, arm, arm64, mips64 and x86 are supported!")
endif()
if(TARGET_ARCH_NAME STREQUAL "x86")
set_cache_value(HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES_EXITCODE 0)
endif()
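These pre-seeded exit codes land in the CMake cache instead of being probed by TRY_RUN at configure time; the following is a quick sanity check, with the build directory as a placeholder.

# After configuring with -C eng/cross/tryrun.cmake, the seeded results show up in CMakeCache.txt.
grep -E 'HAVE_CLOCK_MONOTONIC_EXITCODE|HAVE_PROCFS_MAPS_EXITCODE' <build-dir>/CMakeCache.txt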


@@ -1,74 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
source_directory=
docker_image=
docker_container_name=
while [ $# -ne 0 ]; do
name=$1
case $name in
-s|--source-directory)
shift
source_directory=$1
;;
-i|--docker-image)
shift
docker_image=$1
;;
-c|--container-name)
shift
docker_container_name=$1
;;
*)
args="$args $1"
;;
esac
shift
done
echo "Initialize Docker Container"
docker_bin=$(which docker)
$docker_bin --version
# Get user id
user_name=$(whoami)
echo "user name: $user_name"
user_id=$(id -u $user_name)
echo "user id: $user_id"
# Download image
$docker_bin pull $docker_image
# Create local network to avoid port conflicts when multiple agents run on same machine
$docker_bin network create vsts_network_$docker_container_name
# Create and start container
docker_id="$($docker_bin create -it --rm --privileged --ulimit core=-1 \
--name vsts_container_$docker_container_name \
--network=vsts_network_$docker_container_name \
--volume $source_directory:$source_directory \
--workdir=$source_directory $docker_image bash --verbose)"
$docker_bin start $docker_id
# Create a user with the same uid in the container
container_user_name=vsts_$(echo $user_name | awk '{print tolower($0)}')
echo "container user name: $container_user_name"
# Add a sudo user with the same uid who can run any sudo command without a password
$docker_bin exec $docker_id useradd -K MAIL_DIR=/dev/null -m -u $user_id $container_user_name
$docker_bin exec $docker_id groupadd sudouser
$docker_bin exec $docker_id usermod -a -G sudouser $container_user_name
$docker_bin exec $docker_id su -c "echo '%sudouser ALL=(ALL:ALL) NOPASSWD:ALL' >> /etc/sudoers"
echo "Execute $args"
$docker_bin exec --workdir=$source_directory --user $container_user_name $docker_id $args
lasterrorcode=$?
echo "Cleanup Docker Container/Network"
$docker_bin container stop $docker_id
$docker_bin network rm vsts_network_$docker_container_name
exit $lasterrorcode
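An illustrative invocation of this helper; the script name, image, container name, and build command are placeholders rather than values from the original.

# Run a build inside a disposable privileged container that mounts the source tree.
./init-docker.sh \
    --source-directory "$(pwd)" \
    --docker-image ubuntu:18.04 \
    --container-name diag-build-001 \
    ./build.sh -configuration Release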


@@ -1,55 +0,0 @@
#!/usr/bin/perl
#
# ./cleanup-docker.sh
#
printf "Cleaning up containers\n";
printf "----------------------\n";
my $psList = `docker ps -a`;
my @psItems = split /\n/, $psList;
foreach(@psItems) {
# match 'docker ps' output to capture the container name
if($_ =~ /.*\s+([^\s]+)$/ig) {
my $containerName = $1;
if($containerName !~ /NAME/ig) {
printf "delete $containerName\n";
my $deleteOutput = `docker rm -f $1`;
print "$deleteOutput\n";
}
}
}
printf "Cleaning up volumes\n";
printf "-------------------\n";
my $volumeList = `docker volume ls`;
@volumeItems = split /\n/, $volumeList;
foreach(@volumeItems) {
# match 'docker volume ls' output to capture the volume name
if($_ =~ /([^\s]+)\s+([^\s]+)$/ig) {
my $volumeName = $2;
if($volumeName !~ /NAME/ig) {
printf "delete $volumeName\n";
my $deleteVolumeOutput = `docker volume rm -f $volumeName`;
printf "$deleteVolumeOutput\n";
}
}
}
printf "Cleaning up images\n";
printf "------------------\n";
my $imageList = `docker images`;
@imageItems = split /\n/, $imageList;
foreach(@imageItems) {
# match 'docker images' output to capture the image id
if($_ =~ /([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+.*/ig) {
my $imageId = $3;
if($imageId !~ /IMAGE/ig) {
my $imageRepo = $1;
my $imageTag = $2;
printf "delete $imageId ($imageRepo:$imageTag)\n";
my $deleteImageOutput = `docker rmi -f $imageId`;
printf "$deleteImageOutput\n";
}
}
}
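Illustrative use only, and destructive: it force-removes every container, volume, and image on the host. The file name is an assumption (the header comment also mentions cleanup-docker.sh).

perl cleanup-docker.pl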

eng/dotnet-install.ps1 (vendored)

@@ -1,29 +0,0 @@
[CmdletBinding(PositionalBinding=$false)]
Param(
[string] $InstallDir="<auto>",
[string] $Architecture="<auto>",
[string] $Version = "Latest",
[string] $Runtime,
[string] $RuntimeSourceFeed = "",
[string] $RuntimeSourceFeedKey = "",
[switch] $SkipNonVersionedFiles,
[switch] $NoPath
)
. $PSScriptRoot\common\tools.ps1
try {
if ($Runtime) {
InstallDotNet $InstallDir $Version $Architecture $Runtime $SkipNonVersionedFiles -RuntimeSourceFeed $RuntimeSourceFeed -RuntimeSourceFeedKey $RuntimeSourceFeedKey
}
else {
InstallDotNetSdk $InstallDir $Version $Architecture
}
}
catch {
Write-Host $_.ScriptStackTrace
Write-PipelineTelemetryError -Category 'InitializeToolset' -Message $_
ExitWithExitCode 1
}
ExitWithExitCode 0

eng/dotnet-install.sh (vendored)

@@ -1,75 +0,0 @@
#!/usr/bin/env bash
source="${BASH_SOURCE[0]}"
# resolve $source until the file is no longer a symlink
while [[ -h "$source" ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where the
# symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
. "$scriptroot/common/tools.sh"
install_dir="<auto>"
architecture="<auto>"
version="Latest"
runtime=""
runtimeSourceFeed=""
runtimeSourceFeedKey=""
skip_non_versioned_files=false
while [[ $# > 0 ]]; do
opt="$(echo "$1" | awk '{print tolower($0)}')"
case "$opt" in
-i|--install-dir|-[Ii]nstall[Dd]ir)
shift
install_dir="$1"
;;
-v|--version|-[Vv]ersion)
shift
version="$1"
;;
--arch|--architecture|-[Aa]rch|-[Aa]rchitecture)
shift
architecture="$1"
;;
--runtime|-[Rr]untime)
shift
runtime="$1"
;;
-runtimesourcefeed)
shift
runtimeSourceFeed="$1"
;;
-runtimesourcefeedkey)
shift
runtimeSourceFeedKey="$1"
;;
--skip-non-versioned-files|-[Ss]kip[Nn]on[Vv]ersioned[Ff]iles)
skip_non_versioned_files=true
;;
--no-path|-[Nn]o[Pp]ath)
;;
*)
Write-PipelineTelemetryError -Category 'Build' -Message "Invalid argument: $1"
exit 1
;;
esac
shift
done
if [[ "$runtime" != "" ]]; then
InstallDotNet "$install_dir" "$version" "$architecture" $runtime $skip_non_versioned_files $runtimeSourceFeed $runtimeSourceFeedKey
else
InstallDotNetSdk "$install_dir" "$version" "$architecture"
fi
if [[ $exit_code != 0 ]]; then
Write-PipelineTelemetryError -Category 'InitializeToolset' -Message "dotnet-install.sh failed (exit code '$exit_code')." >&2
ExitWithExitCode $exit_code
fi
ExitWithExitCode 0
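A hedged example of calling this wrapper; the version and runtime values are illustrative, not mandated by the script.

# Install a specific shared runtime into a local .dotnet folder;
# omit --runtime to install the SDK via InstallDotNetSdk instead.
./eng/dotnet-install.sh --install-dir "$(pwd)/.dotnet" --architecture x64 \
    --version 3.1.0 --runtime dotnet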


@@ -1,191 +0,0 @@
#!/usr/bin/env bash
#
# This file invokes cmake and generates the build system for Clang.
#
if [ $# -lt 5 ]
then
echo "Usage..."
echo "gen-buildsys-clang.sh <path to top level CMakeLists.txt> <ClangMajorVersion> <ClangMinorVersion> <Architecture> <ScriptDirectory> [build flavor] [coverage] [ninja] [scan-build] [cmakeargs]"
echo "Specify the path to the top level CMake file - <ProjectK>/src/NDP"
echo "Specify the clang version to use, split into major and minor version"
echo "Specify the target architecture."
echo "Specify the script directory."
echo "Optionally specify the build configuration (flavor.) Defaults to DEBUG."
echo "Optionally specify 'coverage' to enable code coverage build."
echo "Optionally specify 'scan-build' to enable build with clang static analyzer."
echo "Target ninja instead of make. ninja must be on the PATH."
echo "Pass additional arguments to CMake call."
exit 1
fi
# Set up the environment to be used for building with clang.
if command -v "clang-$2.$3" > /dev/null
then
desired_llvm_version="-$2.$3"
elif command -v "clang$2$3" > /dev/null
then
desired_llvm_version="$2$3"
elif command -v "clang-$2$3" > /dev/null
then
desired_llvm_version="-$2$3"
elif command -v clang > /dev/null
then
desired_llvm_version=
else
echo "Unable to find Clang Compiler"
exit 1
fi
export CC="$(command -v clang$desired_llvm_version)"
export CXX="$(command -v clang++$desired_llvm_version)"
build_arch="$4"
script_dir="$5"
buildtype=DEBUG
code_coverage=OFF
build_tests=OFF
scan_build=OFF
generator="Unix Makefiles"
__UnprocessedCMakeArgs=""
for i in "${@:6}"; do
upperI="$(echo $i | awk '{print toupper($0)}')"
case $upperI in
# Possible build types are DEBUG, CHECKED, RELEASE, RELWITHDEBINFO, MINSIZEREL.
DEBUG | CHECKED | RELEASE | RELWITHDEBINFO | MINSIZEREL)
buildtype=$upperI
;;
COVERAGE)
echo "Code coverage is turned on for this build."
code_coverage=ON
;;
NINJA)
generator=Ninja
;;
SCAN-BUILD)
echo "Static analysis is turned on for this build."
scan_build=ON
;;
*)
__UnprocessedCMakeArgs="${__UnprocessedCMakeArgs}${__UnprocessedCMakeArgs:+ }$i"
esac
done
OS=`uname`
# Locate llvm
# This can be a little complicated, because the common use-case of Ubuntu with
# llvm-3.5 installed uses a rather unusual llvm installation with the version
# number postfixed (i.e. llvm-ar-3.5), so we check for that first.
# On FreeBSD the version number is appended without point and dash (i.e.
# llvm-ar35).
# Additionally, OSX doesn't use the llvm- prefix.
if [ $OS = "Linux" -o $OS = "FreeBSD" -o $OS = "OpenBSD" -o $OS = "NetBSD" -o $OS = "SunOS" ]; then
llvm_prefix="llvm-"
elif [ $OS = "Darwin" ]; then
llvm_prefix=""
else
echo "Unable to determine build platform"
exit 1
fi
locate_llvm_exec() {
if command -v "$llvm_prefix$1$desired_llvm_version" > /dev/null 2>&1
then
echo "$(command -v $llvm_prefix$1$desired_llvm_version)"
elif command -v "$llvm_prefix$1" > /dev/null 2>&1
then
echo "$(command -v $llvm_prefix$1)"
else
exit 1
fi
}
llvm_ar="$(locate_llvm_exec ar)"
[[ $? -eq 0 ]] || { echo "Unable to locate llvm-ar"; exit 1; }
llvm_link="$(locate_llvm_exec link)"
[[ $? -eq 0 ]] || { echo "Unable to locate llvm-link"; exit 1; }
llvm_nm="$(locate_llvm_exec nm)"
[[ $? -eq 0 ]] || { echo "Unable to locate llvm-nm"; exit 1; }
if [ $OS = "Linux" -o $OS = "FreeBSD" -o $OS = "OpenBSD" -o $OS = "NetBSD" -o $OS = "SunOS" ]; then
llvm_objdump="$(locate_llvm_exec objdump)"
[[ $? -eq 0 ]] || { echo "Unable to locate llvm-objdump"; exit 1; }
fi
cmake_extra_defines=
if [[ -n "$LLDB_LIB_DIR" ]]; then
cmake_extra_defines="$cmake_extra_defines -DWITH_LLDB_LIBS=$LLDB_LIB_DIR"
fi
if [[ -n "$LLDB_INCLUDE_DIR" ]]; then
cmake_extra_defines="$cmake_extra_defines -DWITH_LLDB_INCLUDES=$LLDB_INCLUDE_DIR"
fi
if [ "$CROSSCOMPILE" == "1" ]; then
platform="$(uname)"
# OSX doesn't use rootfs
if ! [[ -n "$ROOTFS_DIR" || "$platform" == "Darwin" ]]; then
echo "ROOTFS_DIR not set for crosscompile"
exit 1
fi
if [[ -z $CONFIG_DIR ]]; then
CONFIG_DIR="$1/eng/cross"
fi
export TARGET_BUILD_ARCH=$build_arch
cmake_extra_defines="$cmake_extra_defines -C $CONFIG_DIR/tryrun.cmake"
cmake_extra_defines="$cmake_extra_defines -DCLR_UNIX_CROSS_BUILD=1"
if [[ "$platform" == "Darwin" ]]; then
cmake_extra_defines="$cmake_extra_defines -DCMAKE_SYSTEM_NAME=Darwin"
else
cmake_extra_defines="$cmake_extra_defines -DCMAKE_TOOLCHAIN_FILE=$CONFIG_DIR/toolchain.cmake"
fi
fi
if [ $OS == "Linux" ]; then
linux_id_file="/etc/os-release"
if [[ -n "$CROSSCOMPILE" ]]; then
linux_id_file="$ROOTFS_DIR/$linux_id_file"
fi
if [[ -e $linux_id_file ]]; then
source $linux_id_file
cmake_extra_defines="$cmake_extra_defines -DCLR_CMAKE_LINUX_ID=$ID"
fi
fi
if [ "$build_arch" == "armel" ]; then
cmake_extra_defines="$cmake_extra_defines -DARM_SOFTFP=1"
fi
clang_version=$( $CC --version | head -1 | sed 's/[^0-9]*\([0-9]*\.[0-9]*\).*/\1/' )
# Use O1 option when the clang version is smaller than 3.9
# Otherwise use O3 option in release build
if [[ ( ${clang_version%.*} -eq 3 && ${clang_version#*.} -lt 9 ) &&
( "$build_arch" == "arm" || "$build_arch" == "armel" ) ]]; then
overridefile=clang-compiler-override-arm.txt
else
overridefile=clang-compiler-override.txt
fi
__currentScriptDir="$script_dir"
cmake_command=cmake
if [[ "$scan_build" == "ON" ]]; then
export CCC_CC=$CC
export CCC_CXX=$CXX
export SCAN_BUILD_COMMAND=$(command -v scan-build$desired_llvm_version)
cmake_command="$SCAN_BUILD_COMMAND $cmake_command"
fi
$cmake_command \
-G "$generator" \
"-DCMAKE_USER_MAKE_RULES_OVERRIDE=${__currentScriptDir}/$overridefile" \
"-DCMAKE_AR=$llvm_ar" \
"-DCMAKE_LINKER=$llvm_link" \
"-DCMAKE_NM=$llvm_nm" \
"-DCMAKE_OBJDUMP=$llvm_objdump" \
"-DCMAKE_BUILD_TYPE=$buildtype" \
"-DCMAKE_EXPORT_COMPILE_COMMANDS=1 " \
"-DCLR_CMAKE_ENABLE_CODE_COVERAGE=$code_coverage" \
"-DCLR_CMAKE_COMPILER=Clang" \
$cmake_extra_defines \
$__UnprocessedCMakeArgs \
"$1"


@@ -1,60 +0,0 @@
@if not defined _echo @echo off
rem
rem This file invokes cmake and generates the build system for windows.
set argC=0
for %%x in (%*) do Set /A argC+=1
if %argC% lss 3 GOTO :USAGE
if %1=="/?" GOTO :USAGE
setlocal
set basePath=%~dp0
:: remove quotes
set "basePath=%basePath:"=%"
:: remove trailing slash
if %basePath:~-1%==\ set "basePath=%basePath:~0,-1%"
set __SourceDir=%1
set __VSVersion=%2
set __Arch=%3
set __CmakeGenerator=Visual Studio
if /i "%__NMakeMakefiles%" == "1" (
set __CmakeGenerator=NMake Makefiles
) else (
if /i "%__VSVersion%" == "vs2019" (set __CmakeGenerator=%__CmakeGenerator% 16 2019)
if /i "%__VSVersion%" == "vs2017" (set __CmakeGenerator=%__CmakeGenerator% 15 2017)
if /i "%__Arch%" == "x64" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A x64)
if /i "%__Arch%" == "arm" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A ARM)
if /i "%__Arch%" == "arm64" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A ARM64)
if /i "%__Arch%" == "x86" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A Win32)
)
:loop
if [%4] == [] goto end_loop
set __ExtraCmakeParams=%__ExtraCmakeParams% %4
shift
goto loop
:end_loop
if defined CMakePath goto DoGen
:: Eval the output from set-cmake-path.ps1
for /f "delims=" %%a in ('powershell -NoProfile -ExecutionPolicy ByPass "& "%basePath%\set-cmake-path.ps1""') do %%a
:DoGen
"%CMakePath%" "-DCMAKE_USER_MAKE_RULES_OVERRIDE=%basePath%\windows-compiler-override.txt" "-DCMAKE_INSTALL_PREFIX=%__CMakeBinDir%" "-DCLR_CMAKE_HOST_ARCH=%__Arch%" %__ExtraCmakeParams% -G "%__CmakeGenerator%" %__SourceDir%
endlocal
GOTO :DONE
:USAGE
echo "Usage..."
echo "gen-buildsys-win.bat <path to top level CMakeLists.txt> <VSVersion>"
echo "Specify the path to the top level CMake file - <ProjectK>/src/NDP"
echo "Specify the VSVersion to be used - VS2017 or VS2019"
EXIT /B 1
:DONE
EXIT /B 0


@@ -1,11 +0,0 @@
{
# Remove the CR character in case the sources are mapped from
# a Windows share and contain CRLF line endings
gsub(/\r/,"", $0);
# Skip empty lines and comment lines starting with semicolon
if (NF && !match($0, /^[[:space:]]*;/))
{
print "_" $0;
}
}
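A minimal sketch of what this filter does: non-empty lines that are not ';' comments are re-emitted with a leading underscore, the usual decoration for exported C symbols on macOS. The awk file name and the sample symbols are hypothetical.

printf 'DllMain\n; comment line\nSOS_Initialize\n' | awk -f prefix-underscore.awk
# prints:
# _DllMain
# _SOS_Initialize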

Some files were not shown because too many files changed in this diff.