diff --git a/.build/CBT/CBT.Core.dll b/.build/CBT/CBT.Core.dll
new file mode 100644
index 0000000..8098a29
Binary files /dev/null and b/.build/CBT/CBT.Core.dll differ
diff --git a/.build/CBT/build.props b/.build/CBT/build.props
new file mode 100644
index 0000000..c809c84
--- /dev/null
+++ b/.build/CBT/build.props
@@ -0,0 +1,158 @@
+
+
+
+
+
+ $(MSBuildAllProjects);$(MSBuildThisFileFullPath)
+
+ $(EnlistmentRoot.TrimEnd('\\'))
+
+
+ $(MSBuildThisFileDirectory)
+ $(CBTGlobalPath.TrimEnd('\\'))
+
+
+ $([System.IO.Path]::GetDirectoryName($(CBTGlobalPath)))\Local
+ $(CBTLocalPath.TrimEnd('\\'))
+
+ $(CBTLocalPath)\Extensions
+
+ Debug
+ $(DefaultProjectConfiguration)
+ $(DefaultProjectPlatform)
+
+
+
+
+
+ $([System.IO.Path]::Combine($(CBTLocalPath), 'CBTModules', 'CBTModules.proj'))
+ $([System.IO.Path]::Combine($(CBTLocalPath), 'CBTModules.proj'))
+ $([System.IO.Path]::Combine($(CBTLocalPath), 'CBTModules', 'packages.config'))
+ $([System.IO.Path]::Combine($(CBTLocalPath), 'packages.config'))
+ $([System.IO.Path]::GetFullPath($(CBTModulePackageConfigPath)))
+
+
+
+ $(MSBuildThisFileDirectory)CBT.Core.dll
+ $(MSBuildThisFileFullPath);$(CBTCoreAssemblyPath);$(CBTModulePackageConfigPath)
+
+
+
+
+
+ $(MSBuildThisFileDirectory)obj
+
+ $(CBTIntermediateOutputPath)\Modules
+ $(CBTModulePath)\$(MSBuildThisFile)
+ $(CBTModulePath)\Extensions
+ %24(CBTLocalBuildExtensionsPath)\%24(MSBuildThisFile)
+
+
+ $(CBTIntermediateOutputPath)\NuGet
+ $(CBTCoreAssemblyPath)
+ CBT.Core.Internal.DefaultNuGetDownloader
+ CBT.Core.Tasks.RestoreModules
+ $(CBTNuGetBinDir)\NuGet.exe
+ restore "$(CBTModulePackageConfigPath)" -NonInteractive
+ $(CBTModuleRestoreCommandArguments) $(CBTModuleRestoreCommandAdditionalArguments)
+
+
+ false
+
+ $(CBTCoreAssemblyPath.GetType().Assembly.GetType('System.AppDomain').GetProperty('CurrentDomain').GetValue(null).SetData('CBT_CORE_ASSEMBLY', $(CBTCoreAssemblyPath.GetType().Assembly.GetType('System.AppDomain').GetProperty('CurrentDomain').GetValue(null).Load($(CBTCoreAssemblyPath.GetType().Assembly.GetType('System.IO.File').GetMethod('ReadAllBytes').Invoke(null, $([System.IO.Directory]::GetFiles($([System.IO.Path]::GetDirectoryName($(CBTCoreAssemblyPath))), $([System.IO.Path]::GetFileName($(CBTCoreAssemblyPath)))))))))))
+ $(CBTCoreAssemblyPath.GetType().Assembly.GetType('System.AppDomain').GetProperty('CurrentDomain').GetValue(null).GetData('CBT_CORE_ASSEMBLY'))
+ $(CBTCoreAssemblyPath.GetType().Assembly.GetType('System.AppDomain').GetProperty('CurrentDomain').GetValue(null).GetData('CBT_CORE_ASSEMBLY').CreateInstance($(CBTModuleRestoreTaskName)).Execute($(CBTModuleImportsAfter.Split(';')), $(CBTModuleImportsBefore.Split(';')), $(CBTModuleExtensionsPath), $(CBTModulePropertiesFile), $(CBTNuGetDownloaderAssemblyPath), $(CBTNuGetDownloaderClassName), '$(CBTNuGetDownloaderArguments)', $(CBTModuleRestoreInputs.Split(';')), $(CBTModulePackageConfigPath), $(CBTModuleRestoreCommand), $(CBTModuleRestoreCommandArguments), $(MSBuildProjectFullPath), $(MSBuildBinPath)))
+
+
+
+
+
+ CBT1000
+
+
+ CBT1001
+
+
+ CBT1002
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+
+
+
+
+
+
+
+
+
+
+
+ $(_CurrentProjectJsonPath)
+
+
+ $(RestoreProjectStyle)
+ $(NuGetProjectStyle)
+
+
+ $(RestoreOutputAbsolutePath)
+
+
+ %(PackageReference.Identity)
+ %(PackageReference.Version)
+
+
+
+
+
+
+
+
+
+
diff --git a/.build/Local/CBTModules/CBTModules.proj b/.build/Local/CBTModules/CBTModules.proj
new file mode 100644
index 0000000..8a8d130
--- /dev/null
+++ b/.build/Local/CBTModules/CBTModules.proj
@@ -0,0 +1,60 @@
+
+
+
+ net46
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/COPYRIGHT-mpich.txt b/COPYRIGHT-mpich.txt
new file mode 100644
index 0000000..4bf0e57
--- /dev/null
+++ b/COPYRIGHT-mpich.txt
@@ -0,0 +1,40 @@
+
+ COPYRIGHT
+
+The following is a notice of limited availability of the code, and disclaimer
+which must be included in the prologue of the code and in all source listings
+of the code.
+
+Copyright Notice
+ (C) 2002 University of Chicago
+
+Permission is hereby granted to use, reproduce, prepare derivative works, and
+to redistribute to others. This software was authored by:
+
+Argonne National Laboratory Group
+W. Gropp: (630) 252-4318; FAX: (630) 252-5986; e-mail: gropp@mcs.anl.gov
+E. Lusk: (630) 252-7852; FAX: (630) 252-5986; e-mail: lusk@mcs.anl.gov
+Mathematics and Computer Science Division
+Argonne National Laboratory, Argonne IL 60439
+
+
+ GOVERNMENT LICENSE
+
+Portions of this material resulted from work developed under a U.S.
+Government Contract and are subject to the following license: the Government
+is granted for itself and others acting on its behalf a paid-up, nonexclusive,
+irrevocable worldwide license in this computer software to reproduce, prepare
+derivative works, and perform publicly and display publicly.
+
+ DISCLAIMER
+
+This computer code material was prepared, in part, as an account of work
+sponsored by an agency of the United States Government. Neither the United
+States, nor the University of Chicago, nor any of their employees, makes any
+warranty express or implied, or assumes any legal liability or responsibility
+for the accuracy, completeness, or usefulness of any information, apparatus,
+product, or process disclosed, or represents that its use would not infringe
+privately owned rights.
+
+
+
diff --git a/Directory.Build.props b/Directory.Build.props
new file mode 100644
index 0000000..3177de8
--- /dev/null
+++ b/Directory.Build.props
@@ -0,0 +1,190 @@
+
+
+
+
+
+ $(MSBuildAllProjects);$(MSBuildThisFileFullPath)
+
+
+ $(MSBuildThisFileDirectory.TrimEnd('\\'))
+ $(EnlistmentRoot)\packages
+
+
+
+
+
+
+
+
+
+
+ CustomDictionary.xml
+
+
+
+ False
+ true
+ 300
+ false
+ false
+
+
+
+
+ true
+
+
+
+
+
+
+
+ $(MSBuildExtensionsPath)\NuProj
+ $(CBTModule_NuProj)\tools
+
+ false
+
+
+
+
+ $(EnlistmentRoot)\out\$(Configuration)-$(Platform)\
+
+
+
+ amd64
+ i386
+ i386
+ amd64
+
+
+
+ $(EnlistmentRoot)/src
+
+ $(SrcRoot)\include
+ $(MPI_INC_ROOT)\x64
+ $(MPI_INC_ROOT)\x86
+ $(MPI_INC_ROOT);$(MPI_INC_ARCH_ROOT)
+ $(SrcRoot)\mpi
+
+ bin
+ $(MPI_DESTINATION)
+ $(StagingOutputRootPath)\$(MPI_BIN_DESTINATION)
+ $(StagingOutputRootPath)
+ $(MPI_DESTINATION)\sdk
+ 0x2000
+
+
+
+ false
+ 11.0
+ $(WindowsSdkDir)
+
+ Release
+ x86
+ NotSet
+ <_COMPONENTNAME_>mpi
+
+
+
+ "C:\Program Files\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin"
+
+
+
+ $(StagingOutputRootPath)$(MSBuildProjectName)\
+ 14.15.26726
+ 10.0.16299.0
+ v141
+ $(OutputPath)
+ $(Platform)\$(Configuration)
+ $(Configuration)
+ false
+ None
+
+
+
+ true
+ false
+
+
+
+ false
+ false
+ true
+
+
+
+
+ Disabled
+ _DEBUG;%(PreprocessorDefinitions)
+ MultiThreadedDebug
+ true
+
+
+
+
+
+
+
+ MaxSpeed
+ NDEBUG;%(PreprocessorDefinitions)
+ MultiThreaded
+ true
+
+
+
+
+
+
+
+ WIN32;%(PreprocessorDefinitions)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/LICENSE b/LICENSE.txt
similarity index 100%
rename from LICENSE
rename to LICENSE.txt
diff --git a/NuGet.Config b/NuGet.Config
new file mode 100644
index 0000000..6525673
--- /dev/null
+++ b/NuGet.Config
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/README.md b/README.md
index 72f1506..cd30990 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,59 @@
+# Microsoft MPI
+
+Microsoft MPI (MS-MPI) is a Microsoft implementation of the [Message Passing Interface standard](https://www.mpi-forum.org) for developing and running parallel applications on the Windows platform.
+
+MS-MPI offers several benefits:
+
+ - Ease of porting existing code that uses [MPICH](https://www.mpich.org).
+ - Security based on Active Directory Domain Services.
+ - High performance on the Windows operating system.
+ - Binary compatibility across different types of interconnectivity options.
+
+## MS-MPI downloads
+
+The following are current downloads for MS-MPI:
+
+ - [MS-MPI v10.0](https://www.microsoft.com/download/details.aspx?id=57467) (new\!) - see [Release notes](microsoft-mpi-release-notes.md)
+ - The MS-MPI SDK is also available on [NuGet](https://www.nuget.org/packages/msmpisdk/).
+
+Earlier versions of MS-MPI are available from the [Microsoft Download Center](https://go.microsoft.com/fwlink/p/?linkid=390734).
+
+## Community resources
+
+ - [Windows HPC MPI Forum](https://social.microsoft.com/forums/en-us/home?forum=windowshpcmpi)
+ - [Contact the MS-MPI Team](mailto:askmpi@microsoft.com)
+
+## Microsoft high performance computing resources
+
+ - Featured tutorial: [How to compile and run a simple MS-MPI program](https://blogs.technet.com/b/windowshpc/archive/2015/02/02/how-to-compile-and-run-a-simple-ms-mpi-program.aspx)
+ - Featured guide: [Set up a Windows RDMA cluster with HPC Pack and A8 and A9 instances to run MPI applications](https://azure.microsoft.com/documentation/articles/virtual-machines-windows-hpcpack-cluster-rdma/)
+ - [Microsoft High Performance Computing for Developers](https://msdn.microsoft.com/en-us/library/ff976568.aspx)
+ - [Microsoft HPC Pack (Windows HPC Server) Technical Library](https://technet.microsoft.com/library/cc514029)
+ - [Azure HPC Scenarios](https://www.microsoft.com/hpc)
+
+# Building
+
+## Prerequisites
+
+ - [Visual Studio 2017](https://docs.microsoft.com/visualstudio/install/install-visual-studio)
+
+ Please make sure to select the following workloads during installation:
+ - .NET desktop development (required for CBT/NuGet packages)
+ - Desktop development with C++
+
+ - [Windows SDK](https://developer.microsoft.com/windows/downloads/windows-10-sdk)
+ - [Windows WDK](https://docs.microsoft.com/windows-hardware/drivers/download-the-wdk)
+ - [GFortran](http://mingw-w64.org/doku.php)
+ - Update _GFORTRAN_BIN_ in Directory.Build.props to the install location of GFortran
+ - [Perl](https://www.perl.org/get.html#win32)
+
+ Based on the installed VS/SDK/WDK versions, update _VCToolsVersion_ and _WindowsTargetPlatformVersion_ in Directory.Build.props
+
+Note that the build system uses [CommonBuildToolSet(CBT)](https://commonbuildtoolset.github.io/). You may need to unblock __CBT.Core.dll__ (under .build/CBT) depending on your security configurations. Please refer to [CBT documentation](https://commonbuildtoolset.github.io/#/getting-started) for additional details.
+
+
+## Build
+To build, open a __Native Tools Command Prompt for Visual Studio__ and run ``msbuild`` from root folder.
# Contributing
diff --git a/dirs.proj b/dirs.proj
new file mode 100644
index 0000000..ba6a006
--- /dev/null
+++ b/dirs.proj
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/dirs.proj b/src/dirs.proj
new file mode 100644
index 0000000..b53afb1
--- /dev/null
+++ b/src/dirs.proj
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/include/binplace.proj b/src/include/binplace.proj
new file mode 100644
index 0000000..1b6826b
--- /dev/null
+++ b/src/include/binplace.proj
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+ <_CopyItems Include="**\*.h;**\*.f90" Exclude="oacr.h"/>
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/include/mpi.f90 b/src/include/mpi.f90
new file mode 100644
index 0000000..2269306
--- /dev/null
+++ b/src/include/mpi.f90
@@ -0,0 +1,1717 @@
+! -*- Mode: F90; -*-
+! Copyright (c) Microsoft Corporation. All rights reserved.
+! Licensed under the MIT License.
+!
+! (C) 2004 by Argonne National Laboratory.
+! (C) 2015 by Microsoft Corporation
+!
+! MPICH COPYRIGHT
+!
+! The following is a notice of limited availability of the code, and disclaimer
+! which must be included in the prologue of the code and in all source listings
+! of the code.
+!
+! Copyright Notice
+! (C) 2002 University of Chicago
+!
+! Permission is hereby granted to use, reproduce, prepare derivative works, and
+! to redistribute to others. This software was authored by:
+!
+! Mathematics and Computer Science Division
+! Argonne National Laboratory, Argonne IL 60439
+!
+! (and)
+!
+! Department of Computer Science
+! University of Illinois at Urbana-Champaign
+!
+!
+! GOVERNMENT LICENSE
+!
+! Portions of this material resulted from work developed under a U.S.
+! Government Contract and are subject to the following license: the Government
+! is granted for itself and others acting on its behalf a paid-up, nonexclusive,
+! irrevocable worldwide license in this computer software to reproduce, prepare
+! derivative works, and perform publicly and display publicly.
+!
+! DISCLAIMER
+!
+! This computer code material was prepared, in part, as an account of work
+! sponsored by an agency of the United States Government. Neither the United
+! States, nor the University of Chicago, nor any of their employees, makes any
+! warranty express or implied, or assumes any legal liability or responsibility
+! for the accuracy, completeness, or usefulness of any information, apparatus,
+! product, or process disclosed, or represents that its use would not infringe
+! privately owned rights.
+!
+!
+ MODULE MPI_CONSTANTS
+ IMPLICIT NONE
+
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER (MPI_SOURCE=3,MPI_TAG=4,MPI_ERROR=5)
+ INTEGER MPI_STATUS_SIZE
+ PARAMETER (MPI_STATUS_SIZE=5)
+ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+ INTEGER MPI_ERRCODES_IGNORE(1)
+ CHARACTER*1 MPI_ARGVS_NULL(1,1)
+ CHARACTER*1 MPI_ARGV_NULL(1)
+ INTEGER MPI_SUCCESS
+ PARAMETER (MPI_SUCCESS=0)
+ INTEGER MPI_ERR_OTHER
+ PARAMETER (MPI_ERR_OTHER=15)
+ INTEGER MPI_ERR_WIN
+ PARAMETER (MPI_ERR_WIN=45)
+ INTEGER MPI_ERR_FILE
+ PARAMETER (MPI_ERR_FILE=27)
+ INTEGER MPI_ERR_COUNT
+ PARAMETER (MPI_ERR_COUNT=2)
+ INTEGER MPI_ERR_SPAWN
+ PARAMETER (MPI_ERR_SPAWN=42)
+ INTEGER MPI_ERR_BASE
+ PARAMETER (MPI_ERR_BASE=46)
+ INTEGER MPI_ERR_RMA_CONFLICT
+ PARAMETER (MPI_ERR_RMA_CONFLICT=49)
+ INTEGER MPI_ERR_IN_STATUS
+ PARAMETER (MPI_ERR_IN_STATUS=17)
+ INTEGER MPI_ERR_INFO_KEY
+ PARAMETER (MPI_ERR_INFO_KEY=29)
+ INTEGER MPI_ERR_LOCKTYPE
+ PARAMETER (MPI_ERR_LOCKTYPE=47)
+ INTEGER MPI_ERR_OP
+ PARAMETER (MPI_ERR_OP=9)
+ INTEGER MPI_ERR_ARG
+ PARAMETER (MPI_ERR_ARG=12)
+ INTEGER MPI_ERR_READ_ONLY
+ PARAMETER (MPI_ERR_READ_ONLY=40)
+ INTEGER MPI_ERR_SIZE
+ PARAMETER (MPI_ERR_SIZE=51)
+ INTEGER MPI_ERR_BUFFER
+ PARAMETER (MPI_ERR_BUFFER=1)
+ INTEGER MPI_ERR_DUP_DATAREP
+ PARAMETER (MPI_ERR_DUP_DATAREP=24)
+ INTEGER MPI_ERR_UNSUPPORTED_DATAREP
+ PARAMETER (MPI_ERR_UNSUPPORTED_DATAREP=43)
+ INTEGER MPI_ERR_LASTCODE
+ PARAMETER (MPI_ERR_LASTCODE=1073741823)
+ INTEGER MPI_ERR_TRUNCATE
+ PARAMETER (MPI_ERR_TRUNCATE=14)
+ INTEGER MPI_ERR_DISP
+ PARAMETER (MPI_ERR_DISP=52)
+ INTEGER MPI_ERR_PORT
+ PARAMETER (MPI_ERR_PORT=38)
+ INTEGER MPI_ERR_INFO_NOKEY
+ PARAMETER (MPI_ERR_INFO_NOKEY=31)
+ INTEGER MPI_ERR_ASSERT
+ PARAMETER (MPI_ERR_ASSERT=53)
+ INTEGER MPI_ERR_FILE_EXISTS
+ PARAMETER (MPI_ERR_FILE_EXISTS=25)
+ INTEGER MPI_ERR_PENDING
+ PARAMETER (MPI_ERR_PENDING=18)
+ INTEGER MPI_ERR_COMM
+ PARAMETER (MPI_ERR_COMM=5)
+ INTEGER MPI_ERR_KEYVAL
+ PARAMETER (MPI_ERR_KEYVAL=48)
+ INTEGER MPI_ERR_NAME
+ PARAMETER (MPI_ERR_NAME=33)
+ INTEGER MPI_ERR_REQUEST
+ PARAMETER (MPI_ERR_REQUEST=19)
+ INTEGER MPI_ERR_GROUP
+ PARAMETER (MPI_ERR_GROUP=8)
+ INTEGER MPI_ERR_TOPOLOGY
+ PARAMETER (MPI_ERR_TOPOLOGY=10)
+ INTEGER MPI_ERR_TYPE
+ PARAMETER (MPI_ERR_TYPE=3)
+ INTEGER MPI_ERR_TAG
+ PARAMETER (MPI_ERR_TAG=4)
+ INTEGER MPI_ERR_INFO_VALUE
+ PARAMETER (MPI_ERR_INFO_VALUE=30)
+ INTEGER MPI_ERR_NOT_SAME
+ PARAMETER (MPI_ERR_NOT_SAME=35)
+ INTEGER MPI_ERR_RMA_SYNC
+ PARAMETER (MPI_ERR_RMA_SYNC=50)
+ INTEGER MPI_ERR_INFO
+ PARAMETER (MPI_ERR_INFO=28)
+ INTEGER MPI_ERR_NO_MEM
+ PARAMETER (MPI_ERR_NO_MEM=34)
+ INTEGER MPI_ERR_BAD_FILE
+ PARAMETER (MPI_ERR_BAD_FILE=22)
+ INTEGER MPI_ERR_FILE_IN_USE
+ PARAMETER (MPI_ERR_FILE_IN_USE=26)
+ INTEGER MPI_ERR_UNKNOWN
+ PARAMETER (MPI_ERR_UNKNOWN=13)
+ INTEGER MPI_ERR_UNSUPPORTED_OPERATION
+ PARAMETER (MPI_ERR_UNSUPPORTED_OPERATION=44)
+ INTEGER MPI_ERR_QUOTA
+ PARAMETER (MPI_ERR_QUOTA=39)
+ INTEGER MPI_ERR_AMODE
+ PARAMETER (MPI_ERR_AMODE=21)
+ INTEGER MPI_ERR_ROOT
+ PARAMETER (MPI_ERR_ROOT=7)
+ INTEGER MPI_ERR_RANK
+ PARAMETER (MPI_ERR_RANK=6)
+ INTEGER MPI_ERR_DIMS
+ PARAMETER (MPI_ERR_DIMS=11)
+ INTEGER MPI_ERR_NO_SUCH_FILE
+ PARAMETER (MPI_ERR_NO_SUCH_FILE=37)
+ INTEGER MPI_ERR_SERVICE
+ PARAMETER (MPI_ERR_SERVICE=41)
+ INTEGER MPI_ERR_INTERN
+ PARAMETER (MPI_ERR_INTERN=16)
+ INTEGER MPI_ERR_IO
+ PARAMETER (MPI_ERR_IO=32)
+ INTEGER MPI_ERR_ACCESS
+ PARAMETER (MPI_ERR_ACCESS=20)
+ INTEGER MPI_ERR_NO_SPACE
+ PARAMETER (MPI_ERR_NO_SPACE=36)
+ INTEGER MPI_ERR_CONVERSION
+ PARAMETER (MPI_ERR_CONVERSION=23)
+ INTEGER MPI_ERRORS_ARE_FATAL
+ PARAMETER (MPI_ERRORS_ARE_FATAL=1409286144)
+ INTEGER MPI_ERRORS_RETURN
+ PARAMETER (MPI_ERRORS_RETURN=1409286145)
+ INTEGER MPI_IDENT
+ PARAMETER (MPI_IDENT=0)
+ INTEGER MPI_CONGRUENT
+ PARAMETER (MPI_CONGRUENT=1)
+ INTEGER MPI_SIMILAR
+ PARAMETER (MPI_SIMILAR=2)
+ INTEGER MPI_UNEQUAL
+ PARAMETER (MPI_UNEQUAL=3)
+ INTEGER MPI_MAX
+ PARAMETER (MPI_MAX=1476395009)
+ INTEGER MPI_MIN
+ PARAMETER (MPI_MIN=1476395010)
+ INTEGER MPI_SUM
+ PARAMETER (MPI_SUM=1476395011)
+ INTEGER MPI_PROD
+ PARAMETER (MPI_PROD=1476395012)
+ INTEGER MPI_LAND
+ PARAMETER (MPI_LAND=1476395013)
+ INTEGER MPI_BAND
+ PARAMETER (MPI_BAND=1476395014)
+ INTEGER MPI_LOR
+ PARAMETER (MPI_LOR=1476395015)
+ INTEGER MPI_BOR
+ PARAMETER (MPI_BOR=1476395016)
+ INTEGER MPI_LXOR
+ PARAMETER (MPI_LXOR=1476395017)
+ INTEGER MPI_BXOR
+ PARAMETER (MPI_BXOR=1476395018)
+ INTEGER MPI_MINLOC
+ PARAMETER (MPI_MINLOC=1476395019)
+ INTEGER MPI_MAXLOC
+ PARAMETER (MPI_MAXLOC=1476395020)
+ INTEGER MPI_REPLACE
+ PARAMETER (MPI_REPLACE=1476395021)
+ INTEGER MPI_NO_OP
+ PARAMETER (MPI_NO_OP=1476395022)
+ INTEGER MPI_COMM_WORLD
+ PARAMETER (MPI_COMM_WORLD=1140850688)
+ INTEGER MPI_COMM_SELF
+ PARAMETER (MPI_COMM_SELF=1140850689)
+ INTEGER MPI_COMM_TYPE_SHARED
+ PARAMETER (MPI_COMM_TYPE_SHARED=1)
+ INTEGER MPI_GROUP_EMPTY
+ PARAMETER (MPI_GROUP_EMPTY=1207959552)
+ INTEGER MPI_COMM_NULL
+ PARAMETER (MPI_COMM_NULL=67108864)
+ INTEGER MPI_WIN_NULL
+ PARAMETER (MPI_WIN_NULL=536870912)
+ INTEGER MPI_FILE_NULL
+ PARAMETER (MPI_FILE_NULL=0)
+ INTEGER MPI_GROUP_NULL
+ PARAMETER (MPI_GROUP_NULL=134217728)
+ INTEGER MPI_OP_NULL
+ PARAMETER (MPI_OP_NULL=402653184)
+ INTEGER MPI_DATATYPE_NULL
+ PARAMETER (MPI_DATATYPE_NULL=z'0c000000')
+ INTEGER MPI_REQUEST_NULL
+ PARAMETER (MPI_REQUEST_NULL=738197504)
+ INTEGER MPI_ERRHANDLER_NULL
+ PARAMETER (MPI_ERRHANDLER_NULL=335544320)
+ INTEGER MPI_INFO_NULL
+ PARAMETER (MPI_INFO_NULL=469762048)
+ INTEGER MPI_MESSAGE_NULL
+ PARAMETER (MPI_MESSAGE_NULL=805306368)
+ INTEGER MPI_MESSAGE_NO_PROC
+ PARAMETER (MPI_MESSAGE_NO_PROC=1879048192)
+ INTEGER MPI_TAG_UB
+ PARAMETER (MPI_TAG_UB=1681915906)
+ INTEGER MPI_HOST
+ PARAMETER (MPI_HOST=1681915908)
+ INTEGER MPI_IO
+ PARAMETER (MPI_IO=1681915910)
+ INTEGER MPI_WTIME_IS_GLOBAL
+ PARAMETER (MPI_WTIME_IS_GLOBAL=1681915912)
+ INTEGER MPI_UNIVERSE_SIZE
+ PARAMETER (MPI_UNIVERSE_SIZE=1681915914)
+ INTEGER MPI_LASTUSEDCODE
+ PARAMETER (MPI_LASTUSEDCODE=1681915916)
+ INTEGER MPI_APPNUM
+ PARAMETER (MPI_APPNUM=1681915918)
+ INTEGER MPI_WIN_BASE
+ PARAMETER (MPI_WIN_BASE=1711276034)
+ INTEGER MPI_WIN_SIZE
+ PARAMETER (MPI_WIN_SIZE=1711276036)
+ INTEGER MPI_WIN_DISP_UNIT
+ PARAMETER (MPI_WIN_DISP_UNIT=1711276038)
+ INTEGER MPI_MAX_ERROR_STRING
+ PARAMETER (MPI_MAX_ERROR_STRING=511)
+ INTEGER MPI_MAX_PORT_NAME
+ PARAMETER (MPI_MAX_PORT_NAME=255)
+ INTEGER MPI_MAX_OBJECT_NAME
+ PARAMETER (MPI_MAX_OBJECT_NAME=127)
+ INTEGER MPI_MAX_INFO_KEY
+ PARAMETER (MPI_MAX_INFO_KEY=254)
+ INTEGER MPI_MAX_INFO_VAL
+ PARAMETER (MPI_MAX_INFO_VAL=1023)
+ INTEGER MPI_MAX_PROCESSOR_NAME
+ PARAMETER (MPI_MAX_PROCESSOR_NAME=128-1)
+ INTEGER MPI_MAX_DATAREP_STRING
+ PARAMETER (MPI_MAX_DATAREP_STRING=127)
+ INTEGER MPI_MAX_LIBRARY_VERSION_STRING
+ PARAMETER (MPI_MAX_LIBRARY_VERSION_STRING=64-1)
+ INTEGER MPI_UNDEFINED
+ PARAMETER (MPI_UNDEFINED=(-32766))
+ INTEGER MPI_KEYVAL_INVALID
+ PARAMETER (MPI_KEYVAL_INVALID=603979776)
+ INTEGER MPI_BSEND_OVERHEAD
+ PARAMETER (MPI_BSEND_OVERHEAD=(95))
+ INTEGER MPI_PROC_NULL
+ PARAMETER (MPI_PROC_NULL=-1)
+ INTEGER MPI_ANY_SOURCE
+ PARAMETER (MPI_ANY_SOURCE=-2)
+ INTEGER MPI_ANY_TAG
+ PARAMETER (MPI_ANY_TAG=-1)
+ INTEGER MPI_ROOT
+ PARAMETER (MPI_ROOT=-3)
+ INTEGER MPI_GRAPH
+ PARAMETER (MPI_GRAPH=1)
+ INTEGER MPI_CART
+ PARAMETER (MPI_CART=2)
+ INTEGER MPI_DIST_GRAPH
+ PARAMETER (MPI_DIST_GRAPH=3)
+ INTEGER MPI_VERSION
+ PARAMETER (MPI_VERSION=2)
+ INTEGER MPI_SUBVERSION
+ PARAMETER (MPI_SUBVERSION=0)
+ INTEGER MPI_LOCK_EXCLUSIVE
+ PARAMETER (MPI_LOCK_EXCLUSIVE=234)
+ INTEGER MPI_LOCK_SHARED
+ PARAMETER (MPI_LOCK_SHARED=235)
+ INTEGER MPI_CHAR
+ PARAMETER (MPI_CHAR=z'4c000101')
+ INTEGER MPI_UNSIGNED_CHAR
+ PARAMETER (MPI_UNSIGNED_CHAR=z'4c000102')
+ INTEGER MPI_SHORT
+ PARAMETER (MPI_SHORT=z'4c000203')
+ INTEGER MPI_UNSIGNED_SHORT
+ PARAMETER (MPI_UNSIGNED_SHORT=z'4c000204')
+ INTEGER MPI_INT
+ PARAMETER (MPI_INT=z'4c000405')
+ INTEGER MPI_UNSIGNED
+ PARAMETER (MPI_UNSIGNED=z'4c000406')
+ INTEGER MPI_LONG
+ PARAMETER (MPI_LONG=z'4c000407')
+ INTEGER MPI_UNSIGNED_LONG
+ PARAMETER (MPI_UNSIGNED_LONG=z'4c000408')
+ INTEGER MPI_LONG_LONG
+ PARAMETER (MPI_LONG_LONG=z'4c000809')
+ INTEGER MPI_LONG_LONG_INT
+ PARAMETER (MPI_LONG_LONG_INT=z'4c000809')
+ INTEGER MPI_FLOAT
+ PARAMETER (MPI_FLOAT=z'4c00040a')
+ INTEGER MPI_DOUBLE
+ PARAMETER (MPI_DOUBLE=z'4c00080b')
+ INTEGER MPI_LONG_DOUBLE
+ PARAMETER (MPI_LONG_DOUBLE=z'4c00080c')
+ INTEGER MPI_BYTE
+ PARAMETER (MPI_BYTE=z'4c00010d')
+ INTEGER MPI_WCHAR
+ PARAMETER (MPI_WCHAR=z'4c00020e')
+ INTEGER MPI_PACKED
+ PARAMETER (MPI_PACKED=z'4c00010f')
+ INTEGER MPI_LB
+ PARAMETER (MPI_LB=z'4c000010')
+ INTEGER MPI_UB
+ PARAMETER (MPI_UB=z'4c000011')
+ INTEGER MPI_2INT
+ PARAMETER (MPI_2INT=z'4c000816')
+ INTEGER MPI_SIGNED_CHAR
+ PARAMETER (MPI_SIGNED_CHAR=z'4c000118')
+ INTEGER MPI_UNSIGNED_LONG_LONG
+ PARAMETER (MPI_UNSIGNED_LONG_LONG=z'4c000819')
+ INTEGER MPI_CHARACTER
+ PARAMETER (MPI_CHARACTER=z'4c00011a')
+ INTEGER MPI_INTEGER
+ PARAMETER (MPI_INTEGER=z'4c00041b')
+ INTEGER MPI_REAL
+ PARAMETER (MPI_REAL=z'4c00041c')
+ INTEGER MPI_LOGICAL
+ PARAMETER (MPI_LOGICAL=z'4c00041d')
+ INTEGER MPI_COMPLEX
+ PARAMETER (MPI_COMPLEX=z'4c00081e')
+ INTEGER MPI_DOUBLE_PRECISION
+ PARAMETER (MPI_DOUBLE_PRECISION=z'4c00081f')
+ INTEGER MPI_2INTEGER
+ PARAMETER (MPI_2INTEGER=z'4c000820')
+ INTEGER MPI_2REAL
+ PARAMETER (MPI_2REAL=z'4c000821')
+ INTEGER MPI_DOUBLE_COMPLEX
+ PARAMETER (MPI_DOUBLE_COMPLEX=z'4c001022')
+ INTEGER MPI_2DOUBLE_PRECISION
+ PARAMETER (MPI_2DOUBLE_PRECISION=z'4c001023')
+ INTEGER MPI_2COMPLEX
+ PARAMETER (MPI_2COMPLEX=z'4c001024')
+ INTEGER MPI_2DOUBLE_COMPLEX
+ PARAMETER (MPI_2DOUBLE_COMPLEX=z'4c002025')
+ INTEGER MPI_REAL2
+ PARAMETER (MPI_REAL2=z'0c000000')
+ INTEGER MPI_REAL4
+ PARAMETER (MPI_REAL4=z'4c000427')
+ INTEGER MPI_COMPLEX8
+ PARAMETER (MPI_COMPLEX8=z'4c000828')
+ INTEGER MPI_REAL8
+ PARAMETER (MPI_REAL8=z'4c000829')
+ INTEGER MPI_COMPLEX16
+ PARAMETER (MPI_COMPLEX16=z'4c00102a')
+ INTEGER MPI_REAL16
+ PARAMETER (MPI_REAL16=z'0c000000')
+ INTEGER MPI_COMPLEX32
+ PARAMETER (MPI_COMPLEX32=z'0c000000')
+ INTEGER MPI_INTEGER1
+ PARAMETER (MPI_INTEGER1=z'4c00012d')
+ INTEGER MPI_COMPLEX4
+ PARAMETER (MPI_COMPLEX4=z'0c000000')
+ INTEGER MPI_INTEGER2
+ PARAMETER (MPI_INTEGER2=z'4c00022f')
+ INTEGER MPI_INTEGER4
+ PARAMETER (MPI_INTEGER4=z'4c000430')
+ INTEGER MPI_INTEGER8
+ PARAMETER (MPI_INTEGER8=z'4c000831')
+ INTEGER MPI_INTEGER16
+ PARAMETER (MPI_INTEGER16=z'0c000000')
+
+ INCLUDE 'mpifptr.h'
+
+ INTEGER MPI_OFFSET
+ PARAMETER (MPI_OFFSET=z'4c00083c')
+ INTEGER MPI_COUNT
+ PARAMETER (MPI_COUNT=z'4c00083d')
+ INTEGER MPI_FLOAT_INT
+ PARAMETER (MPI_FLOAT_INT=z'8c000000')
+ INTEGER MPI_DOUBLE_INT
+ PARAMETER (MPI_DOUBLE_INT=z'8c000001')
+ INTEGER MPI_LONG_INT
+ PARAMETER (MPI_LONG_INT=z'8c000002')
+ INTEGER MPI_SHORT_INT
+ PARAMETER (MPI_SHORT_INT=z'8c000003')
+ INTEGER MPI_LONG_DOUBLE_INT
+ PARAMETER (MPI_LONG_DOUBLE_INT=z'8c000004')
+ INTEGER MPI_INTEGER_KIND
+ PARAMETER (MPI_INTEGER_KIND=4)
+ INTEGER MPI_OFFSET_KIND
+ PARAMETER (MPI_OFFSET_KIND=8)
+ INTEGER MPI_COUNT_KIND
+ PARAMETER (MPI_COUNT_KIND=8)
+ INTEGER MPI_COMBINER_NAMED
+ PARAMETER (MPI_COMBINER_NAMED=1)
+ INTEGER MPI_COMBINER_DUP
+ PARAMETER (MPI_COMBINER_DUP=2)
+ INTEGER MPI_COMBINER_CONTIGUOUS
+ PARAMETER (MPI_COMBINER_CONTIGUOUS=3)
+ INTEGER MPI_COMBINER_VECTOR
+ PARAMETER (MPI_COMBINER_VECTOR=4)
+ INTEGER MPI_COMBINER_HVECTOR_INTEGER
+ PARAMETER (MPI_COMBINER_HVECTOR_INTEGER=5)
+ INTEGER MPI_COMBINER_HVECTOR
+ PARAMETER (MPI_COMBINER_HVECTOR=6)
+ INTEGER MPI_COMBINER_INDEXED
+ PARAMETER (MPI_COMBINER_INDEXED=7)
+ INTEGER MPI_COMBINER_HINDEXED_INTEGER
+ PARAMETER (MPI_COMBINER_HINDEXED_INTEGER=8)
+ INTEGER MPI_COMBINER_HINDEXED
+ PARAMETER (MPI_COMBINER_HINDEXED=9)
+ INTEGER MPI_COMBINER_INDEXED_BLOCK
+ PARAMETER (MPI_COMBINER_INDEXED_BLOCK=10)
+ INTEGER MPI_COMBINER_STRUCT_INTEGER
+ PARAMETER (MPI_COMBINER_STRUCT_INTEGER=11)
+ INTEGER MPI_COMBINER_STRUCT
+ PARAMETER (MPI_COMBINER_STRUCT=12)
+ INTEGER MPI_COMBINER_SUBARRAY
+ PARAMETER (MPI_COMBINER_SUBARRAY=13)
+ INTEGER MPI_COMBINER_DARRAY
+ PARAMETER (MPI_COMBINER_DARRAY=14)
+ INTEGER MPI_COMBINER_F90_REAL
+ PARAMETER (MPI_COMBINER_F90_REAL=15)
+ INTEGER MPI_COMBINER_F90_COMPLEX
+ PARAMETER (MPI_COMBINER_F90_COMPLEX=16)
+ INTEGER MPI_COMBINER_F90_INTEGER
+ PARAMETER (MPI_COMBINER_F90_INTEGER=17)
+ INTEGER MPI_COMBINER_RESIZED
+ PARAMETER (MPI_COMBINER_RESIZED=18)
+ INTEGER MPI_COMBINER_HINDEXED_BLOCK
+ PARAMETER (MPI_COMBINER_HINDEXED_BLOCK=19)
+ INTEGER MPI_MODE_NOCHECK
+ PARAMETER (MPI_MODE_NOCHECK=1024)
+ INTEGER MPI_MODE_NOSTORE
+ PARAMETER (MPI_MODE_NOSTORE=2048)
+ INTEGER MPI_MODE_NOPUT
+ PARAMETER (MPI_MODE_NOPUT=4096)
+ INTEGER MPI_MODE_NOPRECEDE
+ PARAMETER (MPI_MODE_NOPRECEDE=8192)
+ INTEGER MPI_MODE_NOSUCCEED
+ PARAMETER (MPI_MODE_NOSUCCEED=16384)
+ INTEGER MPI_THREAD_SINGLE
+ PARAMETER (MPI_THREAD_SINGLE=0)
+ INTEGER MPI_THREAD_FUNNELED
+ PARAMETER (MPI_THREAD_FUNNELED=1)
+ INTEGER MPI_THREAD_SERIALIZED
+ PARAMETER (MPI_THREAD_SERIALIZED=2)
+ INTEGER MPI_THREAD_MULTIPLE
+ PARAMETER (MPI_THREAD_MULTIPLE=3)
+ INTEGER MPI_MODE_RDONLY
+ PARAMETER (MPI_MODE_RDONLY=2)
+ INTEGER MPI_MODE_RDWR
+ PARAMETER (MPI_MODE_RDWR=8)
+ INTEGER MPI_MODE_WRONLY
+ PARAMETER (MPI_MODE_WRONLY=4)
+ INTEGER MPI_MODE_DELETE_ON_CLOSE
+ PARAMETER (MPI_MODE_DELETE_ON_CLOSE=16)
+ INTEGER MPI_MODE_UNIQUE_OPEN
+ PARAMETER (MPI_MODE_UNIQUE_OPEN=32)
+ INTEGER MPI_MODE_CREATE
+ PARAMETER (MPI_MODE_CREATE=1)
+ INTEGER MPI_MODE_EXCL
+ PARAMETER (MPI_MODE_EXCL=64)
+ INTEGER MPI_MODE_APPEND
+ PARAMETER (MPI_MODE_APPEND=128)
+ INTEGER MPI_MODE_SEQUENTIAL
+ PARAMETER (MPI_MODE_SEQUENTIAL=256)
+ INTEGER MPI_SEEK_SET
+ PARAMETER (MPI_SEEK_SET=600)
+ INTEGER MPI_SEEK_CUR
+ PARAMETER (MPI_SEEK_CUR=602)
+ INTEGER MPI_SEEK_END
+ PARAMETER (MPI_SEEK_END=604)
+ INTEGER MPI_ORDER_C
+ PARAMETER (MPI_ORDER_C=56)
+ INTEGER MPI_ORDER_FORTRAN
+ PARAMETER (MPI_ORDER_FORTRAN=57)
+ INTEGER MPI_DISTRIBUTE_BLOCK
+ PARAMETER (MPI_DISTRIBUTE_BLOCK=121)
+ INTEGER MPI_DISTRIBUTE_CYCLIC
+ PARAMETER (MPI_DISTRIBUTE_CYCLIC=122)
+ INTEGER MPI_DISTRIBUTE_NONE
+ PARAMETER (MPI_DISTRIBUTE_NONE=123)
+ INTEGER MPI_DISTRIBUTE_DFLT_DARG
+ PARAMETER (MPI_DISTRIBUTE_DFLT_DARG=-49767)
+ INTEGER (KIND=8) MPI_DISPLACEMENT_CURRENT
+ PARAMETER (MPI_DISPLACEMENT_CURRENT=-54278278)
+ INTEGER MPI_BOTTOM, MPI_IN_PLACE
+ INTEGER MPI_UNWEIGHTED, MPI_WEIGHTS_EMPTY
+
+ COMMON /MPIPRIV1/ MPI_BOTTOM, MPI_IN_PLACE, MPI_STATUS_IGNORE
+
+ COMMON /MPIPRIV2/ MPI_STATUSES_IGNORE, MPI_ERRCODES_IGNORE
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIPRIV1/, /MPIPRIV2/
+
+ COMMON /MPIFCMB5/ MPI_UNWEIGHTED
+ COMMON /MPIFCMB9/ MPI_WEIGHTS_EMPTY
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIFCMB5/, /MPIFCMB9/
+
+ COMMON /MPIPRIVC/ MPI_ARGVS_NULL, MPI_ARGV_NULL
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIPRIVC/
+
+ END MODULE MPI_CONSTANTS
+
+ MODULE MPI_BASE
+ IMPLICIT NONE
+ INTERFACE
+ SUBROUTINE MPI_TYPE_CREATE_DARRAY(v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,ierror)
+ INTEGER v0, v1, v2, v3(*), v4(*), v5(*), v6(*), v7, v8, v9
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_DARRAY
+
+ SUBROUTINE MPI_COMM_FREE_KEYVAL(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_FREE_KEYVAL
+
+ SUBROUTINE MPI_TYPE_EXTENT(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_EXTENT
+
+ SUBROUTINE MPI_TYPE_GET_NAME(v0,v1,v2,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_NAME
+
+ SUBROUTINE MPI_GROUP_INTERSECTION(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_INTERSECTION
+
+ SUBROUTINE MPI_WIN_LOCK(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2, v3
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_LOCK
+
+ SUBROUTINE MPI_CARTDIM_GET(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_CARTDIM_GET
+
+ SUBROUTINE MPI_WIN_GET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_GET_ERRHANDLER
+
+ SUBROUTINE MPI_COMM_SPLIT(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2, v3
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SPLIT
+
+ SUBROUTINE MPI_COMM_SPLIT_TYPE(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SPLIT_TYPE
+
+ SUBROUTINE MPI_CANCEL(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_CANCEL
+
+ SUBROUTINE MPI_WIN_POST(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_POST
+
+ SUBROUTINE MPI_WIN_COMPLETE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_COMPLETE
+
+ SUBROUTINE MPI_TEST_CANCELLED(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0(MPI_STATUS_SIZE)
+ LOGICAL v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TEST_CANCELLED
+
+ SUBROUTINE MPI_GROUP_SIZE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_SIZE
+
+ SUBROUTINE MPI_ADD_ERROR_STRING(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ADD_ERROR_STRING
+
+ SUBROUTINE MPI_PACK_SIZE(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2, v3
+ INTEGER ierror
+ END SUBROUTINE MPI_PACK_SIZE
+
+ SUBROUTINE MPI_GET_ELEMENTS(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0(MPI_STATUS_SIZE), v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_ELEMENTS
+
+ SUBROUTINE MPI_GET_ELEMENTS_X(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE,MPI_COUNT_KIND
+ INTEGER v0(MPI_STATUS_SIZE), v1
+ INTEGER(KIND=MPI_COUNT_KIND) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_ELEMENTS_X
+
+ SUBROUTINE MPI_ERRHANDLER_GET(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ERRHANDLER_GET
+
+ SUBROUTINE MPI_FILE_GET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_FILE_GET_ERRHANDLER
+
+ SUBROUTINE MPI_TYPE_LB(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_LB
+
+ SUBROUTINE MPI_REQUEST_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_REQUEST_FREE
+
+ SUBROUTINE MPI_GROUP_RANGE_INCL(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2(3,*), v3
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_RANGE_INCL
+
+ SUBROUTINE MPI_TYPE_GET_TRUE_EXTENT(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_TRUE_EXTENT
+
+ SUBROUTINE MPI_TYPE_GET_TRUE_EXTENT_X(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_COUNT_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_COUNT_KIND) v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_TRUE_EXTENT_X
+
+ SUBROUTINE MPI_BARRIER(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_BARRIER
+
+ SUBROUTINE MPI_IS_THREAD_MAIN(v0,ierror)
+ LOGICAL v0
+ INTEGER ierror
+ END SUBROUTINE MPI_IS_THREAD_MAIN
+
+ SUBROUTINE MPI_WIN_FREE_KEYVAL(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_FREE_KEYVAL
+
+ SUBROUTINE MPI_TYPE_COMMIT(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_COMMIT
+
+ SUBROUTINE MPI_GROUP_RANGE_EXCL(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2(3,*), v3
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_RANGE_EXCL
+
+ SUBROUTINE MPI_REQUEST_GET_STATUS(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0
+ LOGICAL v1
+ INTEGER v2(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_REQUEST_GET_STATUS
+
+ SUBROUTINE MPI_QUERY_THREAD(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_QUERY_THREAD
+
+ SUBROUTINE MPI_ERRHANDLER_CREATE(v0,v1,ierror)
+ INTERFACE
+ SUBROUTINE v0(vv0,vv1)
+ INTEGER vv0,vv1
+ END SUBROUTINE
+ END INTERFACE
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ERRHANDLER_CREATE
+
+ SUBROUTINE MPI_COMM_SPAWN_MULTIPLE(v0,v1,v2,v3,v4,v5,v6,v7,v8,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1(*), v2(v0,*)
+ INTEGER v3(*), v4(*), v5, v6, v7, v8(*)
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SPAWN_MULTIPLE
+
+ SUBROUTINE MPI_COMM_REMOTE_GROUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_REMOTE_GROUP
+
+ SUBROUTINE MPI_TYPE_GET_EXTENT(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_EXTENT
+
+ SUBROUTINE MPI_TYPE_GET_EXTENT_X(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_COUNT_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_COUNT_KIND) v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_EXTENT_X
+
+ SUBROUTINE MPI_COMM_COMPARE(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_COMPARE
+
+ SUBROUTINE MPI_INFO_GET_VALUELEN(v0,v1,v2,v3,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ LOGICAL v3
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_GET_VALUELEN
+
+ SUBROUTINE MPI_INFO_GET(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ CHARACTER (LEN=*) v3
+ LOGICAL v4
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_GET
+
+ SUBROUTINE MPI_OP_COMMUTATIVE(v0,v1,ierror)
+ INTEGER v0
+ LOGICAL v1
+ INTEGER ierror
+ END SUBROUTINE MPI_OP_COMMUTATIVE
+
+ SUBROUTINE MPI_OP_CREATE(v0,v1,v2,ierror)
+ EXTERNAL v0
+ LOGICAL v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_OP_CREATE
+
+ SUBROUTINE MPI_TYPE_CREATE_STRUCT(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1(*)
+ INTEGER(KIND=MPI_ADDRESS_KIND) v2(*)
+ INTEGER v3(*), v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_STRUCT
+
+ SUBROUTINE MPI_TYPE_VECTOR(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_VECTOR
+
+ SUBROUTINE MPI_WIN_GET_GROUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_GET_GROUP
+
+ SUBROUTINE MPI_GROUP_COMPARE(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_COMPARE
+
+ SUBROUTINE MPI_CART_SHIFT(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_CART_SHIFT
+
+ SUBROUTINE MPI_WIN_SET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_SET_ERRHANDLER
+
+ SUBROUTINE MPI_COMM_SPAWN(v0,v1,v2,v3,v4,v5,v6,v7,ierror)
+ CHARACTER (LEN=*) v0, v1(*)
+ INTEGER v2, v3, v4, v5, v6, v7(*)
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SPAWN
+
+ SUBROUTINE MPI_COMM_GROUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_GROUP
+
+ SUBROUTINE MPI_WIN_CALL_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_CALL_ERRHANDLER
+
+ SUBROUTINE MPI_LOOKUP_NAME(v0,v1,v2,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1
+ CHARACTER (LEN=*) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_LOOKUP_NAME
+
+ SUBROUTINE MPI_INFO_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_FREE
+
+ SUBROUTINE MPI_COMM_SET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SET_ERRHANDLER
+
+ SUBROUTINE MPI_GRAPH_GET(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2, v3(*), v4(*)
+ INTEGER ierror
+ END SUBROUTINE MPI_GRAPH_GET
+
+ SUBROUTINE MPI_GROUP_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_FREE
+
+ SUBROUTINE MPI_STATUS_SET_ELEMENTS(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0(MPI_STATUS_SIZE), v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_STATUS_SET_ELEMENTS
+
+ SUBROUTINE MPI_STATUS_SET_ELEMENTS_X(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE,MPI_COUNT_KIND
+ INTEGER v0(MPI_STATUS_SIZE), v1
+ INTEGER(KIND=MPI_COUNT_KIND) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_STATUS_SET_ELEMENTS_X
+
+ SUBROUTINE MPI_WIN_TEST(v0,v1,ierror)
+ INTEGER v0
+ LOGICAL v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_TEST
+
+ SUBROUTINE MPI_WIN_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_FREE
+
+ SUBROUTINE MPI_GRAPH_MAP(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2(*), v3(*), v4
+ INTEGER ierror
+ END SUBROUTINE MPI_GRAPH_MAP
+
+ SUBROUTINE MPI_DIST_GRAPH_NEIGHBORS_COUNT(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2
+ LOGICAL v3
+ INTEGER ierror
+ END SUBROUTINE MPI_DIST_GRAPH_NEIGHBORS_COUNT
+
+ SUBROUTINE MPI_PACK_EXTERNAL_SIZE(v0,v1,v2,v3,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ CHARACTER (LEN=*) v0
+ INTEGER v1, v2
+ INTEGER(KIND=MPI_ADDRESS_KIND) v3
+ INTEGER ierror
+ END SUBROUTINE MPI_PACK_EXTERNAL_SIZE
+
+ SUBROUTINE MPI_PUBLISH_NAME(v0,v1,v2,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1
+ CHARACTER (LEN=*) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_PUBLISH_NAME
+
+ SUBROUTINE MPI_TYPE_CREATE_F90_REAL(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_F90_REAL
+
+ SUBROUTINE MPI_OPEN_PORT(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_OPEN_PORT
+
+ SUBROUTINE MPI_GROUP_UNION(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_UNION
+
+ SUBROUTINE MPI_COMM_ACCEPT(v0,v1,v2,v3,v4,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_ACCEPT
+
+ SUBROUTINE MPI_FILE_CREATE_ERRHANDLER(v0,v1,ierror)
+ INTERFACE
+ SUBROUTINE v0(vv0,vv1)
+ INTEGER vv0,vv1
+ END SUBROUTINE
+ END INTERFACE
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_FILE_CREATE_ERRHANDLER
+
+ SUBROUTINE MPI_WIN_GET_NAME(v0,v1,v2,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_GET_NAME
+
+ SUBROUTINE MPI_INFO_CREATE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_CREATE
+
+ SUBROUTINE MPI_TYPE_CREATE_F90_INTEGER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_F90_INTEGER
+
+ SUBROUTINE MPI_TYPE_SET_NAME(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_SET_NAME
+
+ SUBROUTINE MPI_ATTR_DELETE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ATTR_DELETE
+
+ SUBROUTINE MPI_GROUP_INCL(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2(*), v3
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_INCL
+
+ SUBROUTINE MPI_COMM_CREATE_ERRHANDLER(v0,v1,ierror)
+ INTERFACE
+ SUBROUTINE v0(vv0,vv1)
+ INTEGER vv0,vv1
+ END SUBROUTINE
+ END INTERFACE
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_CREATE_ERRHANDLER
+
+ SUBROUTINE MPI_COMM_CONNECT(v0,v1,v2,v3,v4,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_CONNECT
+
+ SUBROUTINE MPI_ERROR_STRING(v0,v1,v2,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_ERROR_STRING
+
+ SUBROUTINE MPI_TYPE_GET_CONTENTS(v0,v1,v2,v3,v4,v5,v6,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1, v2, v3, v4(*)
+ INTEGER(KIND=MPI_ADDRESS_KIND) v5(*)
+ INTEGER v6(*)
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_CONTENTS
+
+ SUBROUTINE MPI_TYPE_STRUCT(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1(*), v2(*), v3(*), v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_STRUCT
+
+ SUBROUTINE MPI_TYPE_CREATE_INDEXED_BLOCK(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2(*), v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_INDEXED_BLOCK
+
+ SUBROUTINE MPI_TYPE_CREATE_HVECTOR(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1
+ INTEGER(KIND=MPI_ADDRESS_KIND) v2
+ INTEGER v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_HVECTOR
+
+ SUBROUTINE MPI_TYPE_FREE_KEYVAL(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_FREE_KEYVAL
+
+ SUBROUTINE MPI_START(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_START
+
+ SUBROUTINE MPI_ABORT(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ABORT
+
+ SUBROUTINE MPI_INTERCOMM_CREATE(v0,v1,v2,v3,v4,v5,ierror)
+ INTEGER v0, v1, v2, v3, v4, v5
+ INTEGER ierror
+ END SUBROUTINE MPI_INTERCOMM_CREATE
+
+ SUBROUTINE MPI_COMM_RANK(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_RANK
+
+ SUBROUTINE MPI_COMM_GET_PARENT(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_GET_PARENT
+
+ SUBROUTINE MPI_FINALIZED(v0,ierror)
+ LOGICAL v0
+ INTEGER ierror
+ END SUBROUTINE MPI_FINALIZED
+
+ SUBROUTINE MPI_INTERCOMM_MERGE(v0,v1,v2,ierror)
+ INTEGER v0
+ LOGICAL v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_INTERCOMM_MERGE
+
+ SUBROUTINE MPI_INFO_GET_NTHKEY(v0,v1,v2,ierror)
+ INTEGER v0, v1
+ CHARACTER (LEN=*) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_GET_NTHKEY
+
+ SUBROUTINE MPI_TYPE_MATCH_SIZE(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_MATCH_SIZE
+
+ SUBROUTINE MPI_STATUS_SET_CANCELLED(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0(MPI_STATUS_SIZE), v1
+ INTEGER ierror
+ END SUBROUTINE MPI_STATUS_SET_CANCELLED
+
+ SUBROUTINE MPI_FILE_SET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_FILE_SET_ERRHANDLER
+
+ SUBROUTINE MPI_INFO_DELETE(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_DELETE
+
+ SUBROUTINE MPI_UNPUBLISH_NAME(v0,v1,v2,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1
+ CHARACTER (LEN=*) v2
+ INTEGER ierror
+ END SUBROUTINE MPI_UNPUBLISH_NAME
+
+ SUBROUTINE MPI_TYPE_CONTIGUOUS(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CONTIGUOUS
+
+ SUBROUTINE MPI_INITIALIZED(v0,ierror)
+ LOGICAL v0
+ INTEGER ierror
+ END SUBROUTINE MPI_INITIALIZED
+
+ SUBROUTINE MPI_TYPE_CREATE_RESIZED(v0,v1,v2,v3,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1, v2
+ INTEGER v3
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_RESIZED
+
+ SUBROUTINE MPI_TYPE_UB(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_ADDRESS_KIND) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_UB
+
+ SUBROUTINE MPI_INFO_DUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_DUP
+
+ SUBROUTINE MPI_TYPE_DUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_DUP
+
+ SUBROUTINE MPI_ERRHANDLER_SET(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ERRHANDLER_SET
+
+ SUBROUTINE MPI_WIN_DELETE_ATTR(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_DELETE_ATTR
+
+ SUBROUTINE MPI_INFO_GET_NKEYS(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_GET_NKEYS
+
+ SUBROUTINE MPI_GROUP_EXCL(v0,v1,v2,v3,ierror)
+ INTEGER v0, v1, v2(*), v3
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_EXCL
+
+ SUBROUTINE MPI_INFO_SET(v0,v1,v2,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_INFO_SET
+
+ SUBROUTINE MPI_WAIT(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0, v1(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_WAIT
+
+ SUBROUTINE MPI_COMM_DELETE_ATTR(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_DELETE_ATTR
+
+ SUBROUTINE MPI_COMM_GET_NAME(v0,v1,v2,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER v2
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_GET_NAME
+
+ SUBROUTINE MPI_TEST(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0
+ LOGICAL v1
+ INTEGER v2(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_TEST
+
+ SUBROUTINE MPI_GET_COUNT(v0,v1,v2,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0(MPI_STATUS_SIZE), v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_COUNT
+
+ SUBROUTINE MPI_ADD_ERROR_CLASS(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_ADD_ERROR_CLASS
+
+ SUBROUTINE MPI_COMM_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_FREE
+
+ SUBROUTINE MPI_COMM_SET_NAME(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SET_NAME
+
+ SUBROUTINE MPI_COMM_DISCONNECT(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_DISCONNECT
+
+ SUBROUTINE MPI_IPROBE(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0, v1, v2
+ LOGICAL v3
+ INTEGER v4(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_IPROBE
+
+ SUBROUTINE MPI_IMPROBE(v0,v1,v2,v3,v4,v5,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0, v1, v2, v3, v4
+ INTEGER v5(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_IMPROBE
+
+ SUBROUTINE MPI_MPROBE(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0, v1, v2, v3
+ INTEGER v4(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_MPROBE
+
+ SUBROUTINE MPI_ADD_ERROR_CODE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ADD_ERROR_CODE
+
+ SUBROUTINE MPI_COMM_GET_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_GET_ERRHANDLER
+
+ SUBROUTINE MPI_COMM_CREATE(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_CREATE
+
+ SUBROUTINE MPI_OP_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_OP_FREE
+
+ SUBROUTINE MPI_TOPO_TEST(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TOPO_TEST
+
+ SUBROUTINE MPI_GET_PROCESSOR_NAME(v0,v1,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_PROCESSOR_NAME
+
+ SUBROUTINE MPI_COMM_SIZE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_SIZE
+
+ SUBROUTINE MPI_WIN_UNLOCK(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_UNLOCK
+
+ SUBROUTINE MPI_WIN_FLUSH(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_FLUSH
+
+ SUBROUTINE MPI_WIN_FLUSH_LOCAL(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_FLUSH_LOCAL
+
+ SUBROUTINE MPI_ERRHANDLER_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_ERRHANDLER_FREE
+
+ SUBROUTINE MPI_COMM_REMOTE_SIZE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_REMOTE_SIZE
+
+ SUBROUTINE MPI_PROBE(v0,v1,v2,v3,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_STATUS_SIZE
+ INTEGER v0, v1, v2, v3(MPI_STATUS_SIZE)
+ INTEGER ierror
+ END SUBROUTINE MPI_PROBE
+
+ SUBROUTINE MPI_TYPE_HINDEXED(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1(*), v2(*), v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_HINDEXED
+
+ SUBROUTINE MPI_WIN_WAIT(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_WAIT
+
+ SUBROUTINE MPI_WIN_SET_NAME(v0,v1,ierror)
+ INTEGER v0
+ CHARACTER (LEN=*) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_SET_NAME
+
+ SUBROUTINE MPI_TYPE_SIZE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_SIZE
+
+ SUBROUTINE MPI_TYPE_SIZE_X(v0,v1,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_COUNT_KIND
+ INTEGER v0
+ INTEGER(KIND=MPI_COUNT_KIND) v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_SIZE_X
+
+ SUBROUTINE MPI_TYPE_CREATE_SUBARRAY(v0,v1,v2,v3,v4,v5,v6,ierror)
+ INTEGER v0, v1(*), v2(*), v3(*), v4, v5, v6
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_SUBARRAY
+
+ SUBROUTINE MPI_WIN_CREATE_ERRHANDLER(v0,v1,ierror)
+ INTERFACE
+ SUBROUTINE v0(vv0,vv1)
+ INTEGER vv0,vv1
+ END SUBROUTINE
+ END INTERFACE
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_CREATE_ERRHANDLER
+
+ SUBROUTINE MPI_WIN_START(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_START
+
+ SUBROUTINE MPI_TYPE_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_FREE
+
+ SUBROUTINE MPI_WIN_FENCE(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_WIN_FENCE
+
+ SUBROUTINE MPI_GRAPHDIMS_GET(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GRAPHDIMS_GET
+
+ SUBROUTINE MPI_FILE_CALL_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_FILE_CALL_ERRHANDLER
+
+ SUBROUTINE MPI_TYPE_GET_ENVELOPE(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1, v2, v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_GET_ENVELOPE
+
+ SUBROUTINE MPI_TYPE_DELETE_ATTR(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_DELETE_ATTR
+
+ SUBROUTINE MPI_TYPE_CREATE_HINDEXED(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1(*)
+ INTEGER(KIND=MPI_ADDRESS_KIND) v2(*)
+ INTEGER v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_HINDEXED
+
+ SUBROUTINE MPI_TYPE_CREATE_HINDEXED_BLOCK(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1
+ INTEGER(KIND=MPI_ADDRESS_KIND) v2(*)
+ INTEGER v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_HINDEXED_BLOCK
+
+ SUBROUTINE MPI_TYPE_INDEXED(v0,v1,v2,v3,v4,ierror)
+ INTEGER v0, v1(*), v2(*), v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_INDEXED
+
+ SUBROUTINE MPI_GREQUEST_COMPLETE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_GREQUEST_COMPLETE
+
+ SUBROUTINE MPI_GRAPH_NEIGHBORS_COUNT(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GRAPH_NEIGHBORS_COUNT
+
+ SUBROUTINE MPI_GET_VERSION(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_VERSION
+
+ SUBROUTINE MPI_GET_LIBRARY_VERSION(v0,v1,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER v1
+ INTEGER ierror
+ END SUBROUTINE MPI_GET_LIBRARY_VERSION
+
+ SUBROUTINE MSMPI_GET_BSEND_OVERHEAD(size)
+ INTEGER size
+ END SUBROUTINE MSMPI_GET_BSEND_OVERHEAD
+
+ SUBROUTINE MSMPI_GET_VERSION(version)
+ INTEGER version
+ END SUBROUTINE MSMPI_GET_VERSION
+
+ SUBROUTINE MPI_TYPE_HVECTOR(v0,v1,v2,v3,v4,ierror)
+ USE MPI_CONSTANTS,ONLY:MPI_ADDRESS_KIND
+ INTEGER v0, v1
+ INTEGER(KIND=MPI_ADDRESS_KIND) v2
+ INTEGER v3, v4
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_HVECTOR
+
+ SUBROUTINE MPI_KEYVAL_FREE(v0,ierror)
+ INTEGER v0
+ INTEGER ierror
+ END SUBROUTINE MPI_KEYVAL_FREE
+
+ SUBROUTINE MPI_COMM_CALL_ERRHANDLER(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_CALL_ERRHANDLER
+
+ SUBROUTINE MPI_COMM_JOIN(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_JOIN
+
+ SUBROUTINE MPI_COMM_TEST_INTER(v0,v1,ierror)
+ INTEGER v0
+ LOGICAL v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_TEST_INTER
+
+ SUBROUTINE MPI_CLOSE_PORT(v0,ierror)
+ CHARACTER (LEN=*) v0
+ INTEGER ierror
+ END SUBROUTINE MPI_CLOSE_PORT
+
+ SUBROUTINE MPI_TYPE_CREATE_F90_COMPLEX(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_TYPE_CREATE_F90_COMPLEX
+
+ SUBROUTINE MPI_GROUP_DIFFERENCE(v0,v1,v2,ierror)
+ INTEGER v0, v1, v2
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_DIFFERENCE
+
+ SUBROUTINE MPI_COMM_DUP(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_COMM_DUP
+
+ SUBROUTINE MPI_ERROR_CLASS(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_ERROR_CLASS
+
+ SUBROUTINE MPI_GROUP_RANK(v0,v1,ierror)
+ INTEGER v0, v1
+ INTEGER ierror
+ END SUBROUTINE MPI_GROUP_RANK
+
+
+ SUBROUTINE MPI_INIT(ierror)
+ INTEGER ierror
+ END SUBROUTINE MPI_INIT
+
+ SUBROUTINE MPI_INIT_THREAD(v0,v1,ierror)
+ INTEGER v0, v1, ierror
+ END SUBROUTINE MPI_INIT_THREAD
+
+ FUNCTION MPI_WTIME()
+ DOUBLE PRECISION MPI_WTIME
+ END FUNCTION MPI_WTIME
+
+ FUNCTION MPI_WTICK()
+ DOUBLE PRECISION MPI_WTICK
+ END FUNCTION MPI_WTICK
+
+ FUNCTION PMPI_WTIME()
+ DOUBLE PRECISION PMPI_WTIME
+ END FUNCTION PMPI_WTIME
+
+ FUNCTION PMPI_WTICK()
+ DOUBLE PRECISION PMPI_WTICK
+ END FUNCTION PMPI_WTICK
+
+ SUBROUTINE MPI_NULL_DELETE_FN(a,b,c,d,e)
+ INTEGER a,b,c,d,e
+ END SUBROUTINE MPI_NULL_DELETE_FN
+
+ SUBROUTINE MPI_DUP_FN(a,b,c,d,e,f,g)
+ INTEGER a,b,c,d,e,g
+ LOGICAL f
+ END SUBROUTINE MPI_DUP_FN
+
+ SUBROUTINE MPI_NULL_COPY_FN(a,b,c,d,e,f,g)
+ INTEGER a,b,c,d,e,g
+ LOGICAL f
+ END SUBROUTINE MPI_NULL_COPY_FN
+
+ SUBROUTINE MPI_COMM_NULL_DELETE_FN(a,b,c,d,e)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,e
+ INTEGER (KIND=MPI_ADDRESS_KIND) c, d
+ END SUBROUTINE MPI_COMM_NULL_DELETE_FN
+
+ SUBROUTINE MPI_COMM_DUP_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_COMM_DUP_FN
+
+ SUBROUTINE MPI_COMM_NULL_COPY_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_COMM_NULL_COPY_FN
+
+ SUBROUTINE MPI_TYPE_NULL_DELETE_FN(a,b,c,d,e)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,e
+ INTEGER (KIND=MPI_ADDRESS_KIND) c, d
+ END SUBROUTINE MPI_TYPE_NULL_DELETE_FN
+
+ SUBROUTINE MPI_TYPE_DUP_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_TYPE_DUP_FN
+
+ SUBROUTINE MPI_TYPE_NULL_COPY_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_TYPE_NULL_COPY_FN
+
+ SUBROUTINE MPI_WIN_NULL_DELETE_FN(a,b,c,d,e)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,e
+ INTEGER (KIND=MPI_ADDRESS_KIND) c, d
+ END SUBROUTINE MPI_WIN_NULL_DELETE_FN
+
+ SUBROUTINE MPI_WIN_DUP_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_WIN_DUP_FN
+
+ SUBROUTINE MPI_WIN_NULL_COPY_FN(a,b,c,d,e,f,g)
+ USE MPI_CONSTANTS,ONLY: MPI_ADDRESS_KIND
+ INTEGER a,b,g
+ INTEGER (KIND=MPI_ADDRESS_KIND) c,d,e
+ LOGICAL f
+ END SUBROUTINE MPI_WIN_NULL_COPY_FN
+
+ END INTERFACE
+ END MODULE MPI_BASE
+
+ MODULE MPI_SIZEOFS
+! This module contains the definitions for MPI_SIZEOF for the
+! predefined, named types in Fortran 90. This is provided
+! as a separate module to allow MPI_SIZEOF to supply the
+! basic size information even when we do not provide the
+! arbitrary choice types
+ IMPLICIT NONE
+
+ PUBLIC :: MPI_SIZEOF
+ INTERFACE MPI_SIZEOF
+ MODULE PROCEDURE MPI_SIZEOF_I, MPI_SIZEOF_R, &
+ & MPI_SIZEOF_L, MPI_SIZEOF_CH, MPI_SIZEOF_CX,&
+ & MPI_SIZEOF_IV, MPI_SIZEOF_RV, &
+ & MPI_SIZEOF_LV, MPI_SIZEOF_CHV, MPI_SIZEOF_CXV
+ MODULE PROCEDURE MPI_SIZEOF_D, MPI_SIZEOF_DV
+ END INTERFACE ! MPI_SIZEOF
+
+ CONTAINS
+
+ SUBROUTINE MPI_SIZEOF_I( X, SIZE, IERROR )
+ INTEGER X
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_I
+
+ SUBROUTINE MPI_SIZEOF_R( X, SIZE, IERROR )
+ REAL X
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_R
+
+! If reals and doubles have been forced to the same size (e.g., with
+! -i8 -r8 to compilers like g95), then the compiler may refuse to
+! allow interfaces that use real and double precision (failing to
+! determine which one is intended)
+ SUBROUTINE MPI_SIZEOF_D( X, SIZE, IERROR )
+ DOUBLE PRECISION X
+ INTEGER SIZE, IERROR
+ SIZE = 8
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_D
+
+ SUBROUTINE MPI_SIZEOF_L( X, SIZE, IERROR )
+ LOGICAL X
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_L
+
+ SUBROUTINE MPI_SIZEOF_CH( X, SIZE, IERROR )
+ CHARACTER X
+ INTEGER SIZE, IERROR
+ SIZE = 1
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_CH
+
+ SUBROUTINE MPI_SIZEOF_CX( X, SIZE, IERROR )
+ COMPLEX X
+ INTEGER SIZE, IERROR
+ SIZE = 2*4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_CX
+
+ SUBROUTINE MPI_SIZEOF_IV( X, SIZE, IERROR )
+ INTEGER X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_IV
+
+ SUBROUTINE MPI_SIZEOF_RV( X, SIZE, IERROR )
+ REAL X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_RV
+
+! If reals and doubles have been forced to the same size (e.g., with
+! -i8 -r8 to compilers like g95), then the compiler may refuse to
+! allow interfaces that use real and double precision (failing to
+! determine which one is intended)
+ SUBROUTINE MPI_SIZEOF_DV( X, SIZE, IERROR )
+ DOUBLE PRECISION X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 8
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_DV
+
+ SUBROUTINE MPI_SIZEOF_LV( X, SIZE, IERROR )
+ LOGICAL X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_LV
+
+ SUBROUTINE MPI_SIZEOF_CHV( X, SIZE, IERROR )
+ CHARACTER X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 1
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_CHV
+
+ SUBROUTINE MPI_SIZEOF_CXV( X, SIZE, IERROR )
+ COMPLEX X(*)
+ INTEGER SIZE, IERROR
+ SIZE = 2*4
+ IERROR = 0
+ END SUBROUTINE MPI_SIZEOF_CXV
+
+! We don't include double complex. If we did, we'd need to include the
+! same hack as for real and double above if the compiler has been forced
+! to make them the same size.
+ END MODULE MPI_SIZEOFS
+
+ MODULE MPI
+ USE MPI_CONSTANTS
+ USE MPI_SIZEOFS
+ USE MPI_BASE
+ END MODULE MPI
diff --git a/src/include/mpi.h b/src/include/mpi.h
new file mode 100644
index 0000000..b1956a3
--- /dev/null
+++ b/src/include/mpi.h
@@ -0,0 +1,7005 @@
+/*
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License.
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * (C) 2015 by Microsoft Corporation.
+ *
+ * MPICH COPYRIGHT
+ *
+ * The following is a notice of limited availability of the code, and disclaimer
+ * which must be included in the prologue of the code and in all source listings
+ * of the code.
+ *
+ * Copyright Notice
+ * (C) 2002 University of Chicago
+ *
+ * Permission is hereby granted to use, reproduce, prepare derivative works, and
+ * to redistribute to others. This software was authored by:
+ *
+ * Mathematics and Computer Science Division
+ * Argonne National Laboratory, Argonne IL 60439
+ *
+ * (and)
+ *
+ * Department of Computer Science
+ * University of Illinois at Urbana-Champaign
+ *
+ *
+ * GOVERNMENT LICENSE
+ *
+ * Portions of this material resulted from work developed under a U.S.
+ * Government Contract and are subject to the following license: the Government
+ * is granted for itself and others acting on its behalf a paid-up, nonexclusive,
+ * irrevocable worldwide license in this computer software to reproduce, prepare
+ * derivative works, and perform publicly and display publicly.
+ *
+ * DISCLAIMER
+ *
+ * This computer code material was prepared, in part, as an account of work
+ * sponsored by an agency of the United States Government. Neither the United
+ * States, nor the University of Chicago, nor any of their employees, makes any
+ * warranty express or implied, or assumes any legal liability or responsibility
+ * for the accuracy, completeness, or usefulness of any information, apparatus,
+ * product, or process disclosed, or represents that its use would not infringe
+ * privately owned rights.
+ *
+ */
+
+#ifndef MPI_INCLUDED
+#define MPI_INCLUDED
+
+#include <stdint.h>
+#ifndef MSMPI_NO_SAL
+#include <sal.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+#ifndef MSMPI_VER
+#define MSMPI_VER 0x100
+#endif
+
+
+/*---------------------------------------------------------------------------*/
+/* SAL ANNOTATIONS */
+/*---------------------------------------------------------------------------*/
+/*
+ * Define SAL annotations if they aren't defined yet.
+ */
+#ifndef _Success_
+#define _Success_( x )
+#endif
+#ifndef _Notref_
+#define _Notref_
+#endif
+#ifndef _When_
+#define _When_( x, y )
+#endif
+#ifndef _Pre_valid_
+#define _Pre_valid_
+#endif
+#ifndef _Pre_opt_valid_
+#define _Pre_opt_valid_
+#endif
+#ifndef _Post_invalid_
+#define _Post_invalid_
+#endif
+#ifndef _In_
+#define _In_
+#endif
+#ifndef _In_z_
+#define _In_z_
+#endif
+#ifndef _In_opt_
+#define _In_opt_
+#endif
+#ifndef _In_range_
+#define _In_range_( x, y )
+#endif
+#ifndef _In_reads_
+#define _In_reads_( x )
+#endif
+#ifndef _In_reads_z_
+#define _In_reads_z_( x )
+#endif
+#ifndef _In_reads_opt_
+#define _In_reads_opt_( x )
+#endif
+#ifndef _In_reads_bytes_opt_
+#define _In_reads_bytes_opt_( x )
+#endif
+#ifndef _Out_
+#define _Out_
+#endif
+#ifndef _Out_opt_
+#define _Out_opt_
+#endif
+#ifndef _Out_writes_z_
+#define _Out_writes_z_( x )
+#endif
+#ifndef _Out_writes_opt_
+#define _Out_writes_opt_( x )
+#endif
+#ifndef _Out_writes_to_opt_
+#define _Out_writes_to_opt_( x, y )
+#endif
+#ifndef _Out_writes_bytes_opt_
+#define _Out_writes_bytes_opt_( x )
+#endif
+#ifndef _Inout_
+#define _Inout_
+#endif
+#ifndef _Inout_opt_
+#define _Inout_opt_
+#endif
+#ifndef _Inout_updates_opt_
+#define _Inout_updates_opt_( x )
+#endif
+#ifndef _Deref_in_range_
+#define _Deref_in_range_( x, y )
+#endif
+#ifndef _Deref_out_range_
+#define _Deref_out_range_( x, y )
+#endif
+#ifndef _Pre_satisfies_
+#define _Pre_satisfies_( x )
+#endif
+#ifndef _Post_satisfies_
+#define _Post_satisfies_( x )
+#endif
+#ifndef _Post_equal_to_
+#define _Post_equal_to_( x )
+#endif
+
+#define _mpi_updates_(size) _When_(size != 0, _Inout_updates_(size))
+#define _mpi_reads_(size) _When_(size != 0, _In_reads_(size))
+#define _mpi_reads_bytes_(size) _When_(size != 0, _In_reads_bytes_(size))
+#define _mpi_writes_(size) _When_(size != 0, _Out_writes_(size))
+#define _mpi_writes_bytes_(size) _When_(size != 0, _Out_writes_bytes_(size))
+#define _mpi_writes_to_(size, count) _When_(size != 0, _Out_writes_to_(size, count))
+#define _mpi_out_flag_ _Out_ _Deref_out_range_(0, 1)
+#define _mpi_out_(param, sentinel) _Out_ _Post_satisfies_(*param == sentinel || *param >= 0)
+#define _mpi_out_range_(param, sentinel, ub) \
+ _Out_ _Post_satisfies_(*param == sentinel || (ub > 0 && *param >= 0 && *param <= ub))
+#define _mpi_position_(ub) _Inout_ _Deref_in_range_(0, ub) _Deref_out_range_(0, ub)
+#define _mpi_coll_rank_(param) _In_ _Pre_satisfies_(param == MPI_ROOT || param >= MPI_PROC_NULL)
+
+/*---------------------------------------------------------------------------*/
+/* MSMPI Calling convention */
+/*---------------------------------------------------------------------------*/
+
+#define MPIAPI __stdcall
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI ERROR CLASS */
+/*---------------------------------------------------------------------------*/
+
+#define MPI_SUCCESS 0 /* Successful return code */
+
+#define MPI_ERR_BUFFER 1 /* Invalid buffer pointer */
+#define MPI_ERR_COUNT 2 /* Invalid count argument */
+#define MPI_ERR_TYPE 3 /* Invalid datatype argument */
+#define MPI_ERR_TAG 4 /* Invalid tag argument */
+#define MPI_ERR_COMM 5 /* Invalid communicator */
+#define MPI_ERR_RANK 6 /* Invalid rank */
+#define MPI_ERR_ROOT 7 /* Invalid root */
+#define MPI_ERR_GROUP 8 /* Invalid group */
+#define MPI_ERR_OP 9 /* Invalid operation */
+#define MPI_ERR_TOPOLOGY 10 /* Invalid topology */
+#define MPI_ERR_DIMS 11 /* Invalid dimension argument */
+#define MPI_ERR_ARG 12 /* Invalid argument */
+#define MPI_ERR_UNKNOWN 13 /* Unknown error */
+#define MPI_ERR_TRUNCATE 14 /* Message truncated on receive */
+#define MPI_ERR_OTHER 15 /* Other error; use Error_string */
+#define MPI_ERR_INTERN 16 /* Internal error code */
+#define MPI_ERR_IN_STATUS 17 /* Error code is in status */
+#define MPI_ERR_PENDING 18 /* Pending request */
+#define MPI_ERR_REQUEST 19 /* Invalid request (handle) */
+#define MPI_ERR_ACCESS 20 /* Permission denied */
+#define MPI_ERR_AMODE 21 /* Error related to amode passed to MPI_File_open */
+#define MPI_ERR_BAD_FILE 22 /* Invalid file name (e.g., path name too long) */
+#define MPI_ERR_CONVERSION 23 /* Error in user data conversion function */
+#define MPI_ERR_DUP_DATAREP 24 /* Data representation identifier already registered */
+#define MPI_ERR_FILE_EXISTS 25 /* File exists */
+#define MPI_ERR_FILE_IN_USE 26 /* File operation could not be completed, file in use */
+#define MPI_ERR_FILE 27 /* Invalid file handle */
+#define MPI_ERR_INFO 28 /* Invalid info argument */
+#define MPI_ERR_INFO_KEY 29 /* Key longer than MPI_MAX_INFO_KEY */
+#define MPI_ERR_INFO_VALUE 30 /* Value longer than MPI_MAX_INFO_VAL */
+#define MPI_ERR_INFO_NOKEY 31 /* Invalid key passed to MPI_Info_delete */
+#define MPI_ERR_IO 32 /* Other I/O error */
+#define MPI_ERR_NAME 33 /* Invalid service name in MPI_Lookup_name */
+#define MPI_ERR_NO_MEM 34 /* Alloc_mem could not allocate memory */
+#define MPI_ERR_NOT_SAME 35 /* Collective argument/sequence not the same on all processes */
+#define MPI_ERR_NO_SPACE 36 /* Not enough space */
+#define MPI_ERR_NO_SUCH_FILE 37 /* File does not exist */
+#define MPI_ERR_PORT 38 /* Invalid port name in MPI_Comm_connect */
+#define MPI_ERR_QUOTA 39 /* Quota exceeded */
+#define MPI_ERR_READ_ONLY 40 /* Read-only file or file system */
+#define MPI_ERR_SERVICE 41 /* Invalid service name in MPI_Unpublish_name */
+#define MPI_ERR_SPAWN 42 /* Error in spawning processes */
+#define MPI_ERR_UNSUPPORTED_DATAREP 43 /* Unsupported datarep in MPI_File_set_view */
+#define MPI_ERR_UNSUPPORTED_OPERATION 44 /* Unsupported operation on file */
+#define MPI_ERR_WIN 45 /* Invalid win argument */
+#define MPI_ERR_BASE 46 /* Invalid base passed to MPI_Free_mem */
+#define MPI_ERR_LOCKTYPE 47 /* Invalid locktype argument */
+#define MPI_ERR_KEYVAL 48 /* Invalid keyval */
+#define MPI_ERR_RMA_CONFLICT 49 /* Conflicting accesses to window */
+#define MPI_ERR_RMA_SYNC 50 /* Wrong synchronization of RMA calls */
+#define MPI_ERR_SIZE 51 /* Invalid size argument */
+#define MPI_ERR_DISP 52 /* Invalid disp argument */
+#define MPI_ERR_ASSERT 53 /* Invalid assert argument */
+
+#define MPI_ERR_LASTCODE 0x3fffffff /* Last valid error code for a predefined error class */
+
+#define MPICH_ERR_LAST_CLASS 53
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI Basic integer types */
+/*---------------------------------------------------------------------------*/
+
+/* Address size integer */
+#ifdef _WIN64
+typedef int64_t MPI_Aint;
+#else
+typedef int MPI_Aint;
+#endif
+
+/* Fortran INTEGER */
+typedef int MPI_Fint;
+
+/* File offset */
+typedef int64_t MPI_Offset;
+
+//
+// MPI-3 standard defines this type that can be used to address locations
+// within either memory or files as well as express count values.
+//
+typedef int64_t MPI_Count;
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Datatype */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Datatype;
+#define MPI_DATATYPE_NULL ((MPI_Datatype)0x0c000000)
+
+#define MPI_CHAR ((MPI_Datatype)0x4c000101)
+#define MPI_UNSIGNED_CHAR ((MPI_Datatype)0x4c000102)
+#define MPI_SHORT ((MPI_Datatype)0x4c000203)
+#define MPI_UNSIGNED_SHORT ((MPI_Datatype)0x4c000204)
+#define MPI_INT ((MPI_Datatype)0x4c000405)
+#define MPI_UNSIGNED ((MPI_Datatype)0x4c000406)
+#define MPI_LONG ((MPI_Datatype)0x4c000407)
+#define MPI_UNSIGNED_LONG ((MPI_Datatype)0x4c000408)
+#define MPI_LONG_LONG_INT ((MPI_Datatype)0x4c000809)
+#define MPI_LONG_LONG MPI_LONG_LONG_INT
+#define MPI_FLOAT ((MPI_Datatype)0x4c00040a)
+#define MPI_DOUBLE ((MPI_Datatype)0x4c00080b)
+#define MPI_LONG_DOUBLE ((MPI_Datatype)0x4c00080c)
+#define MPI_BYTE ((MPI_Datatype)0x4c00010d)
+#define MPI_WCHAR ((MPI_Datatype)0x4c00020e)
+
+#define MPI_PACKED ((MPI_Datatype)0x4c00010f)
+#define MPI_LB ((MPI_Datatype)0x4c000010)
+#define MPI_UB ((MPI_Datatype)0x4c000011)
+
+#define MPI_C_COMPLEX ((MPI_Datatype)0x4c000812)
+#define MPI_C_FLOAT_COMPLEX ((MPI_Datatype)0x4c000813)
+#define MPI_C_DOUBLE_COMPLEX ((MPI_Datatype)0x4c001014)
+#define MPI_C_LONG_DOUBLE_COMPLEX ((MPI_Datatype)0x4c001015)
+
+#define MPI_2INT ((MPI_Datatype)0x4c000816)
+#define MPI_C_BOOL ((MPI_Datatype)0x4c000117)
+#define MPI_SIGNED_CHAR ((MPI_Datatype)0x4c000118)
+#define MPI_UNSIGNED_LONG_LONG ((MPI_Datatype)0x4c000819)
+
+/* Fortran types */
+#define MPI_CHARACTER ((MPI_Datatype)0x4c00011a)
+#define MPI_INTEGER ((MPI_Datatype)0x4c00041b)
+#define MPI_REAL ((MPI_Datatype)0x4c00041c)
+#define MPI_LOGICAL ((MPI_Datatype)0x4c00041d)
+#define MPI_COMPLEX ((MPI_Datatype)0x4c00081e)
+#define MPI_DOUBLE_PRECISION ((MPI_Datatype)0x4c00081f)
+#define MPI_2INTEGER ((MPI_Datatype)0x4c000820)
+#define MPI_2REAL ((MPI_Datatype)0x4c000821)
+#define MPI_DOUBLE_COMPLEX ((MPI_Datatype)0x4c001022)
+#define MPI_2DOUBLE_PRECISION ((MPI_Datatype)0x4c001023)
+#define MPI_2COMPLEX ((MPI_Datatype)0x4c001024)
+#define MPI_2DOUBLE_COMPLEX ((MPI_Datatype)0x4c002025)
+
+/* Size-specific types (see MPI 2.2, 16.2.5) */
+#define MPI_REAL2 MPI_DATATYPE_NULL
+#define MPI_REAL4 ((MPI_Datatype)0x4c000427)
+#define MPI_COMPLEX8 ((MPI_Datatype)0x4c000828)
+#define MPI_REAL8 ((MPI_Datatype)0x4c000829)
+#define MPI_COMPLEX16 ((MPI_Datatype)0x4c00102a)
+#define MPI_REAL16 MPI_DATATYPE_NULL
+#define MPI_COMPLEX32 MPI_DATATYPE_NULL
+#define MPI_INTEGER1 ((MPI_Datatype)0x4c00012d)
+#define MPI_COMPLEX4 MPI_DATATYPE_NULL
+#define MPI_INTEGER2 ((MPI_Datatype)0x4c00022f)
+#define MPI_INTEGER4 ((MPI_Datatype)0x4c000430)
+#define MPI_INTEGER8 ((MPI_Datatype)0x4c000831)
+#define MPI_INTEGER16 MPI_DATATYPE_NULL
+#define MPI_INT8_T ((MPI_Datatype)0x4c000133)
+#define MPI_INT16_T ((MPI_Datatype)0x4c000234)
+#define MPI_INT32_T ((MPI_Datatype)0x4c000435)
+#define MPI_INT64_T ((MPI_Datatype)0x4c000836)
+#define MPI_UINT8_T ((MPI_Datatype)0x4c000137)
+#define MPI_UINT16_T ((MPI_Datatype)0x4c000238)
+#define MPI_UINT32_T ((MPI_Datatype)0x4c000439)
+#define MPI_UINT64_T ((MPI_Datatype)0x4c00083a)
+
+#ifdef _WIN64
+#define MPI_AINT ((MPI_Datatype)0x4c00083b)
+#else
+#define MPI_AINT ((MPI_Datatype)0x4c00043b)
+#endif
+#define MPI_OFFSET ((MPI_Datatype)0x4c00083c)
+#define MPI_COUNT ((MPI_Datatype)0x4c00083d)
+
+/*
+ * The layouts for the types MPI_DOUBLE_INT etc. are
+ *
+ * struct { double a; int b; }
+ */
+#define MPI_FLOAT_INT ((MPI_Datatype)0x8c000000)
+#define MPI_DOUBLE_INT ((MPI_Datatype)0x8c000001)
+#define MPI_LONG_INT ((MPI_Datatype)0x8c000002)
+#define MPI_SHORT_INT ((MPI_Datatype)0x8c000003)
+#define MPI_LONG_DOUBLE_INT ((MPI_Datatype)0x8c000004)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Comm */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Comm;
+#define MPI_COMM_NULL ((MPI_Comm)0x04000000)
+
+#define MPI_COMM_WORLD ((MPI_Comm)0x44000000)
+#define MPI_COMM_SELF ((MPI_Comm)0x44000001)
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Comm Split Types */
+/*---------------------------------------------------------------------------*/
+enum
+{
+ MPI_COMM_TYPE_SHARED = 1,
+};
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Win */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Win;
+#define MPI_WIN_NULL ((MPI_Win)0x20000000)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_File */
+/*---------------------------------------------------------------------------*/
+
+typedef struct ADIOI_FileD* MPI_File;
+#define MPI_FILE_NULL ((MPI_File)0)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Op */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Op;
+#define MPI_OP_NULL ((MPI_Op)0x18000000)
+
+#define MPI_MAX ((MPI_Op)0x58000001)
+#define MPI_MIN ((MPI_Op)0x58000002)
+#define MPI_SUM ((MPI_Op)0x58000003)
+#define MPI_PROD ((MPI_Op)0x58000004)
+#define MPI_LAND ((MPI_Op)0x58000005)
+#define MPI_BAND ((MPI_Op)0x58000006)
+#define MPI_LOR ((MPI_Op)0x58000007)
+#define MPI_BOR ((MPI_Op)0x58000008)
+#define MPI_LXOR ((MPI_Op)0x58000009)
+#define MPI_BXOR ((MPI_Op)0x5800000a)
+#define MPI_MINLOC ((MPI_Op)0x5800000b)
+#define MPI_MAXLOC ((MPI_Op)0x5800000c)
+#define MPI_REPLACE ((MPI_Op)0x5800000d)
+#define MPI_NO_OP ((MPI_Op)0x5800000e)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Info */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Info;
+#define MPI_INFO_NULL ((MPI_Info)0x1c000000)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Request */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Request;
+#define MPI_REQUEST_NULL ((MPI_Request)0x2c000000)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Group */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Group;
+#define MPI_GROUP_NULL ((MPI_Group)0x08000000)
+
+#define MPI_GROUP_EMPTY ((MPI_Group)0x48000000)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Errhandler */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Errhandler;
+#define MPI_ERRHANDLER_NULL ((MPI_Errhandler)0x14000000)
+
+#define MPI_ERRORS_ARE_FATAL ((MPI_Errhandler)0x54000000)
+#define MPI_ERRORS_RETURN ((MPI_Errhandler)0x54000001)
+
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Message */
+/*---------------------------------------------------------------------------*/
+
+typedef int MPI_Message;
+#define MPI_MESSAGE_NULL ((MPI_Message)0x30000000)
+#define MPI_MESSAGE_NO_PROC ((MPI_Message)0x70000000)
+
+/*---------------------------------------------------------------------------*/
+/* MPI_Status */
+/*---------------------------------------------------------------------------*/
+
+typedef struct MPI_Status
+{
+ int internal[2];
+
+ int MPI_SOURCE;
+ int MPI_TAG;
+ int MPI_ERROR;
+
+} MPI_Status;
+
+#define MPI_STATUS_IGNORE ((MPI_Status*)(MPI_Aint)1)
+#define MPI_STATUSES_IGNORE ((MPI_Status*)(MPI_Aint)1)
+
+
+/*---------------------------------------------------------------------------*/
+/* MISC CONSTANTS */
+/*---------------------------------------------------------------------------*/
+
+/* Used in: Count, Index, Rank, Color, Topology, Precision, Exponent range */
+#define MPI_UNDEFINED (-32766)
+
+/* Used in: Rank */
+#define MPI_PROC_NULL (-1)
+#define MPI_ANY_SOURCE (-2)
+#define MPI_ROOT (-3)
+
+/* Used in: Tag */
+#define MPI_ANY_TAG (-1)
+
+/* Used for: Buffer address */
+#define MPI_BOTTOM ((void*)0)
+#define MPI_UNWEIGHTED ((int*)1)
+#define MPI_WEIGHTS_EMPTY ((int*)2)
+
+/*---------------------------------------------------------------------------*/
+/* Macro for function return values. */
+/*---------------------------------------------------------------------------*/
+#define MPI_METHOD _Success_( return == MPI_SUCCESS ) int MPIAPI
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 3: Point-to-Point Communication */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 3.2: Blocking Communication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Send(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Send(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+MPI_Recv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Recv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+_Pre_satisfies_(status != MPI_STATUS_IGNORE)
+MPI_METHOD
+MPI_Get_count(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) int* count
+ );
+
+_Pre_satisfies_(status != MPI_STATUS_IGNORE)
+MPI_METHOD
+PMPI_Get_count(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) int* count
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.4: Communication Modes */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Bsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Bsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+MPI_Ssend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Ssend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+MPI_Rsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Rsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.6: Buffer Allocation */
+/*---------------------------------------------*/
+
+/* Upper bound on bsend overhead for each message */
+#define MSMPI_BSEND_OVERHEAD_V1 95
+#define MSMPI_BSEND_OVERHEAD_V2 MSMPI_BSEND_OVERHEAD_V1
+
+#if MSMPI_VER > 0x300
+# define MPI_BSEND_OVERHEAD MSMPI_Get_bsend_overhead()
+#else
+# define MPI_BSEND_OVERHEAD MSMPI_BSEND_OVERHEAD_V1
+#endif
+
+MPI_METHOD
+MPI_Buffer_attach(
+ _In_ void* buffer,
+ _In_range_(>=, 0) int size
+ );
+
+MPI_METHOD
+PMPI_Buffer_attach(
+ _In_ void* buffer,
+ _In_range_(>=, 0) int size
+ );
+
+MPI_METHOD
+MPI_Buffer_detach(
+ _Out_ void* buffer_addr,
+ _Out_ int* size
+ );
+
+MPI_METHOD
+PMPI_Buffer_detach(
+ _Out_ void* buffer_addr,
+ _Out_ int* size
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.7: Nonblocking Communication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Isend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Isend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Ibsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Ibsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Issend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Issend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Irsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Irsend(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Irecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Irecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.7.3: Communication Completion */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Wait(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Wait(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Test(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Test(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Request_free(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Request_free(
+ _Inout_ _Post_equal_to_(MPI_REQUEST_NULL) MPI_Request* request
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.7.5: Multiple Completions */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Waitany(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_range_(index, MPI_UNDEFINED, (count - 1)) int* index,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Waitany(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_range_(index, MPI_UNDEFINED, (count - 1)) int* index,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Testany(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_range_(index, MPI_UNDEFINED, (count - 1)) int* index,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Testany(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_range_(index, MPI_UNDEFINED, (count - 1)) int* index,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Waitall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _Out_writes_opt_(count) MPI_Status array_of_statuses[]
+ );
+
+MPI_METHOD
+PMPI_Waitall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _Out_writes_opt_(count) MPI_Status array_of_statuses[]
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Testall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_flag_ int* flag,
+ _Out_writes_opt_(count) MPI_Status array_of_statuses[]
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Testall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[],
+ _mpi_out_flag_ int* flag,
+ _Out_writes_opt_(count) MPI_Status array_of_statuses[]
+);
+
+MPI_METHOD
+MPI_Waitsome(
+ _In_range_(>=, 0) int incount,
+ _mpi_updates_(incount) MPI_Request array_of_requests[],
+ _mpi_out_range_(outcount, MPI_UNDEFINED, incount) int* outcount,
+ _mpi_writes_to_(incount,*outcount) int array_of_indices[],
+ _Out_writes_to_opt_(incount, *outcount) MPI_Status array_of_statuses[]
+ );
+
+MPI_METHOD
+PMPI_Waitsome(
+ _In_range_(>=, 0) int incount,
+ _mpi_updates_(incount) MPI_Request array_of_requests[],
+ _mpi_out_range_(outcount, MPI_UNDEFINED, incount) int* outcount,
+ _mpi_writes_to_(incount,*outcount) int array_of_indices[],
+ _Out_writes_to_opt_(incount, *outcount) MPI_Status array_of_statuses[]
+ );
+
+_Success_(return == MPI_SUCCESS && *outcount > 0)
+int
+MPIAPI
+MPI_Testsome(
+ _In_range_(>=, 0) int incount,
+ _mpi_updates_(incount) MPI_Request array_of_requests[],
+ _mpi_out_range_(outcount, MPI_UNDEFINED, incount) int* outcount,
+ _mpi_writes_to_(incount,*outcount) int array_of_indices[],
+ _Out_writes_to_opt_(incount, *outcount) MPI_Status array_of_statuses[]
+ );
+
+_Success_(return == MPI_SUCCESS && *outcount > 0)
+int
+MPIAPI
+PMPI_Testsome(
+ _In_range_(>=, 0) int incount,
+ _mpi_updates_(incount) MPI_Request array_of_requests[],
+ _mpi_out_range_(outcount, MPI_UNDEFINED, incount) int* outcount,
+ _mpi_writes_to_(incount,*outcount) int array_of_indices[],
+ _Out_writes_to_opt_(incount, *outcount) MPI_Status array_of_statuses[]
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.7.6: Test of status */
+/*---------------------------------------------*/
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Request_get_status(
+ _In_ MPI_Request request,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Request_get_status(
+ _In_ MPI_Request request,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.8: Probe and Cancel */
+/*---------------------------------------------*/
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Iprobe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Iprobe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Probe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Probe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+MPI_Improbe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+_Success_(return == MPI_SUCCESS && *flag != 0)
+int
+MPIAPI
+PMPI_Improbe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag,
+ _Out_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Mprobe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Mprobe(
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Mrecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Inout_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Mrecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Inout_ MPI_Message* message,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Imrecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Inout_ MPI_Message* message,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Imrecv(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Inout_ MPI_Message* message,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(*request != MPI_REQUEST_NULL)
+MPI_METHOD
+MPI_Cancel(
+ _In_ MPI_Request* request
+ );
+
+_Pre_satisfies_(*request != MPI_REQUEST_NULL)
+MPI_METHOD
+PMPI_Cancel(
+ _In_ MPI_Request* request
+ );
+
+_Pre_satisfies_(status != MPI_STATUS_IGNORE)
+MPI_METHOD
+MPI_Test_cancelled(
+ _In_ const MPI_Status* status,
+ _mpi_out_flag_ int* flag
+ );
+
+_Pre_satisfies_(status != MPI_STATUS_IGNORE)
+MPI_METHOD
+PMPI_Test_cancelled(
+ _In_ const MPI_Status* status,
+ _mpi_out_flag_ int* flag
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.9: Persistent Communication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Send_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Send_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Bsend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Bsend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Ssend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Ssend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Rsend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Rsend_init(
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Recv_init(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Recv_init(
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int tag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(*request != MPI_REQUEST_NULL)
+MPI_METHOD
+MPI_Start(
+ _Inout_ MPI_Request* request
+ );
+
+_Pre_satisfies_(*request != MPI_REQUEST_NULL)
+MPI_METHOD
+PMPI_Start(
+ _Inout_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Startall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[]
+ );
+
+MPI_METHOD
+PMPI_Startall(
+ _In_range_(>=, 0) int count,
+ _mpi_updates_(count) MPI_Request array_of_requests[]
+ );
+
+
+/*---------------------------------------------*/
+/* Section 3.10: Send-Recv */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Sendrecv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int sendtag,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int recvtag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Sendrecv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int sendtag,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int recvtag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_Sendrecv_replace(
+ _Inout_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int sendtag,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int recvtag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_Sendrecv_replace(
+ _Inout_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, MPI_PROC_NULL) int dest,
+ _In_range_(>=, 0) int sendtag,
+ _In_range_(>=, MPI_ANY_SOURCE) int source,
+ _In_range_(>=, MPI_ANY_TAG) int recvtag,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Status* status
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 4: Datatypes */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 4.1: Derived Datatypes */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_contiguous(
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_contiguous(
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_vector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ int stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_vector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ int stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_hvector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ MPI_Aint stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_hvector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ MPI_Aint stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_indexed(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const int array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_indexed(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const int array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_hindexed(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_hindexed(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_hindexed_block(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_hindexed_block(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_indexed_block(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _mpi_reads_(count) const int array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_indexed_block(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _mpi_reads_(count) const int array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_struct(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _mpi_reads_(count) const MPI_Datatype array_of_types[],
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_struct(
+ _In_range_(>=, 0) int count,
+ _mpi_reads_(count) const int array_of_blocklengths[],
+ _mpi_reads_(count) const MPI_Aint array_of_displacements[],
+ _mpi_reads_(count) const MPI_Datatype array_of_types[],
+ _Out_ MPI_Datatype* newtype
+ );
+
+
+#define MPI_ORDER_C 56
+#define MPI_ORDER_FORTRAN 57
+
+MPI_METHOD
+MPI_Type_create_subarray(
+ _In_range_(>=, 0) int ndims,
+ _mpi_reads_(ndims) const int array_of_sizes[],
+ _mpi_reads_(ndims) const int array_of_subsizes[],
+ _mpi_reads_(ndims) const int array_of_starts[],
+ _In_range_(MPI_ORDER_C, MPI_ORDER_FORTRAN) int order,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_subarray(
+    _In_range_(>=, 0) int ndims,
+    _mpi_reads_(ndims) const int array_of_sizes[],
+    _mpi_reads_(ndims) const int array_of_subsizes[],
+    _mpi_reads_(ndims) const int array_of_starts[],
+    _In_range_(MPI_ORDER_C, MPI_ORDER_FORTRAN) int order,
+    _In_ MPI_Datatype oldtype,
+    _Out_ MPI_Datatype* newtype
+    );
+
+
+#define MPI_DISTRIBUTE_BLOCK 121
+#define MPI_DISTRIBUTE_CYCLIC 122
+#define MPI_DISTRIBUTE_NONE 123
+#define MPI_DISTRIBUTE_DFLT_DARG (-49767)
+
+_Pre_satisfies_(
+ order == MPI_DISTRIBUTE_DFLT_DARG ||
+ (order >= MPI_DISTRIBUTE_BLOCK && order <= MPI_DISTRIBUTE_NONE)
+ )
+MPI_METHOD
+MPI_Type_create_darray(
+ _In_range_(>=, 0) int size,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int ndims,
+ _mpi_reads_(ndims) const int array_of_gsizes[],
+ _mpi_reads_(ndims) const int array_of_distribs[],
+ _mpi_reads_(ndims) const int array_of_dargs[],
+ _mpi_reads_(ndims) const int array_of_psizes[],
+ _In_ int order,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+_Pre_satisfies_(
+ order == MPI_DISTRIBUTE_DFLT_DARG ||
+ (order >= MPI_DISTRIBUTE_BLOCK && order <= MPI_DISTRIBUTE_NONE)
+ )
+MPI_METHOD
+PMPI_Type_create_darray(
+ _In_range_(>=, 0) int size,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int ndims,
+ _mpi_reads_(ndims) const int array_of_gsizes[],
+ _mpi_reads_(ndims) const int array_of_distribs[],
+ _mpi_reads_(ndims) const int array_of_dargs[],
+ _mpi_reads_(ndims) const int array_of_psizes[],
+ _In_ int order,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.5: Datatype Address and Size */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Get_address(
+ _In_ const void* location,
+ _Out_ MPI_Aint* address
+ );
+
+MPI_METHOD
+PMPI_Get_address(
+ _In_ const void* location,
+ _Out_ MPI_Aint* address
+ );
+
+MPI_METHOD
+MPI_Type_size(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(size, MPI_UNDEFINED) int* size
+ );
+
+MPI_METHOD
+PMPI_Type_size(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(size, MPI_UNDEFINED) int* size
+ );
+
+MPI_METHOD
+MPI_Type_size_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(size, MPI_UNDEFINED) MPI_Count *size
+ );
+
+MPI_METHOD
+PMPI_Type_size_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(size, MPI_UNDEFINED) MPI_Count *size
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.7: Datatype Extent and Bounds */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_get_extent(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(lb, MPI_UNDEFINED) MPI_Aint* lb,
+ _mpi_out_(extent, MPI_UNDEFINED) MPI_Aint* extent
+ );
+
+MPI_METHOD
+PMPI_Type_get_extent(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(lb, MPI_UNDEFINED) MPI_Aint* lb,
+ _mpi_out_(extent, MPI_UNDEFINED) MPI_Aint* extent
+ );
+
+MPI_METHOD
+MPI_Type_get_extent_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(lb, MPI_UNDEFINED) MPI_Count *lb,
+ _mpi_out_(extent, MPI_UNDEFINED) MPI_Count *extent
+ );
+
+MPI_METHOD
+PMPI_Type_get_extent_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(lb, MPI_UNDEFINED) MPI_Count *lb,
+ _mpi_out_(extent, MPI_UNDEFINED) MPI_Count *extent
+ );
+
+MPI_METHOD
+MPI_Type_create_resized(
+ _In_ MPI_Datatype oldtype,
+ _In_ MPI_Aint lb,
+ _In_range_(>=, 0) MPI_Aint extent,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_resized(
+ _In_ MPI_Datatype oldtype,
+ _In_ MPI_Aint lb,
+ _In_range_(>=, 0) MPI_Aint extent,
+ _Out_ MPI_Datatype* newtype
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.8: Datatype True Extent */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_get_true_extent(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(true_lb, MPI_UNDEFINED) MPI_Aint* true_lb,
+ _mpi_out_(true_extent, MPI_UNDEFINED) MPI_Aint* true_extent
+ );
+
+MPI_METHOD
+PMPI_Type_get_true_extent(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(true_lb, MPI_UNDEFINED) MPI_Aint* true_lb,
+ _mpi_out_(true_extent, MPI_UNDEFINED) MPI_Aint* true_extent
+ );
+
+MPI_METHOD
+MPI_Type_get_true_extent_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(true_lb, MPI_UNDEFINED) MPI_Count *true_lb,
+ _mpi_out_(true_extent, MPI_UNDEFINED) MPI_Count *true_extent
+ );
+
+MPI_METHOD
+PMPI_Type_get_true_extent_x(
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(true_lb, MPI_UNDEFINED) MPI_Count *true_lb,
+ _mpi_out_(true_extent, MPI_UNDEFINED) MPI_Count *true_extent
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.9: Datatype Commit and Free */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_commit(
+ _In_ MPI_Datatype* datatype
+ );
+
+MPI_METHOD
+PMPI_Type_commit(
+ _In_ MPI_Datatype* datatype
+ );
+
+MPI_METHOD
+MPI_Type_free(
+ _Deref_out_range_(==, MPI_DATATYPE_NULL) _Inout_ MPI_Datatype* datatype
+ );
+
+MPI_METHOD
+PMPI_Type_free(
+ _Deref_out_range_(==, MPI_DATATYPE_NULL) _Inout_ MPI_Datatype* datatype
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.10: Datatype Duplication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_dup(
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_dup(
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.11: Datatype and Communication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Get_elements(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) int* count
+ );
+
+MPI_METHOD
+PMPI_Get_elements(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) int* count
+ );
+
+MPI_METHOD
+MPI_Get_elements_x(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) MPI_Count *count
+ );
+
+MPI_METHOD
+PMPI_Get_elements_x(
+ _In_ const MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _mpi_out_(count, MPI_UNDEFINED) MPI_Count *count
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.1.13: Decoding a Datatype */
+/*---------------------------------------------*/
+
+/* Datatype combiners result */
+enum
+{
+ MPI_COMBINER_NAMED = 1,
+ MPI_COMBINER_DUP = 2,
+ MPI_COMBINER_CONTIGUOUS = 3,
+ MPI_COMBINER_VECTOR = 4,
+ MPI_COMBINER_HVECTOR_INTEGER = 5,
+ MPI_COMBINER_HVECTOR = 6,
+ MPI_COMBINER_INDEXED = 7,
+ MPI_COMBINER_HINDEXED_INTEGER = 8,
+ MPI_COMBINER_HINDEXED = 9,
+ MPI_COMBINER_INDEXED_BLOCK = 10,
+ MPI_COMBINER_STRUCT_INTEGER = 11,
+ MPI_COMBINER_STRUCT = 12,
+ MPI_COMBINER_SUBARRAY = 13,
+ MPI_COMBINER_DARRAY = 14,
+ MPI_COMBINER_F90_REAL = 15,
+ MPI_COMBINER_F90_COMPLEX = 16,
+ MPI_COMBINER_F90_INTEGER = 17,
+ MPI_COMBINER_RESIZED = 18,
+ MPI_COMBINER_HINDEXED_BLOCK = 19
+};
+
+MPI_METHOD
+MPI_Type_get_envelope(
+    _In_ MPI_Datatype datatype,
+    _Out_ _Deref_out_range_(>=, 0) int* num_integers,
+    _Out_ _Deref_out_range_(>=, 0) int* num_addresses,
+    _Out_ _Deref_out_range_(>=, 0) int* num_datatypes,
+    _Out_ _Deref_out_range_(MPI_COMBINER_NAMED, MPI_COMBINER_HINDEXED_BLOCK) int* combiner
+    );
+
+MPI_METHOD
+PMPI_Type_get_envelope(
+    _In_ MPI_Datatype datatype,
+    _Out_ _Deref_out_range_(>=, 0) int* num_integers,
+    _Out_ _Deref_out_range_(>=, 0) int* num_addresses,
+    _Out_ _Deref_out_range_(>=, 0) int* num_datatypes,
+    _Out_ _Deref_out_range_(MPI_COMBINER_NAMED, MPI_COMBINER_HINDEXED_BLOCK) int* combiner
+    );
+
+MPI_METHOD
+MPI_Type_get_contents(
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) int max_integers,
+ _In_range_(>=, 0) int max_addresses,
+ _In_range_(>=, 0) int max_datatypes,
+ _mpi_writes_(max_integers) int array_of_integers[],
+ _mpi_writes_(max_addresses) MPI_Aint array_of_addresses[],
+ _mpi_writes_(max_datatypes) MPI_Datatype array_of_datatypes[]
+ );
+
+MPI_METHOD
+PMPI_Type_get_contents(
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) int max_integers,
+ _In_range_(>=, 0) int max_addresses,
+ _In_range_(>=, 0) int max_datatypes,
+ _mpi_writes_(max_integers) int array_of_integers[],
+ _mpi_writes_(max_addresses) MPI_Aint array_of_addresses[],
+ _mpi_writes_(max_datatypes) MPI_Datatype array_of_datatypes[]
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.2: Datatype Pack and Unpack */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Pack(
+ _In_opt_ const void* inbuf,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _mpi_writes_bytes_(outsize) void* outbuf,
+ _In_range_(>=, 0) int outsize,
+ _mpi_position_(outsize) int* position,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Pack(
+ _In_opt_ const void* inbuf,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _mpi_writes_bytes_(outsize) void* outbuf,
+ _In_range_(>=, 0) int outsize,
+ _mpi_position_(outsize) int* position,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+MPI_Unpack(
+ _mpi_reads_bytes_(insize) const void* inbuf,
+ _In_range_(>=, 0) int insize,
+ _mpi_position_(insize) int* position,
+ _When_(insize > 0, _Out_opt_) void* outbuf,
+ _In_range_(>=, 0) int outcount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Unpack(
+ _mpi_reads_bytes_(insize) const void* inbuf,
+ _In_range_(>=, 0) int insize,
+ _mpi_position_(insize) int* position,
+ _When_(insize > 0, _Out_opt_) void* outbuf,
+ _In_range_(>=, 0) int outcount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+MPI_Pack_size(
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Comm comm,
+ _mpi_out_(size, MPI_UNDEFINED) int *size
+ );
+
+MPI_METHOD
+PMPI_Pack_size(
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Comm comm,
+ _mpi_out_(size, MPI_UNDEFINED) int *size
+ );
+
+
+/*---------------------------------------------*/
+/* Section 4.3: Canonical Pack and Unpack */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Pack_external(
+ _In_z_ const char* datarep,
+ _In_opt_ const void* inbuf,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _mpi_writes_bytes_(outsize) void* outbuf,
+ _In_range_(>=, 0) MPI_Aint outsize,
+ _mpi_position_(outsize) MPI_Aint* position
+ );
+
+MPI_METHOD
+PMPI_Pack_external(
+ _In_z_ const char* datarep,
+ _In_opt_ const void* inbuf,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _mpi_writes_bytes_(outsize) void* outbuf,
+ _In_range_(>=, 0) MPI_Aint outsize,
+ _mpi_position_(outsize) MPI_Aint* position
+ );
+
+MPI_METHOD
+MPI_Unpack_external(
+ _In_z_ const char* datarep,
+ _In_reads_bytes_opt_(insize) const void* inbuf,
+ _In_range_(>=, 0) MPI_Aint insize,
+ _mpi_position_(insize) MPI_Aint* position,
+ _When_(insize > 0, _Out_opt_) void* outbuf,
+ _In_range_(>=, 0) int outcount,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_Unpack_external(
+ _In_z_ const char* datarep,
+ _In_reads_bytes_opt_(insize) const void* inbuf,
+ _In_range_(>=, 0) MPI_Aint insize,
+ _mpi_position_(insize) MPI_Aint* position,
+ _When_(insize > 0, _Out_opt_) void* outbuf,
+ _In_range_(>=, 0) int outcount,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_Pack_external_size(
+ _In_z_ const char* datarep,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* size
+ );
+
+MPI_METHOD
+PMPI_Pack_external_size(
+ _In_z_ const char* datarep,
+ _In_range_(>=, 0) int incount,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* size
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 5: Collective Communication */
+/*---------------------------------------------------------------------------*/
+
+#define MPI_IN_PLACE ((void*)(MPI_Aint)-1)
+
+/*---------------------------------------------*/
+/* Section 5.3: Barrier Synchronization */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Barrier(
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Barrier(
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.4: Broadcast */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Bcast(
+ _Pre_opt_valid_ void* buffer,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+MPI_METHOD
+PMPI_Bcast(
+ _Pre_opt_valid_ void* buffer,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.5: Gather */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Gather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Gather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Gatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_opt_ const int recvcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Gatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_opt_ const int recvcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.6: Scatter */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Scatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Scatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Scatterv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Scatterv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.7: Gather-to-all                  */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Allgather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Allgather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Allgatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Allgatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.8: All-to-All Scatter/Gather      */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Alltoall(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Alltoall(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Alltoallv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Alltoallv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Alltoallw(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_opt_ const MPI_Datatype sendtypes[],
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ const MPI_Datatype recvtypes[],
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Alltoallw(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_opt_ const MPI_Datatype sendtypes[],
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ const MPI_Datatype recvtypes[],
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.9: Global Reduction Operations */
+/*---------------------------------------------*/
+
+typedef
+void
+(MPIAPI MPI_User_function)(
+ _In_opt_ void* invec,
+ _Inout_opt_ void* inoutvec,
+ _In_ int* len,
+ _In_ MPI_Datatype* datatype
+ );
+
+MPI_METHOD
+MPI_Op_commutative(
+ _In_ MPI_Op op,
+ _Out_ int* commute
+ );
+
+MPI_METHOD
+PMPI_Op_commutative(
+ _In_ MPI_Op op,
+ _Out_ int* commute
+ );
+
+MPI_METHOD
+MPI_Op_create(
+ _In_ MPI_User_function* user_fn,
+ _In_ int commute,
+ _Out_ MPI_Op* op
+ );
+
+MPI_METHOD
+PMPI_Op_create(
+ _In_ MPI_User_function* user_fn,
+ _In_ int commute,
+ _Out_ MPI_Op* op
+ );
+
+MPI_METHOD
+MPI_Op_free(
+ _Inout_ MPI_Op* op
+ );
+
+MPI_METHOD
+PMPI_Op_free(
+ _Inout_ MPI_Op* op
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Reduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Reduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Allreduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Allreduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(inbuf != MPI_IN_PLACE)
+_Pre_satisfies_(inoutbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Reduce_local(
+ _In_opt_ _In_range_(!=, inoutbuf) const void *inbuf,
+ _Inout_opt_ void *inoutbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op
+ );
+
+_Pre_satisfies_(inbuf != MPI_IN_PLACE)
+_Pre_satisfies_(inoutbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Reduce_local(
+ _In_opt_ _In_range_(!=, inoutbuf) const void *inbuf,
+ _Inout_opt_ void *inoutbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op
+ );
+
+/*---------------------------------------------*/
+/* Section 5.10: Reduce-Scatter */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Reduce_scatter_block(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=,0) int recvcount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Reduce_scatter_block(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Reduce_scatter(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Reduce_scatter(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 5.11: Scan */
+/*---------------------------------------------*/
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Scan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Scan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Exscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Exscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm
+ );
+
+
+//
+// Section 5.12: Nonblocking Collective Operations
+//
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iallgather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iallgather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iallgatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iallgatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iallreduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iallreduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ialltoall(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ialltoall(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ialltoallv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ialltoallv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ MPI_Datatype recvtype,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ialltoallw(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_opt_ const MPI_Datatype sendtypes[],
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ const MPI_Datatype recvtypes[],
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ialltoallw(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int sdispls[],
+ _In_opt_ const MPI_Datatype sendtypes[],
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ const int rdispls[],
+ _In_ const MPI_Datatype recvtypes[],
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Ibarrier(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Ibarrier(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Ibcast(
+ _Pre_opt_valid_ void* buffer,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Ibcast(
+ _Pre_opt_valid_ void* buffer,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iexscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iexscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Igather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Igather(
+ _In_opt_ _When_(sendtype == recvtype, _In_range_(!=, recvbuf)) const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Igatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_opt_ const int recvcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Igatherv(
+ _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _Out_opt_ void* recvbuf,
+ _In_opt_ const int recvcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ireduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ireduce(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ireduce_scatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ireduce_scatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_ const int recvcounts[],
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iscan(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Ireduce_scatter_block(
+    _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+    _Out_opt_ void* recvbuf,
+    _In_range_(>=, 0) int recvcount,
+    _In_ MPI_Datatype datatype,
+    _In_ MPI_Op op,
+    _In_ MPI_Comm comm,
+    _Out_ MPI_Request* request
+    );
+
+_Pre_satisfies_(recvbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Ireduce_scatter_block(
+ _In_opt_ _In_range_(!=, recvbuf) const void* sendbuf,
+ _Out_opt_ void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iscatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iscatter(
+ _In_range_(!=, recvbuf) _In_opt_ const void* sendbuf,
+ _In_range_(>=, 0) int sendcount,
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+MPI_Iscatterv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+_Pre_satisfies_(sendbuf != MPI_IN_PLACE)
+MPI_METHOD
+PMPI_Iscatterv(
+ _In_opt_ const void* sendbuf,
+ _In_opt_ const int sendcounts[],
+ _In_opt_ const int displs[],
+ _In_ MPI_Datatype sendtype,
+ _When_(root != MPI_PROC_NULL, _Out_opt_) void* recvbuf,
+ _In_range_(>=, 0) int recvcount,
+ _In_ MPI_Datatype recvtype,
+ _mpi_coll_rank_(root) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Request* request
+ );
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 6: Groups, Contexts, Communicators, and Caching */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 6.3: Group Management */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Group_size(
+ _In_ MPI_Group group,
+ _Out_ _Deref_out_range_(>, 0) int* size
+ );
+
+MPI_METHOD
+PMPI_Group_size(
+ _In_ MPI_Group group,
+ _Out_ _Deref_out_range_(>, 0) int* size
+ );
+
+MPI_METHOD
+MPI_Group_rank(
+ _In_ MPI_Group group,
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* rank
+ );
+
+MPI_METHOD
+PMPI_Group_rank(
+ _In_ MPI_Group group,
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* rank
+ );
+
+MPI_METHOD
+MPI_Group_translate_ranks(
+ _In_ MPI_Group group1,
+ _In_ int n,
+ _In_reads_opt_(n) const int ranks1[],
+ _In_ MPI_Group group2,
+ _Out_writes_opt_(n) int ranks2[]
+ );
+
+MPI_METHOD
+PMPI_Group_translate_ranks(
+ _In_ MPI_Group group1,
+ _In_ int n,
+ _In_reads_opt_(n) const int ranks1[],
+ _In_ MPI_Group group2,
+ _Out_writes_opt_(n) int ranks2[]
+ );
+
+/* Results of the compare operations */
+#define MPI_IDENT 0
+#define MPI_CONGRUENT 1
+#define MPI_SIMILAR 2
+#define MPI_UNEQUAL 3
+
+MPI_METHOD
+MPI_Group_compare(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ int* result
+ );
+
+MPI_METHOD
+PMPI_Group_compare(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ int* result
+ );
+
+MPI_METHOD
+MPI_Comm_group(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+PMPI_Comm_group(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+MPI_Group_union(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_union(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_intersection(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_intersection(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_difference(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_difference(
+ _In_ MPI_Group group1,
+ _In_ MPI_Group group2,
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_incl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int ranks[],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_incl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int ranks[],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_excl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int ranks[],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_excl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int ranks[],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_range_incl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) int ranges[][3],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_range_incl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) int ranges[][3],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_range_excl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) int ranges[][3],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+PMPI_Group_range_excl(
+ _In_ MPI_Group group,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) int ranges[][3],
+ _Out_ MPI_Group* newgroup
+ );
+
+MPI_METHOD
+MPI_Group_free(
+ _Inout_ MPI_Group* group
+ );
+
+MPI_METHOD
+PMPI_Group_free(
+ _Inout_ MPI_Group* group
+ );
+
+
+/*---------------------------------------------*/
+/* Section 6.4: Communicator Management */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Comm_size(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>, 0) int* size
+ );
+
+MPI_METHOD
+PMPI_Comm_size(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>, 0) int* size
+ );
+
+MPI_METHOD
+MPI_Comm_rank(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>=, 0) int* rank
+ );
+
+MPI_METHOD
+PMPI_Comm_rank(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>=, 0) int* rank
+ );
+
+MPI_METHOD
+MPI_Comm_compare(
+ _In_ MPI_Comm comm1,
+ _In_ MPI_Comm comm2,
+ _Out_ int* result
+ );
+
+MPI_METHOD
+PMPI_Comm_compare(
+ _In_ MPI_Comm comm1,
+ _In_ MPI_Comm comm2,
+ _Out_ int* result
+ );
+
+MPI_METHOD
+MPI_Comm_dup(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_dup(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+MPI_Comm_create(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Group group,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_create(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Group group,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+MPI_Comm_split(
+ _In_ MPI_Comm comm,
+ _In_ int color,
+ _In_ int key,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_split(
+ _In_ MPI_Comm comm,
+ _In_ int color,
+ _In_ int key,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+MPI_Comm_split_type(
+ _In_ MPI_Comm comm,
+ _In_ int split_type,
+ _In_ int key,
+ _In_ MPI_Info info,
+ _Out_ MPI_Comm *newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_split_type(
+ _In_ MPI_Comm comm,
+ _In_ int split_type,
+ _In_ int key,
+ _In_ MPI_Info info,
+ _Out_ MPI_Comm *newcomm
+ );
+
+MPI_METHOD
+MPI_Comm_free(
+ _Inout_ MPI_Comm* comm
+ );
+
+MPI_METHOD
+PMPI_Comm_free(
+ _Inout_ MPI_Comm* comm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 6.6: Inter-Communication */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Comm_test_inter(
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Comm_test_inter(
+ _In_ MPI_Comm comm,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Comm_remote_size(
+ _In_ MPI_Comm comm,
+ _Out_ int* size
+ );
+
+MPI_METHOD
+PMPI_Comm_remote_size(
+ _In_ MPI_Comm comm,
+ _Out_ int* size
+ );
+
+MPI_METHOD
+MPI_Comm_remote_group(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+PMPI_Comm_remote_group(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+MPI_Intercomm_create(
+ _In_ MPI_Comm local_comm,
+ _In_range_(>=, 0) int local_leader,
+ _In_ MPI_Comm peer_comm,
+ _In_range_(>=, 0) int remote_leader,
+ _In_range_(>=, 0) int tag,
+ _Out_ MPI_Comm* newintercomm
+ );
+
+MPI_METHOD
+PMPI_Intercomm_create(
+ _In_ MPI_Comm local_comm,
+ _In_range_(>=, 0) int local_leader,
+ _In_ MPI_Comm peer_comm,
+ _In_range_(>=, 0) int remote_leader,
+ _In_range_(>=, 0) int tag,
+ _Out_ MPI_Comm* newintercomm
+ );
+
+MPI_METHOD
+MPI_Intercomm_merge(
+ _In_ MPI_Comm intercomm,
+ _In_ int high,
+ _Out_ MPI_Comm* newintracomm
+ );
+
+MPI_METHOD
+PMPI_Intercomm_merge(
+ _In_ MPI_Comm intercomm,
+ _In_ int high,
+ _Out_ MPI_Comm* newintracomm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 6.7: Caching */
+/*---------------------------------------------*/
+
+#define MPI_KEYVAL_INVALID 0x24000000
+
+typedef
+int
+(MPIAPI MPI_Comm_copy_attr_function)(
+ _In_ MPI_Comm oldcomm,
+ _In_ int comm_keyval,
+ _In_opt_ void* extra_state,
+ _In_opt_ void* attribute_val_in,
+ _Out_ void* attribute_val_out,
+ _mpi_out_flag_ int* flag
+ );
+
+typedef
+int
+(MPIAPI MPI_Comm_delete_attr_function)(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval,
+ _In_opt_ void* attribute_val,
+ _In_opt_ void* extra_state
+ );
+
+#define MPI_COMM_NULL_COPY_FN ((MPI_Comm_copy_attr_function*)0)
+#define MPI_COMM_NULL_DELETE_FN ((MPI_Comm_delete_attr_function*)0)
+#define MPI_COMM_DUP_FN ((MPI_Comm_copy_attr_function*)MPIR_Dup_fn)
+
+MPI_METHOD
+MPI_Comm_create_keyval(
+ _In_opt_ MPI_Comm_copy_attr_function* comm_copy_attr_fn,
+ _In_opt_ MPI_Comm_delete_attr_function* comm_delete_attr_fn,
+ _Out_ int* comm_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+PMPI_Comm_create_keyval(
+ _In_opt_ MPI_Comm_copy_attr_function* comm_copy_attr_fn,
+ _In_opt_ MPI_Comm_delete_attr_function* comm_delete_attr_fn,
+ _Out_ int* comm_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+MPI_Comm_free_keyval(
+ _Inout_ int* comm_keyval
+ );
+
+MPI_METHOD
+PMPI_Comm_free_keyval(
+ _Inout_ int* comm_keyval
+ );
+
+MPI_METHOD
+MPI_Comm_set_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MPI_METHOD
+PMPI_Comm_set_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+
+/* Predefined comm attribute key values */
+/* C Versions (return pointer to value),
+ Fortran Versions (return integer value).
+
+ DO NOT CHANGE THESE. The values encode:
+ builtin kind (0x1 in bit 30-31)
+ Keyval object (0x9 in bits 26-29)
+ for communicator (0x1 in bits 22-25)
+
+ Fortran versions of the attributes are formed by adding one to
+ the C version.
+ */
+#define MPI_TAG_UB 0x64400001
+#define MPI_HOST 0x64400003
+#define MPI_IO 0x64400005
+#define MPI_WTIME_IS_GLOBAL 0x64400007
+#define MPI_UNIVERSE_SIZE 0x64400009
+#define MPI_LASTUSEDCODE 0x6440000b
+#define MPI_APPNUM 0x6440000d
+
+MPI_METHOD
+MPI_Comm_get_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Comm_get_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Comm_delete_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval
+ );
+
+MPI_METHOD
+PMPI_Comm_delete_attr(
+ _In_ MPI_Comm comm,
+ _In_ int comm_keyval
+ );
+
+
+typedef
+int
+(MPIAPI MPI_Win_copy_attr_function)(
+ _In_ MPI_Win oldwin,
+ _In_ int win_keyval,
+ _In_opt_ void* extra_state,
+ _In_opt_ void* attribute_val_in,
+ _Out_ void* attribute_val_out,
+ _mpi_out_flag_ int* flag
+ );
+
+typedef
+int
+(MPIAPI MPI_Win_delete_attr_function)(
+ _In_ MPI_Win win,
+ _In_ int win_keyval,
+ _In_opt_ void* attribute_val,
+ _In_opt_ void* extra_state
+ );
+
+#define MPI_WIN_NULL_COPY_FN ((MPI_Win_copy_attr_function*)0)
+#define MPI_WIN_NULL_DELETE_FN ((MPI_Win_delete_attr_function*)0)
+#define MPI_WIN_DUP_FN ((MPI_Win_copy_attr_function*)MPIR_Dup_fn)
+
+MPI_METHOD
+MPI_Win_create_keyval(
+ _In_opt_ MPI_Win_copy_attr_function* win_copy_attr_fn,
+ _In_opt_ MPI_Win_delete_attr_function* win_delete_attr_fn,
+ _Out_ int* win_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+PMPI_Win_create_keyval(
+ _In_opt_ MPI_Win_copy_attr_function* win_copy_attr_fn,
+ _In_opt_ MPI_Win_delete_attr_function* win_delete_attr_fn,
+ _Out_ int* win_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+MPI_Win_free_keyval(
+ _Inout_ int* win_keyval
+ );
+
+MPI_METHOD
+PMPI_Win_free_keyval(
+ _Inout_ int* win_keyval
+ );
+
+MPI_METHOD
+MPI_Win_set_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MPI_METHOD
+PMPI_Win_set_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+
+/* Predefined window key value attributes */
+#define MPI_WIN_BASE 0x66000001
+#define MPI_WIN_SIZE 0x66000003
+#define MPI_WIN_DISP_UNIT 0x66000005
+#define MPI_WIN_CREATE_FLAVOR 0x66000007
+#define MPI_WIN_MODEL 0x66000009
+
+/* MPI Window Create Flavors */
+#define MPI_WIN_FLAVOR_CREATE 1
+#define MPI_WIN_FLAVOR_ALLOCATE 2
+#define MPI_WIN_FLAVOR_DYNAMIC 3
+#define MPI_WIN_FLAVOR_SHARED 4
+
+/* MPI Window Models */
+#define MPI_WIN_SEPARATE 1
+#define MPI_WIN_UNIFIED 2
+
+MPI_METHOD
+MPI_Win_get_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Win_get_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Win_delete_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval
+ );
+
+MPI_METHOD
+PMPI_Win_delete_attr(
+ _In_ MPI_Win win,
+ _In_ int win_keyval
+ );
+
+
+typedef
+int
+(MPIAPI MPI_Type_copy_attr_function)(
+ MPI_Datatype olddatatype,
+ int datatype_keyval,
+ _In_opt_ void* extra_state,
+ _In_opt_ void* attribute_val_in,
+ _Out_ void* attribute_val_out,
+ _mpi_out_flag_ int* flag
+ );
+
+typedef
+int
+(MPIAPI MPI_Type_delete_attr_function)(
+ MPI_Datatype datatype,
+ int datatype_keyval,
+ _In_opt_ void* attribute_val,
+ _In_opt_ void* extra_state
+ );
+
+#define MPI_TYPE_NULL_COPY_FN ((MPI_Type_copy_attr_function*)0)
+#define MPI_TYPE_NULL_DELETE_FN ((MPI_Type_delete_attr_function*)0)
+#define MPI_TYPE_DUP_FN ((MPI_Type_copy_attr_function*)MPIR_Dup_fn)
+
+MPI_METHOD
+MPI_Type_create_keyval(
+ _In_opt_ MPI_Type_copy_attr_function* type_copy_attr_fn,
+ _In_opt_ MPI_Type_delete_attr_function* type_delete_attr_fn,
+ _Out_ int* type_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+PMPI_Type_create_keyval(
+ _In_opt_ MPI_Type_copy_attr_function* type_copy_attr_fn,
+ _In_opt_ MPI_Type_delete_attr_function* type_delete_attr_fn,
+ _Out_ int* type_keyval,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+MPI_Type_free_keyval(
+ _Inout_ int* type_keyval
+ );
+
+MPI_METHOD
+PMPI_Type_free_keyval(
+ _Inout_ int* type_keyval
+ );
+
+MPI_METHOD
+MPI_Type_set_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MPI_METHOD
+PMPI_Type_set_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MPI_METHOD
+MPI_Type_get_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Type_get_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval,
+ _When_(*flag != 0, _Out_) void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Type_delete_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval
+ );
+
+MPI_METHOD
+PMPI_Type_delete_attr(
+ _In_ MPI_Datatype type,
+ _In_ int type_keyval
+ );
+
+
+/*---------------------------------------------*/
+/* Section 6.8: Naming Objects */
+/*---------------------------------------------*/
+
+#define MPI_MAX_OBJECT_NAME 128
+
+MPI_METHOD
+MPI_Comm_set_name(
+ _In_ MPI_Comm comm,
+ _In_z_ const char* comm_name
+ );
+
+MPI_METHOD
+PMPI_Comm_set_name(
+ _In_ MPI_Comm comm,
+ _In_z_ const char* comm_name
+ );
+
+MPI_METHOD
+MPI_Comm_get_name(
+ _In_ MPI_Comm comm,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* comm_name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+PMPI_Comm_get_name(
+ _In_ MPI_Comm comm,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* comm_name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+MPI_Type_set_name(
+ _In_ MPI_Datatype datatype,
+ _In_z_ const char* type_name
+ );
+
+MPI_METHOD
+PMPI_Type_set_name(
+ _In_ MPI_Datatype datatype,
+ _In_z_ const char* type_name
+ );
+
+MPI_METHOD
+MPI_Type_get_name(
+ _In_ MPI_Datatype datatype,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* type_name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+PMPI_Type_get_name(
+ _In_ MPI_Datatype datatype,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* type_name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+MPI_Win_set_name(
+ _In_ MPI_Win win,
+ _In_z_ const char* win_name
+ );
+
+MPI_METHOD
+PMPI_Win_set_name(
+ _In_ MPI_Win win,
+ _In_z_ const char* win_name
+ );
+
+MPI_METHOD
+MPI_Win_get_name(
+ _In_ MPI_Win win,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* win_name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+PMPI_Win_get_name(
+ _In_ MPI_Win win,
+ _Out_writes_z_(MPI_MAX_OBJECT_NAME) char* win_name,
+ _Out_ int* resultlen
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 7: Process Topologies */
+/*---------------------------------------------------------------------------*/
+
+MPI_METHOD
+MPI_Cart_create(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int ndims,
+ _In_reads_opt_(ndims) const int dims[],
+ _In_reads_opt_(ndims) const int periods[],
+ _In_ int reorder,
+ _Out_ MPI_Comm* comm_cart
+ );
+
+MPI_METHOD
+PMPI_Cart_create(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int ndims,
+ _In_reads_opt_(ndims) const int dims[],
+ _In_reads_opt_(ndims) const int periods[],
+ _In_ int reorder,
+ _Out_ MPI_Comm* comm_cart
+ );
+
+MPI_METHOD
+MPI_Dims_create(
+ _In_range_(>, 0) int nnodes,
+ _In_range_(>=, 0) int ndims,
+ _Inout_updates_opt_(ndims) int dims[]
+ );
+
+MPI_METHOD
+PMPI_Dims_create(
+ _In_range_(>, 0) int nnodes,
+ _In_range_(>=, 0) int ndims,
+ _Inout_updates_opt_(ndims) int dims[]
+ );
+
+MPI_METHOD
+MPI_Graph_create(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int nnodes,
+ _In_reads_opt_(nnodes) const int index[],
+ _In_reads_opt_(nnodes) const int edges[],
+ _In_ int reorder,
+ _Out_ MPI_Comm* comm_graph
+ );
+
+MPI_METHOD
+PMPI_Graph_create(
+    _In_ MPI_Comm comm_old,
+    _In_range_(>=, 0) int nnodes,
+    _In_reads_opt_(nnodes) const int index[],
+    _In_reads_opt_(nnodes) const int edges[],
+    _In_ int reorder,
+    _Out_ MPI_Comm* comm_graph
+    );
+
+MPI_METHOD
+MPI_Dist_graph_create_adjacent(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int indegree,
+ _In_reads_opt_(indegree) const int sources[],
+ _In_reads_opt_(indegree) const int sourceweights[],
+ _In_range_(>=, 0) int outdegree,
+ _In_reads_opt_(outdegree) const int destinations[],
+ _In_reads_opt_(outdegree) const int destweights[],
+ _In_ MPI_Info info,
+ _In_range_(0,1) int reorder,
+ _Out_ MPI_Comm* comm_dist_graph
+);
+
+MPI_METHOD
+PMPI_Dist_graph_create_adjacent(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int indegree,
+ _In_reads_opt_(indegree) const int sources[],
+ _In_reads_opt_(indegree) const int sourceweights[],
+ _In_range_(>=, 0) int outdegree,
+ _In_reads_opt_(outdegree) const int destinations[],
+ _In_reads_opt_(outdegree) const int destweights[],
+ _In_ MPI_Info info,
+ _In_range_(0,1) int reorder,
+ _Out_ MPI_Comm* comm_dist_graph
+);
+
+MPI_METHOD
+MPI_Dist_graph_create(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int sources[],
+ _In_reads_opt_(n) const int degrees[],
+ _In_opt_ const int destinations[],
+ _In_opt_ const int weights[],
+ _In_ MPI_Info info,
+ _In_range_(0, 1) int reorder,
+ _Out_ MPI_Comm *comm_dist_graph
+ );
+
+MPI_METHOD
+PMPI_Dist_graph_create(
+ _In_ MPI_Comm comm_old,
+ _In_range_(>=, 0) int n,
+ _In_reads_opt_(n) const int sources[],
+ _In_reads_opt_(n) const int degrees[],
+ _In_opt_ const int destinations[],
+ _In_opt_ const int weights[],
+ _In_ MPI_Info info,
+ _In_range_(0, 1) int reorder,
+ _Out_ MPI_Comm *comm_dist_graph
+ );
+
+/* Topology types */
+enum
+{
+ MPI_GRAPH = 1,
+ MPI_CART = 2,
+ MPI_DIST_GRAPH = 3
+};
+
+MPI_METHOD
+MPI_Topo_test(
+ _In_ MPI_Comm comm,
+ _Out_ int* status
+ );
+
+MPI_METHOD
+PMPI_Topo_test(
+ _In_ MPI_Comm comm,
+ _Out_ int* status
+ );
+
+MPI_METHOD
+MPI_Graphdims_get(
+ _In_ MPI_Comm comm,
+ _Out_ int* nnodes,
+ _Out_ int* nedges
+ );
+
+MPI_METHOD
+PMPI_Graphdims_get(
+ _In_ MPI_Comm comm,
+ _Out_ int* nnodes,
+ _Out_ int* nedges
+ );
+
+MPI_METHOD
+MPI_Graph_get(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxindex,
+ _In_range_(>=, 0) int maxedges,
+ _Out_writes_opt_(maxindex) int index[],
+ _Out_writes_opt_(maxedges) int edges[]
+ );
+
+MPI_METHOD
+PMPI_Graph_get(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxindex,
+ _In_range_(>=, 0) int maxedges,
+ _Out_writes_opt_(maxindex) int index[],
+ _Out_writes_opt_(maxedges) int edges[]
+ );
+
+MPI_METHOD
+MPI_Cartdim_get(
+ _In_ MPI_Comm comm,
+ _Out_ int* ndims
+ );
+
+MPI_METHOD
+PMPI_Cartdim_get(
+ _In_ MPI_Comm comm,
+ _Out_ int* ndims
+ );
+
+MPI_METHOD
+MPI_Cart_get(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxdims,
+ _Out_writes_opt_(maxdims) int dims[],
+ _Out_writes_opt_(maxdims) int periods[],
+ _Out_writes_opt_(maxdims) int coords[]
+ );
+
+MPI_METHOD
+PMPI_Cart_get(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxdims,
+ _Out_writes_opt_(maxdims) int dims[],
+ _Out_writes_opt_(maxdims) int periods[],
+ _Out_writes_opt_(maxdims) int coords[]
+ );
+
+MPI_METHOD
+MPI_Cart_rank(
+ _In_ MPI_Comm comm,
+ _In_ const int coords[],
+ _Out_ _Deref_out_range_(>=, 0) int* rank
+ );
+
+MPI_METHOD
+PMPI_Cart_rank(
+ _In_ MPI_Comm comm,
+ _In_ const int coords[],
+ _Out_ _Deref_out_range_(>=, 0) int* rank
+ );
+
+MPI_METHOD
+MPI_Cart_coords(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int maxdims,
+ _Out_writes_opt_(maxdims) int coords[]
+ );
+
+MPI_METHOD
+PMPI_Cart_coords(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int maxdims,
+ _Out_writes_opt_(maxdims) int coords[]
+ );
+
+MPI_METHOD
+MPI_Graph_neighbors_count(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _Out_ _Deref_out_range_(>=, 0) int* nneighbors
+ );
+
+MPI_METHOD
+PMPI_Graph_neighbors_count(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _Out_ _Deref_out_range_(>=, 0) int* nneighbors
+ );
+
+MPI_METHOD
+MPI_Graph_neighbors(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int maxneighbors,
+ _Out_writes_opt_(maxneighbors) int neighbors[]
+ );
+
+MPI_METHOD
+PMPI_Graph_neighbors(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int rank,
+ _In_range_(>=, 0) int maxneighbors,
+ _Out_writes_opt_(maxneighbors) int neighbors[]
+ );
+
+MPI_METHOD
+MPI_Cart_shift(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int direction,
+ _In_ int disp,
+ _Out_ _Deref_out_range_(>=, MPI_PROC_NULL) int* rank_source,
+ _Out_ _Deref_out_range_(>=, MPI_PROC_NULL) int* rank_dest
+ );
+
+MPI_METHOD
+PMPI_Cart_shift(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int direction,
+ _In_ int disp,
+ _Out_ _Deref_out_range_(>=, MPI_PROC_NULL) int* rank_source,
+ _Out_ _Deref_out_range_(>=, MPI_PROC_NULL) int* rank_dest
+ );
+
+MPI_METHOD
+MPI_Cart_sub(
+ _In_ MPI_Comm comm,
+ _In_ const int remain_dims[],
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Cart_sub(
+ _In_ MPI_Comm comm,
+ _In_ const int remain_dims[],
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+MPI_Cart_map(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int ndims,
+ _In_reads_opt_(ndims) const int dims[],
+ _In_reads_opt_(ndims) const int periods[],
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* newrank
+ );
+
+MPI_METHOD
+PMPI_Cart_map(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int ndims,
+ _In_reads_opt_(ndims) const int dims[],
+ _In_reads_opt_(ndims) const int periods[],
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* newrank
+ );
+
+MPI_METHOD
+MPI_Graph_map(
+ _In_ MPI_Comm comm,
+ _In_range_(>, 0) int nnodes,
+ _In_reads_opt_(nnodes) const int index[],
+ _In_opt_ const int edges[],
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* newrank
+ );
+
+MPI_METHOD
+PMPI_Graph_map(
+ _In_ MPI_Comm comm,
+ _In_range_(>, 0) int nnodes,
+ _In_reads_opt_(nnodes) const int index[],
+ _In_opt_ const int edges[],
+ _Out_ _Deref_out_range_(>=, MPI_UNDEFINED) int* newrank
+ );
+
+MPI_METHOD
+MPI_Dist_graph_neighbors_count(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>=, 0) int *indegree,
+ _Out_ _Deref_out_range_(>=, 0) int *outdegree,
+ _Out_ _Deref_out_range_(>=, 0) int *weighted
+ );
+
+MPI_METHOD
+PMPI_Dist_graph_neighbors_count(
+ _In_ MPI_Comm comm,
+ _Out_ _Deref_out_range_(>=, 0) int *indegree,
+ _Out_ _Deref_out_range_(>=, 0) int *outdegree,
+ _Out_ _Deref_out_range_(>=, 0) int *weighted
+ );
+
+MPI_METHOD
+MPI_Dist_graph_neighbors(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxindegree,
+ _Out_writes_opt_(maxindegree) int sources[],
+ _Out_writes_opt_(maxindegree) int sourceweights[],
+ _In_range_(>=, 0) int maxoutdegree,
+ _Out_writes_opt_(maxoutdegree) int destinations[],
+ _Out_writes_opt_(maxoutdegree) int destweights[]
+ );
+
+MPI_METHOD
+PMPI_Dist_graph_neighbors(
+ _In_ MPI_Comm comm,
+ _In_range_(>=, 0) int maxindegree,
+ _Out_writes_opt_(maxindegree) int sources[],
+ _Out_writes_opt_(maxindegree) int sourceweights[],
+ _In_range_(>=, 0) int maxoutdegree,
+ _Out_writes_opt_(maxoutdegree) int destinations[],
+ _Out_writes_opt_(maxoutdegree) int destweights[]
+ );
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 8: Environmental Management */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 8.1: Implementation Information */
+/*---------------------------------------------*/
+
+#define MPI_VERSION 2
+#define MPI_SUBVERSION 0
+
+MPI_METHOD
+MPI_Get_version(
+ _Out_ int* version,
+ _Out_ int* subversion
+ );
+
+MPI_METHOD
+PMPI_Get_version(
+ _Out_ int* version,
+ _Out_ int* subversion
+ );
+
+#define MPI_MAX_LIBRARY_VERSION_STRING 64
+
+MPI_METHOD
+MPI_Get_library_version(
+ _Out_writes_z_(MPI_MAX_LIBRARY_VERSION_STRING) char* version,
+ _Out_ int* resultlen
+);
+
+MPI_METHOD
+PMPI_Get_library_version(
+ _Out_writes_z_(MPI_MAX_LIBRARY_VERSION_STRING) char* version,
+ _Out_ int* resultlen
+);
+
+#define MPI_MAX_PROCESSOR_NAME 128
+
+MPI_METHOD
+MPI_Get_processor_name(
+ _Out_writes_z_(MPI_MAX_PROCESSOR_NAME) char* name,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+PMPI_Get_processor_name(
+ _Out_writes_z_(MPI_MAX_PROCESSOR_NAME) char* name,
+ _Out_ int* resultlen
+ );
+
+/*---------------------------------------------*/
+/* Section 8.2: Memory Allocation */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Alloc_mem(
+ _In_ MPI_Aint size,
+ _In_ MPI_Info info,
+ _Out_ void* baseptr
+ );
+
+MPI_METHOD
+PMPI_Alloc_mem(
+ _In_ MPI_Aint size,
+ _In_ MPI_Info info,
+ _Out_ void* baseptr
+ );
+
+MPI_METHOD
+MPI_Free_mem(
+ _In_ _Post_invalid_ void* base
+ );
+
+MPI_METHOD
+PMPI_Free_mem(
+ _In_ _Post_invalid_ void* base
+ );
+
+
+/*---------------------------------------------*/
+/* Section 8.3: Error Handling */
+/*---------------------------------------------*/
+
+typedef
+void
+(MPIAPI MPI_Comm_errhandler_fn)(
+ _In_ MPI_Comm* comm,
+ _Inout_ int* errcode,
+ ...
+ );
+
+MPI_METHOD
+MPI_Comm_create_errhandler(
+ _In_ MPI_Comm_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_Comm_create_errhandler(
+ _In_ MPI_Comm_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+MPI_Comm_set_errhandler(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+PMPI_Comm_set_errhandler(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+MPI_Comm_get_errhandler(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_Comm_get_errhandler(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+
+typedef
+void
+(MPIAPI MPI_Win_errhandler_fn)(
+ _In_ MPI_Win* win,
+ _Inout_ int* errcode,
+ ...
+ );
+
+MPI_METHOD
+MPI_Win_create_errhandler(
+ _In_ MPI_Win_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_Win_create_errhandler(
+ _In_ MPI_Win_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+MPI_Win_set_errhandler(
+ _In_ MPI_Win win,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+PMPI_Win_set_errhandler(
+ _In_ MPI_Win win,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+MPI_Win_get_errhandler(
+ _In_ MPI_Win win,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_Win_get_errhandler(
+ _In_ MPI_Win win,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+
+typedef
+void
+(MPIAPI MPI_File_errhandler_fn)(
+ _In_ MPI_File* file,
+ _Inout_ int* errcode,
+ ...
+ );
+
+MPI_METHOD
+MPI_File_create_errhandler(
+ _In_ MPI_File_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_File_create_errhandler(
+ _In_ MPI_File_errhandler_fn* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+MPI_File_set_errhandler(
+ _In_ MPI_File file,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+PMPI_File_set_errhandler(
+ _In_ MPI_File file,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MPI_METHOD
+MPI_File_get_errhandler(
+ _In_ MPI_File file,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_File_get_errhandler(
+ _In_ MPI_File file,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+MPI_Errhandler_free(
+ _Inout_ MPI_Errhandler* errhandler
+ );
+
+MPI_METHOD
+PMPI_Errhandler_free(
+ _Inout_ MPI_Errhandler* errhandler
+ );
+
+#define MPI_MAX_ERROR_STRING 512
+
+MPI_METHOD
+MPI_Error_string(
+ _In_ int errorcode,
+ _Out_writes_z_(MPI_MAX_ERROR_STRING) char* string,
+ _Out_ int* resultlen
+ );
+
+MPI_METHOD
+PMPI_Error_string(
+ _In_ int errorcode,
+ _Out_writes_z_(MPI_MAX_ERROR_STRING) char* string,
+ _Out_ int* resultlen
+ );
+
+
+/*---------------------------------------------*/
+/* Section 8.4: Error Codes and Classes */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Error_class(
+ _In_ int errorcode,
+ _Out_ int* errorclass
+ );
+
+MPI_METHOD
+PMPI_Error_class(
+ _In_ int errorcode,
+ _Out_ int* errorclass
+ );
+
+MPI_METHOD
+MPI_Add_error_class(
+ _Out_ int* errorclass
+ );
+
+MPI_METHOD
+PMPI_Add_error_class(
+ _Out_ int* errorclass
+ );
+
+MPI_METHOD
+MPI_Add_error_code(
+ _In_ int errorclass,
+ _Out_ int* errorcode
+ );
+
+MPI_METHOD
+PMPI_Add_error_code(
+ _In_ int errorclass,
+ _Out_ int* errorcode
+ );
+
+MPI_METHOD
+MPI_Add_error_string(
+ _In_ int errorcode,
+ _In_z_ const char* string
+ );
+
+MPI_METHOD
+PMPI_Add_error_string(
+ _In_ int errorcode,
+ _In_z_ const char* string
+ );
+
+MPI_METHOD
+MPI_Comm_call_errhandler(
+ _In_ MPI_Comm comm,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+PMPI_Comm_call_errhandler(
+ _In_ MPI_Comm comm,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+MPI_Win_call_errhandler(
+ _In_ MPI_Win win,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+PMPI_Win_call_errhandler(
+ _In_ MPI_Win win,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+MPI_File_call_errhandler(
+ _In_ MPI_File file,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+PMPI_File_call_errhandler(
+ _In_ MPI_File file,
+ _In_ int errorcode
+ );
+
+
+/*---------------------------------------------*/
+/* Section 8.6: Timers and Synchronization */
+/*---------------------------------------------*/
+
+double
+MPIAPI
+MPI_Wtime(
+ void
+ );
+
+double
+MPIAPI
+PMPI_Wtime(
+ void
+ );
+
+double
+MPIAPI
+MPI_Wtick(
+ void
+ );
+
+double
+MPIAPI
+PMPI_Wtick(
+ void
+ );
+
+
+/*---------------------------------------------*/
+/* Section 8.7: Startup */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Init(
+ _In_opt_ const int* argc,
+ _Notref_ _In_reads_opt_(*argc) char*** argv
+ );
+
+MPI_METHOD
+PMPI_Init(
+ _In_opt_ const int* argc,
+ _Notref_ _In_reads_opt_(*argc) char*** argv
+ );
+
+MPI_METHOD
+MPI_Finalize(
+ void
+ );
+
+MPI_METHOD
+PMPI_Finalize(
+ void
+ );
+
+MPI_METHOD
+MPI_Initialized(
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Initialized(
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Abort(
+ _In_ MPI_Comm comm,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+PMPI_Abort(
+ _In_ MPI_Comm comm,
+ _In_ int errorcode
+ );
+
+MPI_METHOD
+MPI_Finalized(
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Finalized(
+ _mpi_out_flag_ int* flag
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 9: The Info Object */
+/*---------------------------------------------------------------------------*/
+
+#define MPI_MAX_INFO_KEY 255
+#define MPI_MAX_INFO_VAL 1024
+
+MPI_METHOD
+MPI_Info_create(
+ _Out_ MPI_Info* info
+ );
+
+MPI_METHOD
+PMPI_Info_create(
+ _Out_ MPI_Info* info
+ );
+
+MPI_METHOD
+MPI_Info_set(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _In_z_ const char* value
+ );
+
+MPI_METHOD
+PMPI_Info_set(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _In_z_ const char* value
+ );
+
+MPI_METHOD
+MPI_Info_delete(
+ _In_ MPI_Info info,
+ _In_z_ const char* key
+ );
+
+MPI_METHOD
+PMPI_Info_delete(
+ _In_ MPI_Info info,
+ _In_z_ const char* key
+ );
+
+MPI_METHOD
+MPI_Info_get(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _In_ int valuelen,
+ _When_(*flag != 0, _Out_writes_z_(valuelen)) char* value,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Info_get(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _In_ int valuelen,
+ _When_(*flag != 0, _Out_writes_z_(valuelen)) char* value,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Info_get_valuelen(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _Out_ _Deref_out_range_(0, MPI_MAX_INFO_VAL) int* valuelen,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Info_get_valuelen(
+ _In_ MPI_Info info,
+ _In_z_ const char* key,
+ _Out_ _Deref_out_range_(0, MPI_MAX_INFO_VAL) int* valuelen,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_Info_get_nkeys(
+ _In_ MPI_Info info,
+ _Out_ int* nkeys
+ );
+
+MPI_METHOD
+PMPI_Info_get_nkeys(
+ _In_ MPI_Info info,
+ _Out_ int* nkeys
+ );
+
+MPI_METHOD
+MPI_Info_get_nthkey(
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int n,
+ _Out_writes_z_(MPI_MAX_INFO_KEY) char* key
+ );
+
+MPI_METHOD
+PMPI_Info_get_nthkey(
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int n,
+ _Out_writes_z_(MPI_MAX_INFO_KEY) char* key
+ );
+
+MPI_METHOD
+MPI_Info_dup(
+ _In_ MPI_Info info,
+ _Out_ MPI_Info* newinfo
+ );
+
+MPI_METHOD
+PMPI_Info_dup(
+ _In_ MPI_Info info,
+ _Out_ MPI_Info* newinfo
+ );
+
+MPI_METHOD
+MPI_Info_free(
+ _Inout_ MPI_Info* info
+ );
+
+MPI_METHOD
+PMPI_Info_free(
+ _Inout_ MPI_Info* info
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 10: Process Creation and Management */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 10.3: Process Manager Interface */
+/*---------------------------------------------*/
+
+#define MPI_ARGV_NULL ((char**)0)
+#define MPI_ARGVS_NULL ((char***)0)
+
+#define MPI_ERRCODES_IGNORE ((int*)0)
+
+MPI_METHOD
+MPI_Comm_spawn(
+ _In_z_ const char* command,
+ _In_ char* argv[],
+ _In_range_(>=, 0) int maxprocs,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* intercomm,
+ _Out_writes_(maxprocs) int array_of_errcodes[]
+ );
+
+MPI_METHOD
+PMPI_Comm_spawn(
+ _In_z_ const char* command,
+ _In_ char* argv[],
+ _In_range_(>=, 0) int maxprocs,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* intercomm,
+ _Out_writes_(maxprocs) int array_of_errcodes[]
+ );
+
+MPI_METHOD
+MPI_Comm_get_parent(
+ _Out_ MPI_Comm* parent
+ );
+
+MPI_METHOD
+PMPI_Comm_get_parent(
+ _Out_ MPI_Comm* parent
+ );
+
+MPI_METHOD
+MPI_Comm_spawn_multiple(
+ _In_range_(>, 0) int count,
+ _In_reads_z_(count) char* array_of_commands[],
+ _In_reads_z_(count) char** array_of_argv[],
+ _In_reads_(count) const int array_of_maxprocs[],
+ _In_reads_(count) const MPI_Info array_of_info[],
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* intercomm,
+ _Out_ int array_of_errcodes[]
+ );
+
+MPI_METHOD
+PMPI_Comm_spawn_multiple(
+ _In_range_(>, 0) int count,
+ _In_reads_z_(count) char* array_of_commands[],
+ _In_reads_z_(count) char** array_of_argv[],
+ _In_reads_(count) const int array_of_maxprocs[],
+ _In_reads_(count) const MPI_Info array_of_info[],
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* intercomm,
+ _Out_ int array_of_errcodes[]
+ );
+
+
+/*---------------------------------------------*/
+/* Section 10.4: Establishing Communication */
+/*---------------------------------------------*/
+
+#define MPI_MAX_PORT_NAME 256
+
+MPI_METHOD
+MPI_Open_port(
+ _In_ MPI_Info info,
+ _Out_writes_z_(MPI_MAX_PORT_NAME) char* port_name
+ );
+
+MPI_METHOD
+PMPI_Open_port(
+ _In_ MPI_Info info,
+ _Out_writes_z_(MPI_MAX_PORT_NAME) char* port_name
+ );
+
+MPI_METHOD
+MPI_Close_port(
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+PMPI_Close_port(
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+MPI_Comm_accept(
+ _In_z_ const char* port_name,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_accept(
+ _In_z_ const char* port_name,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+MPI_Comm_connect(
+ _In_z_ const char* port_name,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+MPI_METHOD
+PMPI_Comm_connect(
+ _In_z_ const char* port_name,
+ _In_ MPI_Info info,
+ _In_range_(>=, 0) int root,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Comm* newcomm
+ );
+
+
+/*---------------------------------------------*/
+/* Section 10.4.4: Name Publishing */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Publish_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+PMPI_Publish_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+MPI_Unpublish_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+PMPI_Unpublish_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _In_z_ const char* port_name
+ );
+
+MPI_METHOD
+MPI_Lookup_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _Out_writes_z_(MPI_MAX_PORT_NAME) char* port_name
+ );
+
+MPI_METHOD
+PMPI_Lookup_name(
+ _In_z_ const char* service_name,
+ _In_ MPI_Info info,
+ _Out_writes_z_(MPI_MAX_PORT_NAME) char* port_name
+ );
+
+
+/*---------------------------------------------*/
+/* Section 10.5: Other Functionality */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Comm_disconnect(
+ _In_ MPI_Comm* comm
+ );
+
+MPI_METHOD
+PMPI_Comm_disconnect(
+ _In_ MPI_Comm* comm
+ );
+
+MPI_METHOD
+MPI_Comm_join(
+ _In_ int fd,
+ _Out_ MPI_Comm* intercomm
+ );
+
+MPI_METHOD
+PMPI_Comm_join(
+ _In_ int fd,
+ _Out_ MPI_Comm* intercomm
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 11: One-Sided Communications */
+/*---------------------------------------------------------------------------*/
+
+MPI_METHOD
+MPI_Win_create(
+ _In_ void* base,
+ _In_range_(>=, 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Win* win
+ );
+
+MPI_METHOD
+PMPI_Win_create(
+ _In_ void* base,
+ _In_range_(>=, 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Win* win
+ );
+
+MPI_METHOD
+MPI_Win_allocate(
+ _In_range_(>= , 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ void *baseptr,
+ _Out_ MPI_Win *win
+ );
+
+MPI_METHOD
+PMPI_Win_allocate(
+ _In_range_(>= , 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ void *baseptr,
+ _Out_ MPI_Win *win
+ );
+
+MPI_METHOD
+MPI_Win_allocate_shared(
+ _In_range_(>=, 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ void *baseptr,
+ _Out_ MPI_Win *win
+ );
+
+MPI_METHOD
+PMPI_Win_allocate_shared(
+ _In_range_(>=, 0) MPI_Aint size,
+ _In_range_(>, 0) int disp_unit,
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ void *baseptr,
+ _Out_ MPI_Win *win
+ );
+
+MPI_METHOD
+MPI_Win_shared_query(
+ _In_ MPI_Win win,
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _Out_ MPI_Aint *size,
+ _Out_ int *disp_unit,
+ _Out_ void *baseptr
+ );
+
+MPI_METHOD
+PMPI_Win_shared_query(
+ _In_ MPI_Win win,
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _Out_ MPI_Aint *size,
+ _Out_ int *disp_unit,
+ _Out_ void *baseptr
+ );
+
+MPI_METHOD
+MPI_Win_create_dynamic(
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Win* win
+ );
+
+MPI_METHOD
+PMPI_Win_create_dynamic(
+ _In_ MPI_Info info,
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Win* win
+ );
+
+MPI_METHOD
+MPI_Win_free(
+ _Inout_ MPI_Win* win
+ );
+
+MPI_METHOD
+PMPI_Win_free(
+ _Inout_ MPI_Win* win
+ );
+
+MPI_METHOD
+MPI_Win_get_group(
+ _In_ MPI_Win win,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+PMPI_Win_get_group(
+ _In_ MPI_Win win,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+MPI_Win_attach(
+ _In_ MPI_Win win,
+ _In_ void* base,
+ _In_range_(>=, 0) MPI_Aint size
+ );
+
+MPI_METHOD
+PMPI_Win_attach(
+ _In_ MPI_Win win,
+ _In_ void* base,
+ _In_range_(>=, 0) MPI_Aint size
+ );
+
+MPI_METHOD
+MPI_Win_detach(
+ _In_ MPI_Win win,
+ _In_ void* base
+ );
+
+MPI_METHOD
+PMPI_Win_detach(
+ _In_ MPI_Win win,
+ _In_ void* base
+ );
+
+MPI_METHOD
+MPI_Put(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Put(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Rput(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+PMPI_Rput(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+MPI_Get(
+ _In_opt_ void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Get(
+ _In_opt_ void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Rget(
+ _In_opt_ void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+PMPI_Rget(
+ _In_opt_ void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+MPI_Accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Raccumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+PMPI_Raccumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>=, 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_range_(>=, MPI_PROC_NULL) int target_rank,
+ _In_range_(>=, 0) MPI_Aint target_disp,
+ _In_range_(>=, 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+ );
+
+MPI_METHOD
+MPI_Get_accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_opt_ void* result_addr,
+ _In_range_(>= , 0) int result_count,
+ _In_ MPI_Datatype result_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+);
+
+MPI_METHOD
+PMPI_Get_accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_opt_ void* result_addr,
+ _In_range_(>= , 0) int result_count,
+ _In_ MPI_Datatype result_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+);
+
+MPI_METHOD
+MPI_Rget_accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_opt_ void* result_addr,
+ _In_range_(>= , 0) int result_count,
+ _In_ MPI_Datatype result_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+);
+
+MPI_METHOD
+PMPI_Rget_accumulate(
+ _In_opt_ const void* origin_addr,
+ _In_range_(>= , 0) int origin_count,
+ _In_ MPI_Datatype origin_datatype,
+ _In_opt_ void* result_addr,
+ _In_range_(>= , 0) int result_count,
+ _In_ MPI_Datatype result_datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_range_(>= , 0) int target_count,
+ _In_ MPI_Datatype target_datatype,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win,
+ _Out_ MPI_Request *request
+);
+
+MPI_METHOD
+MPI_Fetch_and_op(
+ _In_opt_ const void* origin_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ void* result_addr,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+);
+
+MPI_METHOD
+PMPI_Fetch_and_op(
+ _In_opt_ const void* origin_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ void* result_addr,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_ MPI_Op op,
+ _In_ MPI_Win win
+);
+
+MPI_METHOD
+MPI_Compare_and_swap(
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ const void* origin_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ const void* compare_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ void* result_addr,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_ MPI_Win win
+);
+
+MPI_METHOD
+PMPI_Compare_and_swap(
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ const void* origin_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ const void* compare_addr,
+ _When_(target_rank != MPI_PROC_NULL, _In_)
+ _When_(target_rank == MPI_PROC_NULL, _In_opt_)
+ void* result_addr,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>= , MPI_PROC_NULL) int target_rank,
+ _In_range_(>= , 0) MPI_Aint target_disp,
+ _In_ MPI_Win win
+);
+
+/* Asserts for one-sided communication */
+#define MPI_MODE_NOCHECK 1024
+#define MPI_MODE_NOSTORE 2048
+#define MPI_MODE_NOPUT 4096
+#define MPI_MODE_NOPRECEDE 8192
+#define MPI_MODE_NOSUCCEED 16384
+
+MPI_METHOD
+MPI_Win_fence(
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_fence(
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_start(
+ _In_ MPI_Group group,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_start(
+ _In_ MPI_Group group,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_complete(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_complete(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_post(
+ _In_ MPI_Group group,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_post(
+ _In_ MPI_Group group,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_wait(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_wait(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_test(
+ _In_ MPI_Win win,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Win_test(
+ _In_ MPI_Win win,
+ _mpi_out_flag_ int* flag
+ );
+
+#define MPI_LOCK_EXCLUSIVE 234
+#define MPI_LOCK_SHARED 235
+
+MPI_METHOD
+MPI_Win_lock(
+ _In_ int lock_type,
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_lock(
+ _In_ int lock_type,
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_lock_all(
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_lock_all(
+ _In_ int assert,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_unlock(
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_unlock(
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_unlock_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_unlock_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_flush(
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_flush(
+ _In_range_(>=, MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_flush_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_flush_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_flush_local(
+ _In_range_(>= , MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_flush_local(
+ _In_range_(>= , MPI_PROC_NULL) int rank,
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_flush_local_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_flush_local_all(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+MPI_Win_sync(
+ _In_ MPI_Win win
+ );
+
+MPI_METHOD
+PMPI_Win_sync(
+ _In_ MPI_Win win
+ );
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 12: External Interfaces */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 12.2: Generalized Requests */
+/*---------------------------------------------*/
+
+typedef
+int
+(MPIAPI MPI_Grequest_query_function)(
+ _In_opt_ void* extra_state,
+ _Out_ MPI_Status* status
+ );
+
+typedef
+int
+(MPIAPI MPI_Grequest_free_function)(
+ _In_opt_ void* extra_state
+ );
+
+typedef
+int
+(MPIAPI MPI_Grequest_cancel_function)(
+ _In_opt_ void* extra_state,
+ _In_ int complete
+ );
+
+MPI_METHOD
+MPI_Grequest_start(
+ _In_ MPI_Grequest_query_function* query_fn,
+ _In_ MPI_Grequest_free_function* free_fn,
+ _In_ MPI_Grequest_cancel_function* cancel_fn,
+ _In_opt_ void* extra_state,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_Grequest_start(
+ _In_ MPI_Grequest_query_function* query_fn,
+ _In_ MPI_Grequest_free_function* free_fn,
+ _In_ MPI_Grequest_cancel_function* cancel_fn,
+ _In_opt_ void* extra_state,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_Grequest_complete(
+ _In_ MPI_Request request
+ );
+
+MPI_METHOD
+PMPI_Grequest_complete(
+ _In_ MPI_Request request
+ );
+
+
+/*---------------------------------------------*/
+/* Section 12.3: Information with Status */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Status_set_elements(
+ _In_ MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) int count
+ );
+
+MPI_METHOD
+PMPI_Status_set_elements(
+ _In_ MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) int count
+ );
+
+MPI_METHOD
+MPI_Status_set_elements_x(
+ _In_ MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) MPI_Count count
+ );
+
+MPI_METHOD
+PMPI_Status_set_elements_x(
+ _In_ MPI_Status* status,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) MPI_Count count
+ );
+
+MPI_METHOD
+MPI_Status_set_cancelled(
+ _In_ MPI_Status* status,
+ _In_range_(0,1) int flag
+ );
+
+MPI_METHOD
+PMPI_Status_set_cancelled(
+ _In_ MPI_Status* status,
+ _In_range_(0,1) int flag
+ );
+
+
+/*---------------------------------------------*/
+/* Section 12.4: Threads */
+/*---------------------------------------------*/
+
+#define MPI_THREAD_SINGLE 0
+#define MPI_THREAD_FUNNELED 1
+#define MPI_THREAD_SERIALIZED 2
+#define MPI_THREAD_MULTIPLE 3
+
+MPI_METHOD
+MPI_Init_thread(
+ _In_opt_ const int* argc,
+ _Notref_ _In_reads_opt_(*argc) char*** argv,
+ _In_ int required,
+ _Out_ int* provided
+ );
+
+MPI_METHOD
+PMPI_Init_thread(
+ _In_opt_ const int* argc,
+ _Notref_ _In_reads_opt_(*argc) char*** argv,
+ _In_ int required,
+ _Out_ int* provided
+ );
+
+MPI_METHOD
+MPI_Query_thread(
+ _Out_ int* provided
+ );
+
+MPI_METHOD
+PMPI_Query_thread(
+ _Out_ int* provided
+ );
+
+MPI_METHOD
+MPI_Is_thread_main(
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_Is_thread_main(
+ _mpi_out_flag_ int* flag
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 13: I/O */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 13.2: File Manipulation */
+/*---------------------------------------------*/
+
+#define MPI_MODE_CREATE 0x00000001
+#define MPI_MODE_RDONLY 0x00000002
+#define MPI_MODE_WRONLY 0x00000004
+#define MPI_MODE_RDWR 0x00000008
+#define MPI_MODE_DELETE_ON_CLOSE 0x00000010
+#define MPI_MODE_UNIQUE_OPEN 0x00000020
+#define MPI_MODE_EXCL 0x00000040
+#define MPI_MODE_APPEND 0x00000080
+#define MPI_MODE_SEQUENTIAL 0x00000100
+#define MSMPI_MODE_HIDDEN 0x00000200
+
+MPI_METHOD
+MPI_File_open(
+ _In_ MPI_Comm comm,
+ _In_z_ const char* filename,
+ _In_ int amode,
+ _In_ MPI_Info info,
+ _Out_ MPI_File* fh
+ );
+
+MPI_METHOD
+PMPI_File_open(
+ _In_ MPI_Comm comm,
+ _In_z_ const char* filename,
+ _In_ int amode,
+ _In_ MPI_Info info,
+ _Out_ MPI_File* fh
+ );
+
+MPI_METHOD
+MPI_File_close(
+ _In_ MPI_File* fh
+ );
+
+MPI_METHOD
+PMPI_File_close(
+ _In_ MPI_File* fh
+ );
+
+MPI_METHOD
+MPI_File_delete(
+ _In_z_ const char* filename,
+ _In_ MPI_Info info
+ );
+
+MPI_METHOD
+PMPI_File_delete(
+ _In_z_ const char* filename,
+ _In_ MPI_Info info
+ );
+
+MPI_METHOD
+MPI_File_set_size(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset size
+ );
+
+MPI_METHOD
+PMPI_File_set_size(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset size
+ );
+
+MPI_METHOD
+MPI_File_preallocate(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset size
+ );
+
+MPI_METHOD
+PMPI_File_preallocate(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset size
+ );
+
+MPI_METHOD
+MPI_File_get_size(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* size
+ );
+
+MPI_METHOD
+PMPI_File_get_size(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* size
+ );
+
+MPI_METHOD
+MPI_File_get_group(
+ _In_ MPI_File fh,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+PMPI_File_get_group(
+ _In_ MPI_File fh,
+ _Out_ MPI_Group* group
+ );
+
+MPI_METHOD
+MPI_File_get_amode(
+ _In_ MPI_File fh,
+ _Out_ int* amode
+ );
+
+MPI_METHOD
+PMPI_File_get_amode(
+ _In_ MPI_File fh,
+ _Out_ int* amode
+ );
+
+MPI_METHOD
+MPI_File_set_info(
+ _In_ MPI_File fh,
+ _In_ MPI_Info info
+ );
+
+MPI_METHOD
+PMPI_File_set_info(
+ _In_ MPI_File fh,
+ _In_ MPI_Info info
+ );
+
+MPI_METHOD
+MPI_File_get_info(
+ _In_ MPI_File fh,
+ _Out_ MPI_Info* info_used
+ );
+
+MPI_METHOD
+PMPI_File_get_info(
+ _In_ MPI_File fh,
+ _Out_ MPI_Info* info_used
+ );
+
+
+/*---------------------------------------------*/
+/* Section 13.3: File Views */
+/*---------------------------------------------*/
+
+#define MPI_DISPLACEMENT_CURRENT (-54278278)
+
+MPI_METHOD
+MPI_File_set_view(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset disp,
+ _In_ MPI_Datatype etype,
+ _In_ MPI_Datatype filetype,
+ _In_z_ const char* datarep,
+ _In_ MPI_Info info
+ );
+
+MPI_METHOD
+PMPI_File_set_view(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset disp,
+ _In_ MPI_Datatype etype,
+ _In_ MPI_Datatype filetype,
+ _In_z_ const char* datarep,
+ _In_ MPI_Info info
+ );
+
+#define MPI_MAX_DATAREP_STRING 128
+
+MPI_METHOD
+MPI_File_get_view(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* disp,
+ _Out_ MPI_Datatype* etype,
+ _Out_ MPI_Datatype* filetype,
+ _Out_writes_z_(MPI_MAX_DATAREP_STRING) char* datarep
+ );
+
+MPI_METHOD
+PMPI_File_get_view(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* disp,
+ _Out_ MPI_Datatype* etype,
+ _Out_ MPI_Datatype* filetype,
+ _Out_writes_z_(MPI_MAX_DATAREP_STRING) char* datarep
+ );
+
+
+/*---------------------------------------------*/
+/* Section 13.4: Data Access */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_File_read_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_read_at_all(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_at_all(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_at_all(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_at_all(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_iread_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iread_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_File_iwrite_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iwrite_at(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_File_read(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_read_all(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_all(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_all(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_all(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+
+MPI_METHOD
+MPI_File_iread(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iread(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_File_iwrite(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iwrite(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+
+/* File seek whence */
+#define MPI_SEEK_SET 600
+#define MPI_SEEK_CUR 602
+#define MPI_SEEK_END 604
+
+MPI_METHOD
+MPI_File_seek(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_ int whence
+ );
+
+MPI_METHOD
+PMPI_File_seek(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_ int whence
+ );
+
+MPI_METHOD
+MPI_File_get_position(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* offset
+ );
+
+MPI_METHOD
+PMPI_File_get_position(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* offset
+ );
+
+MPI_METHOD
+MPI_File_get_byte_offset(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_ MPI_Offset* disp
+ );
+
+MPI_METHOD
+PMPI_File_get_byte_offset(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_ MPI_Offset* disp
+ );
+
+MPI_METHOD
+MPI_File_read_shared(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_shared(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_shared(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_shared(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_iread_shared(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iread_shared(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_File_iwrite_shared(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+PMPI_File_iwrite_shared(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Request* request
+ );
+
+MPI_METHOD
+MPI_File_read_ordered(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_ordered(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_ordered(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_ordered(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_seek_shared(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_ int whence
+ );
+
+MPI_METHOD
+PMPI_File_seek_shared(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_ int whence
+ );
+
+MPI_METHOD
+MPI_File_get_position_shared(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* offset
+ );
+
+MPI_METHOD
+PMPI_File_get_position_shared(
+ _In_ MPI_File fh,
+ _Out_ MPI_Offset* offset
+ );
+
+MPI_METHOD
+MPI_File_read_at_all_begin(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_read_at_all_begin(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_read_at_all_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_at_all_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_at_all_begin(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_write_at_all_begin(
+ _In_ MPI_File fh,
+ _In_ MPI_Offset offset,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_write_at_all_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_at_all_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_read_all_begin(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_read_all_begin(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_read_all_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_all_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_all_begin(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_write_all_begin(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_write_all_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_all_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_read_ordered_begin(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_read_ordered_begin(
+ _In_ MPI_File fh,
+ _Out_opt_ void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_read_ordered_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_read_ordered_end(
+ _In_ MPI_File fh,
+ _Out_ void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+MPI_File_write_ordered_begin(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+PMPI_File_write_ordered_begin(
+ _In_ MPI_File fh,
+ _In_opt_ const void* buf,
+ _In_range_(>=, 0) int count,
+ _In_ MPI_Datatype datatype
+ );
+
+MPI_METHOD
+MPI_File_write_ordered_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+MPI_METHOD
+PMPI_File_write_ordered_end(
+ _In_ MPI_File fh,
+ _In_ const void* buf,
+ _Out_ MPI_Status* status
+ );
+
+
+/*---------------------------------------------*/
+/* Section 13.5: File Interoperability */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_File_get_type_extent(
+ _In_ MPI_File fh,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* extent
+ );
+
+MPI_METHOD
+PMPI_File_get_type_extent(
+ _In_ MPI_File fh,
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* extent
+ );
+
+
+typedef
+int
+(MPIAPI MPI_Datarep_conversion_function)(
+ _Inout_ void* userbuf,
+ _In_ MPI_Datatype datatype,
+ _In_range_(>=, 0) int count,
+ _Inout_ void* filebuf,
+ _In_ MPI_Offset position,
+ _In_opt_ void* extra_state
+ );
+
+typedef
+int
+(MPIAPI MPI_Datarep_extent_function)(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* file_extent,
+ _In_opt_ void* extra_state
+ );
+
+#define MPI_CONVERSION_FN_NULL ((MPI_Datarep_conversion_function*)0)
+
+MPI_METHOD
+MPI_Register_datarep(
+ _In_z_ const char* datarep,
+ _In_opt_ MPI_Datarep_conversion_function* read_conversion_fn,
+ _In_opt_ MPI_Datarep_conversion_function* write_conversion_fn,
+ _In_ MPI_Datarep_extent_function* dtype_file_extent_fn,
+ _In_opt_ void* extra_state
+ );
+
+MPI_METHOD
+PMPI_Register_datarep(
+ _In_z_ const char* datarep,
+ _In_opt_ MPI_Datarep_conversion_function* read_conversion_fn,
+ _In_opt_ MPI_Datarep_conversion_function* write_conversion_fn,
+ _In_ MPI_Datarep_extent_function* dtype_file_extent_fn,
+ _In_opt_ void* extra_state
+ );
+
+
+/*---------------------------------------------*/
+/* Section 13.6: Consistency and Semantics */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_File_set_atomicity(
+ _In_ MPI_File fh,
+ _In_range_(0, 1) int flag
+ );
+
+MPI_METHOD
+PMPI_File_set_atomicity(
+ _In_ MPI_File fh,
+ _In_range_(0, 1) int flag
+ );
+
+MPI_METHOD
+MPI_File_get_atomicity(
+ _In_ MPI_File fh,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+PMPI_File_get_atomicity(
+ _In_ MPI_File fh,
+ _mpi_out_flag_ int* flag
+ );
+
+MPI_METHOD
+MPI_File_sync(
+ _In_ MPI_File fh
+ );
+
+MPI_METHOD
+PMPI_File_sync(
+ _In_ MPI_File fh
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 14: Profiling Interface */
+/*---------------------------------------------------------------------------*/
+
+MPI_METHOD
+MPI_Pcontrol(
+ _In_ const int level,
+ ...
+ );
+
+MPI_METHOD
+PMPI_Pcontrol(
+ _In_ const int level,
+ ...
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 15: Deprecated Functions */
+/*---------------------------------------------------------------------------*/
+
+#ifdef MSMPI_NO_DEPRECATE_20
+#define MSMPI_DEPRECATE_20( x )
+#else
+#define MSMPI_DEPRECATE_20( x ) __declspec(deprecated( \
+ "Deprecated in MPI 2.0, use '" #x "'. " \
+ "To disable deprecation, define MSMPI_NO_DEPRECATE_20." ))
+#endif
+
+MSMPI_DEPRECATE_20( MPI_Type_create_hvector )
+MPI_METHOD
+MPI_Type_hvector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ MPI_Aint stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_create_hvector )
+MPI_METHOD
+PMPI_Type_hvector(
+ _In_range_(>=, 0) int count,
+ _In_range_(>=, 0) int blocklength,
+ _In_ MPI_Aint stride,
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( MPI_Type_create_hindexed )
+MPI_METHOD
+MPI_Type_hindexed(
+ _In_range_(>=, 0) int count,
+ _In_reads_opt_(count) const int array_of_blocklengths[],
+ _In_reads_opt_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_create_hindexed )
+MPI_METHOD
+PMPI_Type_hindexed(
+ _In_range_(>=, 0) int count,
+ _In_reads_opt_(count) const int array_of_blocklengths[],
+ _In_reads_opt_(count) const MPI_Aint array_of_displacements[],
+ _In_ MPI_Datatype oldtype,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( MPI_Type_create_struct )
+MPI_METHOD
+MPI_Type_struct(
+ _In_range_(>=, 0) int count,
+ _In_reads_opt_(count) const int array_of_blocklengths[],
+ _In_reads_opt_(count) const MPI_Aint array_of_displacements[],
+ _In_reads_opt_(count) const MPI_Datatype array_of_types[],
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_create_struct )
+MPI_METHOD
+PMPI_Type_struct(
+ _In_range_(>=, 0) int count,
+ _In_reads_opt_(count) const int array_of_blocklengths[],
+ _In_reads_opt_(count) const MPI_Aint array_of_displacements[],
+ _In_reads_opt_(count) const MPI_Datatype array_of_types[],
+ _Out_ MPI_Datatype* newtype
+ );
+
+MSMPI_DEPRECATE_20( MPI_Get_address )
+MPI_METHOD
+MPI_Address(
+ _In_ void* location,
+ _Out_ MPI_Aint* address
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Get_address )
+MPI_METHOD
+PMPI_Address(
+ _In_ void* location,
+ _Out_ MPI_Aint* address
+ );
+
+MSMPI_DEPRECATE_20( MPI_Type_get_extent )
+MPI_METHOD
+MPI_Type_extent(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* extent
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_get_extent )
+MPI_METHOD
+PMPI_Type_extent(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* extent
+ );
+
+MSMPI_DEPRECATE_20( MPI_Type_get_extent )
+MPI_METHOD
+MPI_Type_lb(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* displacement
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_get_extent )
+MPI_METHOD
+PMPI_Type_lb(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* displacement
+ );
+
+MSMPI_DEPRECATE_20( MPI_Type_get_extent )
+MPI_METHOD
+MPI_Type_ub(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* displacement
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Type_get_extent )
+MPI_METHOD
+PMPI_Type_ub(
+ _In_ MPI_Datatype datatype,
+ _Out_ MPI_Aint* displacement
+ );
+
+
+typedef MPI_Comm_copy_attr_function MPI_Copy_function;
+typedef MPI_Comm_delete_attr_function MPI_Delete_function;
+
+#define MPI_NULL_COPY_FN ((MPI_Copy_function*)0)
+#define MPI_NULL_DELETE_FN ((MPI_Delete_function*)0)
+#define MPI_DUP_FN MPIR_Dup_fn
+
+
+MSMPI_DEPRECATE_20( MPI_Comm_create_keyval )
+MPI_METHOD
+MPI_Keyval_create(
+ _In_opt_ MPI_Copy_function* copy_fn,
+ _In_opt_ MPI_Delete_function* delete_fn,
+ _Out_ int* keyval,
+ _In_opt_ void* extra_state
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_create_keyval )
+MPI_METHOD
+PMPI_Keyval_create(
+ _In_opt_ MPI_Copy_function* copy_fn,
+ _In_opt_ MPI_Delete_function* delete_fn,
+ _Out_ int* keyval,
+ _In_opt_ void* extra_state
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_free_keyval )
+MPI_METHOD
+MPI_Keyval_free(
+ _Inout_ int* keyval
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_free_keyval )
+MPI_METHOD
+PMPI_Keyval_free(
+ _Inout_ int* keyval
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_set_attr )
+MPI_METHOD
+MPI_Attr_put(
+ _In_ MPI_Comm comm,
+ _In_ int keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_set_attr )
+MPI_METHOD
+PMPI_Attr_put(
+ _In_ MPI_Comm comm,
+ _In_ int keyval,
+ _In_opt_ void* attribute_val
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_get_attr )
+MPI_METHOD
+MPI_Attr_get(
+ _In_ MPI_Comm comm,
+ _In_ int keyval,
+ _Out_ void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_get_attr )
+MPI_METHOD
+PMPI_Attr_get(
+ _In_ MPI_Comm comm,
+ _In_ int keyval,
+ _Out_ void* attribute_val,
+ _mpi_out_flag_ int* flag
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_delete_attr )
+MPI_METHOD
+MPI_Attr_delete(
+ _In_ MPI_Comm comm,
+ _In_ int keyval
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_delete_attr )
+MPI_METHOD
+PMPI_Attr_delete(
+ _In_ MPI_Comm comm,
+ _In_ int keyval
+ );
+
+
+typedef MPI_Comm_errhandler_fn MPI_Handler_function;
+
+MSMPI_DEPRECATE_20( MPI_Comm_create_errhandler )
+MPI_METHOD
+MPI_Errhandler_create(
+ _In_ MPI_Handler_function* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_create_errhandler )
+MPI_METHOD
+PMPI_Errhandler_create(
+ _In_ MPI_Handler_function* function,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_set_errhandler )
+MPI_METHOD
+MPI_Errhandler_set(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_set_errhandler )
+MPI_METHOD
+PMPI_Errhandler_set(
+ _In_ MPI_Comm comm,
+ _In_ MPI_Errhandler errhandler
+ );
+
+MSMPI_DEPRECATE_20( MPI_Comm_get_errhandler )
+MPI_METHOD
+MPI_Errhandler_get(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+MSMPI_DEPRECATE_20( PMPI_Comm_get_errhandler )
+MPI_METHOD
+PMPI_Errhandler_get(
+ _In_ MPI_Comm comm,
+ _Out_ MPI_Errhandler* errhandler
+ );
+
+
+/*---------------------------------------------------------------------------*/
+/* Chapter 16: Language Bindings */
+/*---------------------------------------------------------------------------*/
+
+/*---------------------------------------------*/
+/* Section 16.2: Fortran Support */
+/*---------------------------------------------*/
+
+MPI_METHOD
+MPI_Type_create_f90_real(
+ _In_ int p,
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_f90_real(
+ _In_ int p,
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_f90_complex(
+ _In_ int p,
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_f90_complex(
+ _In_ int p,
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+MPI_Type_create_f90_integer(
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+MPI_METHOD
+PMPI_Type_create_f90_integer(
+ _In_ int r,
+ _Out_ MPI_Datatype* newtype
+ );
+
+/* typeclasses */
+#define MPI_TYPECLASS_REAL 1
+#define MPI_TYPECLASS_INTEGER 2
+#define MPI_TYPECLASS_COMPLEX 3
+
+MPI_METHOD
+MPI_Type_match_size(
+ _In_ int typeclass,
+ _In_ int size,
+ _Out_ MPI_Datatype* datatype
+ );
+
+MPI_METHOD
+PMPI_Type_match_size(
+ _In_ int typeclass,
+ _In_ int size,
+ _Out_ MPI_Datatype* datatype
+ );
+
+
+/*---------------------------------------------*/
+/* Section 16.3: Language Interoperability */
+/*---------------------------------------------*/
+
+#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
+#define PMPI_Comm_c2f(comm) (MPI_Fint)(comm)
+
+#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
+#define PMPI_Comm_f2c(comm) (MPI_Comm)(comm)
+
+
+#define MPI_Type_f2c(datatype) (MPI_Datatype)(datatype)
+#define PMPI_Type_f2c(datatype) (MPI_Datatype)(datatype)
+
+#define MPI_Type_c2f(datatype) (MPI_Fint)(datatype)
+#define PMPI_Type_c2f(datatype) (MPI_Fint)(datatype)
+
+
+#define MPI_Group_f2c(group) (MPI_Group)(group)
+#define PMPI_Group_f2c(group) (MPI_Group)(group)
+
+#define MPI_Group_c2f(group) (MPI_Fint)(group)
+#define PMPI_Group_c2f(group) (MPI_Fint)(group)
+
+
+#define MPI_Request_f2c(request) (MPI_Request)(request)
+#define PMPI_Request_f2c(request) (MPI_Request)(request)
+
+#define MPI_Request_c2f(request) (MPI_Fint)(request)
+#define PMPI_Request_c2f(request) (MPI_Fint)(request)
+
+
+#define MPI_Win_f2c(win) (MPI_Win)(win)
+#define PMPI_Win_f2c(win) (MPI_Win)(win)
+
+#define MPI_Win_c2f(win) (MPI_Fint)(win)
+#define PMPI_Win_c2f(win) (MPI_Fint)(win)
+
+
+#define MPI_Op_c2f(op) (MPI_Fint)(op)
+#define PMPI_Op_c2f(op) (MPI_Fint)(op)
+
+#define MPI_Op_f2c(op) (MPI_Op)(op)
+#define PMPI_Op_f2c(op) (MPI_Op)(op)
+
+
+#define MPI_Info_c2f(info) (MPI_Fint)(info)
+#define PMPI_Info_c2f(info) (MPI_Fint)(info)
+
+#define MPI_Info_f2c(info) (MPI_Info)(info)
+#define PMPI_Info_f2c(info) (MPI_Info)(info)
+
+
+#define MPI_Message_c2f(msg) (MPI_Fint)(msg)
+#define PMPI_Message_c2f(msg) (MPI_Fint)(msg)
+
+#define MPI_Message_f2c(msg) (MPI_Message)(msg)
+#define PMPI_Message_f2c(msg) (MPI_Message)(msg)
+
+
+#define MPI_Errhandler_c2f(errhandler) (MPI_Fint)(errhandler)
+#define PMPI_Errhandler_c2f(errhandler) (MPI_Fint)(errhandler)
+
+#define MPI_Errhandler_f2c(errhandler) (MPI_Errhandler)(errhandler)
+#define PMPI_Errhandler_f2c(errhandler) (MPI_Errhandler)(errhandler)
+
+
+MPI_File
+MPIAPI
+MPI_File_f2c(
+ _In_ MPI_Fint file
+ );
+
+MPI_File
+MPIAPI
+PMPI_File_f2c(
+ _In_ MPI_Fint file
+ );
+
+MPI_Fint
+MPIAPI
+MPI_File_c2f(
+ _In_ MPI_File file
+ );
+
+MPI_Fint
+MPIAPI
+PMPI_File_c2f(
+ _In_ MPI_File file
+ );
+
+MPI_METHOD
+MPI_Status_f2c(
+ _In_ const MPI_Fint* f_status,
+ _Out_ MPI_Status* c_status
+ );
+
+MPI_METHOD
+PMPI_Status_f2c(
+ _In_ const MPI_Fint* f_status,
+ _Out_ MPI_Status* c_status
+ );
+
+MPI_METHOD
+MPI_Status_c2f(
+ _In_ const MPI_Status* c_status,
+ _Out_ MPI_Fint* f_status
+ );
+
+MPI_METHOD
+PMPI_Status_c2f(
+ _In_ const MPI_Status* c_status,
+ _Out_ MPI_Fint* f_status
+ );
+
+
+#if !defined(_MPICH_DLL_)
+#define MPIU_DLL_SPEC __declspec(dllimport)
+#else
+#define MPIU_DLL_SPEC
+#endif
+
+extern MPIU_DLL_SPEC MPI_Fint* MPI_F_STATUS_IGNORE;
+extern MPIU_DLL_SPEC MPI_Fint* MPI_F_STATUSES_IGNORE;
+
+
+/*---------------------------------------------------------------------------*/
+/* Implementation Specific */
+/*---------------------------------------------------------------------------*/
+
+MPI_METHOD
+MPIR_Dup_fn(
+ _In_ MPI_Comm oldcomm,
+ _In_ int keyval,
+ _In_opt_ void* extra_state,
+ _In_opt_ void* attribute_val_in,
+ _Out_ void* attribute_val_out,
+ _mpi_out_flag_ int* flag
+ );
+
+
+#if MSMPI_VER >= 0x300
+
+MPI_METHOD
+MSMPI_Get_bsend_overhead(void);
+
+#endif
+
+
+#if MSMPI_VER >= 0x300
+
+MPI_METHOD
+MSMPI_Get_version(void);
+
+#else
+# define MSMPI_Get_version() (MSMPI_VER)
+#endif
+
+typedef void
+(MPIAPI MSMPI_Request_callback)(
+ _In_ MPI_Status* status
+ );
+
+MPI_METHOD
+MSMPI_Request_set_apc(
+ _In_ MPI_Request request,
+ _In_ MSMPI_Request_callback* callback_fn,
+ _In_ MPI_Status* callback_status
+ );
+
+typedef struct _MSMPI_LOCK_QUEUE
+{
+ struct _MSMPI_LOCK_QUEUE* volatile next;
+ volatile MPI_Aint flags;
+
+} MSMPI_Lock_queue;
+
+void
+MPIAPI
+MSMPI_Queuelock_acquire(
+ _Out_ MSMPI_Lock_queue* queue
+ );
+
+void
+MPIAPI
+MSMPI_Queuelock_release(
+ _In_ MSMPI_Lock_queue* queue
+ );
+
+MPI_METHOD
+MSMPI_Waitsome_interruptible(
+ _In_range_(>=, 0) int incount,
+ _Inout_updates_opt_(incount) MPI_Request array_of_requests[],
+ _Out_ _Deref_out_range_(MPI_UNDEFINED, incount) int* outcount,
+ _Out_writes_to_opt_(incount,*outcount) int array_of_indices[],
+ _Out_writes_to_opt_(incount,*outcount) MPI_Status array_of_statuses[]
+ );
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* MPI_INCLUDED */
diff --git a/src/include/mpif.h b/src/include/mpif.h
new file mode 100644
index 0000000..d143fee
--- /dev/null
+++ b/src/include/mpif.h
@@ -0,0 +1,536 @@
+! /* -*- Mode: Fortran; -*- */
+!
+! Copyright (c) Microsoft Corporation. All rights reserved.
+! Licensed under the MIT License.
+!
+! (C) 2001 by Argonne National Laboratory.
+! (C) 2015 by Microsoft Corporation
+!
+! MPICH COPYRIGHT
+!
+! The following is a notice of limited availability of the code, and disclaimer
+! which must be included in the prologue of the code and in all source listings
+! of the code.
+!
+! Copyright Notice
+! (C) 2002 University of Chicago
+!
+! Permission is hereby granted to use, reproduce, prepare derivative works, and
+! to redistribute to others. This software was authored by:
+!
+! Mathematics and Computer Science Division
+! Argonne National Laboratory, Argonne IL 60439
+!
+! (and)
+!
+! Department of Computer Science
+! University of Illinois at Urbana-Champaign
+!
+!
+! GOVERNMENT LICENSE
+!
+! Portions of this material resulted from work developed under a U.S.
+! Government Contract and are subject to the following license: the Government
+! is granted for itself and others acting on its behalf a paid-up, nonexclusive,
+! irrevocable worldwide license in this computer software to reproduce, prepare
+! derivative works, and perform publicly and display publicly.
+!
+! DISCLAIMER
+!
+! This computer code material was prepared, in part, as an account of work
+! sponsored by an agency of the United States Government. Neither the United
+! States, nor the University of Chicago, nor any of their employees, makes any
+! warranty express or implied, or assumes any legal liability or responsibility
+! for the accuracy, completeness, or usefulness of any information, apparatus,
+! product, or process disclosed, or represents that its use would not infringe
+! privately owned rights.
+!
+!
+ INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR
+ PARAMETER (MPI_SOURCE=3,MPI_TAG=4,MPI_ERROR=5)
+ INTEGER MPI_STATUS_SIZE
+ PARAMETER (MPI_STATUS_SIZE=5)
+ INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
+ INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1)
+ INTEGER MPI_ERRCODES_IGNORE(1)
+ CHARACTER*1 MPI_ARGVS_NULL(1,1)
+ CHARACTER*1 MPI_ARGV_NULL(1)
+ INTEGER MPI_SUCCESS
+ PARAMETER (MPI_SUCCESS=0)
+ INTEGER MPI_ERR_OTHER
+ PARAMETER (MPI_ERR_OTHER=15)
+ INTEGER MPI_ERR_WIN
+ PARAMETER (MPI_ERR_WIN=45)
+ INTEGER MPI_ERR_FILE
+ PARAMETER (MPI_ERR_FILE=27)
+ INTEGER MPI_ERR_COUNT
+ PARAMETER (MPI_ERR_COUNT=2)
+ INTEGER MPI_ERR_SPAWN
+ PARAMETER (MPI_ERR_SPAWN=42)
+ INTEGER MPI_ERR_BASE
+ PARAMETER (MPI_ERR_BASE=46)
+ INTEGER MPI_ERR_RMA_CONFLICT
+ PARAMETER (MPI_ERR_RMA_CONFLICT=49)
+ INTEGER MPI_ERR_IN_STATUS
+ PARAMETER (MPI_ERR_IN_STATUS=17)
+ INTEGER MPI_ERR_INFO_KEY
+ PARAMETER (MPI_ERR_INFO_KEY=29)
+ INTEGER MPI_ERR_LOCKTYPE
+ PARAMETER (MPI_ERR_LOCKTYPE=47)
+ INTEGER MPI_ERR_OP
+ PARAMETER (MPI_ERR_OP=9)
+ INTEGER MPI_ERR_ARG
+ PARAMETER (MPI_ERR_ARG=12)
+ INTEGER MPI_ERR_READ_ONLY
+ PARAMETER (MPI_ERR_READ_ONLY=40)
+ INTEGER MPI_ERR_SIZE
+ PARAMETER (MPI_ERR_SIZE=51)
+ INTEGER MPI_ERR_BUFFER
+ PARAMETER (MPI_ERR_BUFFER=1)
+ INTEGER MPI_ERR_DUP_DATAREP
+ PARAMETER (MPI_ERR_DUP_DATAREP=24)
+ INTEGER MPI_ERR_UNSUPPORTED_DATAREP
+ PARAMETER (MPI_ERR_UNSUPPORTED_DATAREP=43)
+ INTEGER MPI_ERR_LASTCODE
+ PARAMETER (MPI_ERR_LASTCODE=1073741823)
+ INTEGER MPI_ERR_TRUNCATE
+ PARAMETER (MPI_ERR_TRUNCATE=14)
+ INTEGER MPI_ERR_DISP
+ PARAMETER (MPI_ERR_DISP=52)
+ INTEGER MPI_ERR_PORT
+ PARAMETER (MPI_ERR_PORT=38)
+ INTEGER MPI_ERR_INFO_NOKEY
+ PARAMETER (MPI_ERR_INFO_NOKEY=31)
+ INTEGER MPI_ERR_ASSERT
+ PARAMETER (MPI_ERR_ASSERT=53)
+ INTEGER MPI_ERR_FILE_EXISTS
+ PARAMETER (MPI_ERR_FILE_EXISTS=25)
+ INTEGER MPI_ERR_PENDING
+ PARAMETER (MPI_ERR_PENDING=18)
+ INTEGER MPI_ERR_COMM
+ PARAMETER (MPI_ERR_COMM=5)
+ INTEGER MPI_ERR_KEYVAL
+ PARAMETER (MPI_ERR_KEYVAL=48)
+ INTEGER MPI_ERR_NAME
+ PARAMETER (MPI_ERR_NAME=33)
+ INTEGER MPI_ERR_REQUEST
+ PARAMETER (MPI_ERR_REQUEST=19)
+ INTEGER MPI_ERR_GROUP
+ PARAMETER (MPI_ERR_GROUP=8)
+ INTEGER MPI_ERR_TOPOLOGY
+ PARAMETER (MPI_ERR_TOPOLOGY=10)
+ INTEGER MPI_ERR_TYPE
+ PARAMETER (MPI_ERR_TYPE=3)
+ INTEGER MPI_ERR_TAG
+ PARAMETER (MPI_ERR_TAG=4)
+ INTEGER MPI_ERR_INFO_VALUE
+ PARAMETER (MPI_ERR_INFO_VALUE=30)
+ INTEGER MPI_ERR_NOT_SAME
+ PARAMETER (MPI_ERR_NOT_SAME=35)
+ INTEGER MPI_ERR_RMA_SYNC
+ PARAMETER (MPI_ERR_RMA_SYNC=50)
+ INTEGER MPI_ERR_INFO
+ PARAMETER (MPI_ERR_INFO=28)
+ INTEGER MPI_ERR_NO_MEM
+ PARAMETER (MPI_ERR_NO_MEM=34)
+ INTEGER MPI_ERR_BAD_FILE
+ PARAMETER (MPI_ERR_BAD_FILE=22)
+ INTEGER MPI_ERR_FILE_IN_USE
+ PARAMETER (MPI_ERR_FILE_IN_USE=26)
+ INTEGER MPI_ERR_UNKNOWN
+ PARAMETER (MPI_ERR_UNKNOWN=13)
+ INTEGER MPI_ERR_UNSUPPORTED_OPERATION
+ PARAMETER (MPI_ERR_UNSUPPORTED_OPERATION=44)
+ INTEGER MPI_ERR_QUOTA
+ PARAMETER (MPI_ERR_QUOTA=39)
+ INTEGER MPI_ERR_AMODE
+ PARAMETER (MPI_ERR_AMODE=21)
+ INTEGER MPI_ERR_ROOT
+ PARAMETER (MPI_ERR_ROOT=7)
+ INTEGER MPI_ERR_RANK
+ PARAMETER (MPI_ERR_RANK=6)
+ INTEGER MPI_ERR_DIMS
+ PARAMETER (MPI_ERR_DIMS=11)
+ INTEGER MPI_ERR_NO_SUCH_FILE
+ PARAMETER (MPI_ERR_NO_SUCH_FILE=37)
+ INTEGER MPI_ERR_SERVICE
+ PARAMETER (MPI_ERR_SERVICE=41)
+ INTEGER MPI_ERR_INTERN
+ PARAMETER (MPI_ERR_INTERN=16)
+ INTEGER MPI_ERR_IO
+ PARAMETER (MPI_ERR_IO=32)
+ INTEGER MPI_ERR_ACCESS
+ PARAMETER (MPI_ERR_ACCESS=20)
+ INTEGER MPI_ERR_NO_SPACE
+ PARAMETER (MPI_ERR_NO_SPACE=36)
+ INTEGER MPI_ERR_CONVERSION
+ PARAMETER (MPI_ERR_CONVERSION=23)
+ INTEGER MPI_ERRORS_ARE_FATAL
+ PARAMETER (MPI_ERRORS_ARE_FATAL=1409286144)
+ INTEGER MPI_ERRORS_RETURN
+ PARAMETER (MPI_ERRORS_RETURN=1409286145)
+ INTEGER MPI_IDENT
+ PARAMETER (MPI_IDENT=0)
+ INTEGER MPI_CONGRUENT
+ PARAMETER (MPI_CONGRUENT=1)
+ INTEGER MPI_SIMILAR
+ PARAMETER (MPI_SIMILAR=2)
+ INTEGER MPI_UNEQUAL
+ PARAMETER (MPI_UNEQUAL=3)
+ INTEGER MPI_MAX
+ PARAMETER (MPI_MAX=1476395009)
+ INTEGER MPI_MIN
+ PARAMETER (MPI_MIN=1476395010)
+ INTEGER MPI_SUM
+ PARAMETER (MPI_SUM=1476395011)
+ INTEGER MPI_PROD
+ PARAMETER (MPI_PROD=1476395012)
+ INTEGER MPI_LAND
+ PARAMETER (MPI_LAND=1476395013)
+ INTEGER MPI_BAND
+ PARAMETER (MPI_BAND=1476395014)
+ INTEGER MPI_LOR
+ PARAMETER (MPI_LOR=1476395015)
+ INTEGER MPI_BOR
+ PARAMETER (MPI_BOR=1476395016)
+ INTEGER MPI_LXOR
+ PARAMETER (MPI_LXOR=1476395017)
+ INTEGER MPI_BXOR
+ PARAMETER (MPI_BXOR=1476395018)
+ INTEGER MPI_MINLOC
+ PARAMETER (MPI_MINLOC=1476395019)
+ INTEGER MPI_MAXLOC
+ PARAMETER (MPI_MAXLOC=1476395020)
+ INTEGER MPI_REPLACE
+ PARAMETER (MPI_REPLACE=1476395021)
+ INTEGER MPI_NO_OP
+ PARAMETER (MPI_NO_OP=1476395022)
+ INTEGER MPI_COMM_WORLD
+ PARAMETER (MPI_COMM_WORLD=1140850688)
+ INTEGER MPI_COMM_SELF
+ PARAMETER (MPI_COMM_SELF=1140850689)
+ INTEGER MPI_COMM_TYPE_SHARED
+ PARAMETER (MPI_COMM_TYPE_SHARED=1)
+ INTEGER MPI_GROUP_EMPTY
+ PARAMETER (MPI_GROUP_EMPTY=1207959552)
+ INTEGER MPI_COMM_NULL
+ PARAMETER (MPI_COMM_NULL=67108864)
+ INTEGER MPI_WIN_NULL
+ PARAMETER (MPI_WIN_NULL=536870912)
+ INTEGER MPI_FILE_NULL
+ PARAMETER (MPI_FILE_NULL=0)
+ INTEGER MPI_GROUP_NULL
+ PARAMETER (MPI_GROUP_NULL=134217728)
+ INTEGER MPI_OP_NULL
+ PARAMETER (MPI_OP_NULL=402653184)
+ INTEGER MPI_DATATYPE_NULL
+ PARAMETER (MPI_DATATYPE_NULL=z'0c000000')
+ INTEGER MPI_REQUEST_NULL
+ PARAMETER (MPI_REQUEST_NULL=738197504)
+ INTEGER MPI_ERRHANDLER_NULL
+ PARAMETER (MPI_ERRHANDLER_NULL=335544320)
+ INTEGER MPI_INFO_NULL
+ PARAMETER (MPI_INFO_NULL=469762048)
+ INTEGER MPI_MESSAGE_NULL
+ PARAMETER (MPI_MESSAGE_NULL=805306368)
+ INTEGER MPI_MESSAGE_NO_PROC
+ PARAMETER (MPI_MESSAGE_NO_PROC=1879048192)
+ INTEGER MPI_TAG_UB
+ PARAMETER (MPI_TAG_UB=1681915906)
+ INTEGER MPI_HOST
+ PARAMETER (MPI_HOST=1681915908)
+ INTEGER MPI_IO
+ PARAMETER (MPI_IO=1681915910)
+ INTEGER MPI_WTIME_IS_GLOBAL
+ PARAMETER (MPI_WTIME_IS_GLOBAL=1681915912)
+ INTEGER MPI_UNIVERSE_SIZE
+ PARAMETER (MPI_UNIVERSE_SIZE=1681915914)
+ INTEGER MPI_LASTUSEDCODE
+ PARAMETER (MPI_LASTUSEDCODE=1681915916)
+ INTEGER MPI_APPNUM
+ PARAMETER (MPI_APPNUM=1681915918)
+ INTEGER MPI_WIN_BASE
+ PARAMETER (MPI_WIN_BASE=1711276034)
+ INTEGER MPI_WIN_SIZE
+ PARAMETER (MPI_WIN_SIZE=1711276036)
+ INTEGER MPI_WIN_DISP_UNIT
+ PARAMETER (MPI_WIN_DISP_UNIT=1711276038)
+ INTEGER MPI_MAX_ERROR_STRING
+ PARAMETER (MPI_MAX_ERROR_STRING=511)
+ INTEGER MPI_MAX_PORT_NAME
+ PARAMETER (MPI_MAX_PORT_NAME=255)
+ INTEGER MPI_MAX_OBJECT_NAME
+ PARAMETER (MPI_MAX_OBJECT_NAME=127)
+ INTEGER MPI_MAX_INFO_KEY
+ PARAMETER (MPI_MAX_INFO_KEY=254)
+ INTEGER MPI_MAX_INFO_VAL
+ PARAMETER (MPI_MAX_INFO_VAL=1023)
+ INTEGER MPI_MAX_PROCESSOR_NAME
+ PARAMETER (MPI_MAX_PROCESSOR_NAME=128-1)
+ INTEGER MPI_MAX_DATAREP_STRING
+ PARAMETER (MPI_MAX_DATAREP_STRING=127)
+ INTEGER MPI_MAX_LIBRARY_VERSION_STRING
+ PARAMETER (MPI_MAX_LIBRARY_VERSION_STRING=64-1)
+ INTEGER MPI_UNDEFINED
+ PARAMETER (MPI_UNDEFINED=(-32766))
+ INTEGER MPI_KEYVAL_INVALID
+ PARAMETER (MPI_KEYVAL_INVALID=603979776)
+ INTEGER MPI_BSEND_OVERHEAD
+ PARAMETER (MPI_BSEND_OVERHEAD=(95))
+ INTEGER MPI_PROC_NULL
+ PARAMETER (MPI_PROC_NULL=-1)
+ INTEGER MPI_ANY_SOURCE
+ PARAMETER (MPI_ANY_SOURCE=-2)
+ INTEGER MPI_ANY_TAG
+ PARAMETER (MPI_ANY_TAG=-1)
+ INTEGER MPI_ROOT
+ PARAMETER (MPI_ROOT=-3)
+ INTEGER MPI_GRAPH
+ PARAMETER (MPI_GRAPH=1)
+ INTEGER MPI_CART
+ PARAMETER (MPI_CART=2)
+ INTEGER MPI_DIST_GRAPH
+ PARAMETER (MPI_DIST_GRAPH=3)
+ INTEGER MPI_VERSION
+ PARAMETER (MPI_VERSION=2)
+ INTEGER MPI_SUBVERSION
+ PARAMETER (MPI_SUBVERSION=0)
+ INTEGER MPI_LOCK_EXCLUSIVE
+ PARAMETER (MPI_LOCK_EXCLUSIVE=234)
+ INTEGER MPI_LOCK_SHARED
+ PARAMETER (MPI_LOCK_SHARED=235)
+ INTEGER MPI_CHAR
+ PARAMETER (MPI_CHAR=z'4c000101')
+ INTEGER MPI_UNSIGNED_CHAR
+ PARAMETER (MPI_UNSIGNED_CHAR=z'4c000102')
+ INTEGER MPI_SHORT
+ PARAMETER (MPI_SHORT=z'4c000203')
+ INTEGER MPI_UNSIGNED_SHORT
+ PARAMETER (MPI_UNSIGNED_SHORT=z'4c000204')
+ INTEGER MPI_INT
+ PARAMETER (MPI_INT=z'4c000405')
+ INTEGER MPI_UNSIGNED
+ PARAMETER (MPI_UNSIGNED=z'4c000406')
+ INTEGER MPI_LONG
+ PARAMETER (MPI_LONG=z'4c000407')
+ INTEGER MPI_UNSIGNED_LONG
+ PARAMETER (MPI_UNSIGNED_LONG=z'4c000408')
+ INTEGER MPI_LONG_LONG
+ PARAMETER (MPI_LONG_LONG=z'4c000809')
+ INTEGER MPI_LONG_LONG_INT
+ PARAMETER (MPI_LONG_LONG_INT=z'4c000809')
+ INTEGER MPI_FLOAT
+ PARAMETER (MPI_FLOAT=z'4c00040a')
+ INTEGER MPI_DOUBLE
+ PARAMETER (MPI_DOUBLE=z'4c00080b')
+ INTEGER MPI_LONG_DOUBLE
+ PARAMETER (MPI_LONG_DOUBLE=z'4c00080c')
+ INTEGER MPI_BYTE
+ PARAMETER (MPI_BYTE=z'4c00010d')
+ INTEGER MPI_WCHAR
+ PARAMETER (MPI_WCHAR=z'4c00020e')
+ INTEGER MPI_PACKED
+ PARAMETER (MPI_PACKED=z'4c00010f')
+ INTEGER MPI_LB
+ PARAMETER (MPI_LB=z'4c000010')
+ INTEGER MPI_UB
+ PARAMETER (MPI_UB=z'4c000011')
+ INTEGER MPI_2INT
+ PARAMETER (MPI_2INT=z'4c000816')
+ INTEGER MPI_SIGNED_CHAR
+ PARAMETER (MPI_SIGNED_CHAR=z'4c000118')
+ INTEGER MPI_UNSIGNED_LONG_LONG
+ PARAMETER (MPI_UNSIGNED_LONG_LONG=z'4c000819')
+ INTEGER MPI_CHARACTER
+ PARAMETER (MPI_CHARACTER=z'4c00011a')
+ INTEGER MPI_INTEGER
+ PARAMETER (MPI_INTEGER=z'4c00041b')
+ INTEGER MPI_REAL
+ PARAMETER (MPI_REAL=z'4c00041c')
+ INTEGER MPI_LOGICAL
+ PARAMETER (MPI_LOGICAL=z'4c00041d')
+ INTEGER MPI_COMPLEX
+ PARAMETER (MPI_COMPLEX=z'4c00081e')
+ INTEGER MPI_DOUBLE_PRECISION
+ PARAMETER (MPI_DOUBLE_PRECISION=z'4c00081f')
+ INTEGER MPI_2INTEGER
+ PARAMETER (MPI_2INTEGER=z'4c000820')
+ INTEGER MPI_2REAL
+ PARAMETER (MPI_2REAL=z'4c000821')
+ INTEGER MPI_DOUBLE_COMPLEX
+ PARAMETER (MPI_DOUBLE_COMPLEX=z'4c001022')
+ INTEGER MPI_2DOUBLE_PRECISION
+ PARAMETER (MPI_2DOUBLE_PRECISION=z'4c001023')
+ INTEGER MPI_2COMPLEX
+ PARAMETER (MPI_2COMPLEX=z'4c001024')
+ INTEGER MPI_2DOUBLE_COMPLEX
+ PARAMETER (MPI_2DOUBLE_COMPLEX=z'4c002025')
+ INTEGER MPI_REAL2
+ PARAMETER (MPI_REAL2=z'0c000000')
+ INTEGER MPI_REAL4
+ PARAMETER (MPI_REAL4=z'4c000427')
+ INTEGER MPI_COMPLEX8
+ PARAMETER (MPI_COMPLEX8=z'4c000828')
+ INTEGER MPI_REAL8
+ PARAMETER (MPI_REAL8=z'4c000829')
+ INTEGER MPI_COMPLEX16
+ PARAMETER (MPI_COMPLEX16=z'4c00102a')
+ INTEGER MPI_REAL16
+ PARAMETER (MPI_REAL16=z'0c000000')
+ INTEGER MPI_COMPLEX32
+ PARAMETER (MPI_COMPLEX32=z'0c000000')
+ INTEGER MPI_INTEGER1
+ PARAMETER (MPI_INTEGER1=z'4c00012d')
+ INTEGER MPI_COMPLEX4
+ PARAMETER (MPI_COMPLEX4=z'0c000000')
+ INTEGER MPI_INTEGER2
+ PARAMETER (MPI_INTEGER2=z'4c00022f')
+ INTEGER MPI_INTEGER4
+ PARAMETER (MPI_INTEGER4=z'4c000430')
+ INTEGER MPI_INTEGER8
+ PARAMETER (MPI_INTEGER8=z'4c000831')
+ INTEGER MPI_INTEGER16
+ PARAMETER (MPI_INTEGER16=z'0c000000')
+
+ INCLUDE 'mpifptr.h'
+
+ INTEGER MPI_OFFSET
+ PARAMETER (MPI_OFFSET=z'4c00083c')
+ INTEGER MPI_COUNT
+ PARAMETER (MPI_COUNT=z'4c00083d')
+ INTEGER MPI_FLOAT_INT
+ PARAMETER (MPI_FLOAT_INT=z'8c000000')
+ INTEGER MPI_DOUBLE_INT
+ PARAMETER (MPI_DOUBLE_INT=z'8c000001')
+ INTEGER MPI_LONG_INT
+ PARAMETER (MPI_LONG_INT=z'8c000002')
+ INTEGER MPI_SHORT_INT
+ PARAMETER (MPI_SHORT_INT=z'8c000003')
+ INTEGER MPI_LONG_DOUBLE_INT
+ PARAMETER (MPI_LONG_DOUBLE_INT=z'8c000004')
+ INTEGER MPI_INTEGER_KIND
+ PARAMETER (MPI_INTEGER_KIND=4)
+ INTEGER MPI_OFFSET_KIND
+ PARAMETER (MPI_OFFSET_KIND=8)
+ INTEGER MPI_COUNT_KIND
+ PARAMETER (MPI_COUNT_KIND=8)
+ INTEGER MPI_COMBINER_NAMED
+ PARAMETER (MPI_COMBINER_NAMED=1)
+ INTEGER MPI_COMBINER_DUP
+ PARAMETER (MPI_COMBINER_DUP=2)
+ INTEGER MPI_COMBINER_CONTIGUOUS
+ PARAMETER (MPI_COMBINER_CONTIGUOUS=3)
+ INTEGER MPI_COMBINER_VECTOR
+ PARAMETER (MPI_COMBINER_VECTOR=4)
+ INTEGER MPI_COMBINER_HVECTOR_INTEGER
+ PARAMETER (MPI_COMBINER_HVECTOR_INTEGER=5)
+ INTEGER MPI_COMBINER_HVECTOR
+ PARAMETER (MPI_COMBINER_HVECTOR=6)
+ INTEGER MPI_COMBINER_INDEXED
+ PARAMETER (MPI_COMBINER_INDEXED=7)
+ INTEGER MPI_COMBINER_HINDEXED_INTEGER
+ PARAMETER (MPI_COMBINER_HINDEXED_INTEGER=8)
+ INTEGER MPI_COMBINER_HINDEXED
+ PARAMETER (MPI_COMBINER_HINDEXED=9)
+ INTEGER MPI_COMBINER_INDEXED_BLOCK
+ PARAMETER (MPI_COMBINER_INDEXED_BLOCK=10)
+ INTEGER MPI_COMBINER_STRUCT_INTEGER
+ PARAMETER (MPI_COMBINER_STRUCT_INTEGER=11)
+ INTEGER MPI_COMBINER_STRUCT
+ PARAMETER (MPI_COMBINER_STRUCT=12)
+ INTEGER MPI_COMBINER_SUBARRAY
+ PARAMETER (MPI_COMBINER_SUBARRAY=13)
+ INTEGER MPI_COMBINER_DARRAY
+ PARAMETER (MPI_COMBINER_DARRAY=14)
+ INTEGER MPI_COMBINER_F90_REAL
+ PARAMETER (MPI_COMBINER_F90_REAL=15)
+ INTEGER MPI_COMBINER_F90_COMPLEX
+ PARAMETER (MPI_COMBINER_F90_COMPLEX=16)
+ INTEGER MPI_COMBINER_F90_INTEGER
+ PARAMETER (MPI_COMBINER_F90_INTEGER=17)
+ INTEGER MPI_COMBINER_RESIZED
+ PARAMETER (MPI_COMBINER_RESIZED=18)
+ INTEGER MPI_COMBINER_HINDEXED_BLOCK
+ PARAMETER (MPI_COMBINER_HINDEXED_BLOCK=19)
+ INTEGER MPI_MODE_NOCHECK
+ PARAMETER (MPI_MODE_NOCHECK=1024)
+ INTEGER MPI_MODE_NOSTORE
+ PARAMETER (MPI_MODE_NOSTORE=2048)
+ INTEGER MPI_MODE_NOPUT
+ PARAMETER (MPI_MODE_NOPUT=4096)
+ INTEGER MPI_MODE_NOPRECEDE
+ PARAMETER (MPI_MODE_NOPRECEDE=8192)
+ INTEGER MPI_MODE_NOSUCCEED
+ PARAMETER (MPI_MODE_NOSUCCEED=16384)
+ INTEGER MPI_THREAD_SINGLE
+ PARAMETER (MPI_THREAD_SINGLE=0)
+ INTEGER MPI_THREAD_FUNNELED
+ PARAMETER (MPI_THREAD_FUNNELED=1)
+ INTEGER MPI_THREAD_SERIALIZED
+ PARAMETER (MPI_THREAD_SERIALIZED=2)
+ INTEGER MPI_THREAD_MULTIPLE
+ PARAMETER (MPI_THREAD_MULTIPLE=3)
+ INTEGER MPI_MODE_RDONLY
+ PARAMETER (MPI_MODE_RDONLY=2)
+ INTEGER MPI_MODE_RDWR
+ PARAMETER (MPI_MODE_RDWR=8)
+ INTEGER MPI_MODE_WRONLY
+ PARAMETER (MPI_MODE_WRONLY=4)
+ INTEGER MPI_MODE_DELETE_ON_CLOSE
+ PARAMETER (MPI_MODE_DELETE_ON_CLOSE=16)
+ INTEGER MPI_MODE_UNIQUE_OPEN
+ PARAMETER (MPI_MODE_UNIQUE_OPEN=32)
+ INTEGER MPI_MODE_CREATE
+ PARAMETER (MPI_MODE_CREATE=1)
+ INTEGER MPI_MODE_EXCL
+ PARAMETER (MPI_MODE_EXCL=64)
+ INTEGER MPI_MODE_APPEND
+ PARAMETER (MPI_MODE_APPEND=128)
+ INTEGER MPI_MODE_SEQUENTIAL
+ PARAMETER (MPI_MODE_SEQUENTIAL=256)
+ INTEGER MPI_SEEK_SET
+ PARAMETER (MPI_SEEK_SET=600)
+ INTEGER MPI_SEEK_CUR
+ PARAMETER (MPI_SEEK_CUR=602)
+ INTEGER MPI_SEEK_END
+ PARAMETER (MPI_SEEK_END=604)
+ INTEGER MPI_ORDER_C
+ PARAMETER (MPI_ORDER_C=56)
+ INTEGER MPI_ORDER_FORTRAN
+ PARAMETER (MPI_ORDER_FORTRAN=57)
+ INTEGER MPI_DISTRIBUTE_BLOCK
+ PARAMETER (MPI_DISTRIBUTE_BLOCK=121)
+ INTEGER MPI_DISTRIBUTE_CYCLIC
+ PARAMETER (MPI_DISTRIBUTE_CYCLIC=122)
+ INTEGER MPI_DISTRIBUTE_NONE
+ PARAMETER (MPI_DISTRIBUTE_NONE=123)
+ INTEGER MPI_DISTRIBUTE_DFLT_DARG
+ PARAMETER (MPI_DISTRIBUTE_DFLT_DARG=-49767)
+ INTEGER (KIND=8) MPI_DISPLACEMENT_CURRENT
+ PARAMETER (MPI_DISPLACEMENT_CURRENT=-54278278)
+ INTEGER MPI_BOTTOM, MPI_IN_PLACE
+ INTEGER MPI_UNWEIGHTED, MPI_WEIGHTS_EMPTY
+ EXTERNAL MPI_DUP_FN, MPI_NULL_DELETE_FN, MPI_NULL_COPY_FN
+ EXTERNAL MPI_WTIME, MPI_WTICK
+ EXTERNAL PMPI_WTIME, PMPI_WTICK
+ EXTERNAL MPI_COMM_DUP_FN, MPI_COMM_NULL_DELETE_FN
+ EXTERNAL MPI_COMM_NULL_COPY_FN
+ EXTERNAL MPI_WIN_DUP_FN, MPI_WIN_NULL_DELETE_FN
+ EXTERNAL MPI_WIN_NULL_COPY_FN
+ EXTERNAL MPI_TYPE_DUP_FN, MPI_TYPE_NULL_DELETE_FN
+ EXTERNAL MPI_TYPE_NULL_COPY_FN
+ EXTERNAL MPI_CONVERSION_FN_NULL
+ DOUBLE PRECISION MPI_WTIME, MPI_WTICK
+ DOUBLE PRECISION PMPI_WTIME, PMPI_WTICK
+
+ COMMON /MPIPRIV1/ MPI_BOTTOM, MPI_IN_PLACE, MPI_STATUS_IGNORE
+
+ COMMON /MPIPRIV2/ MPI_STATUSES_IGNORE, MPI_ERRCODES_IGNORE
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIPRIV1/, /MPIPRIV2/
+
+ COMMON /MPIFCMB5/ MPI_UNWEIGHTED
+ COMMON /MPIFCMB9/ MPI_WEIGHTS_EMPTY
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIFCMB5/, /MPIFCMB9/
+
+ COMMON /MPIPRIVC/ MPI_ARGVS_NULL, MPI_ARGV_NULL
+!DEC$ ATTRIBUTES DLLIMPORT :: /MPIPRIVC/
diff --git a/src/include/mpio.h b/src/include/mpio.h
new file mode 100644
index 0000000..a172d52
--- /dev/null
+++ b/src/include/mpio.h
@@ -0,0 +1,5 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#error do not include mpio.h, use mpi.h
+
diff --git a/src/include/mpiwarning.h b/src/include/mpiwarning.h
new file mode 100644
index 0000000..9ededd5
--- /dev/null
+++ b/src/include/mpiwarning.h
@@ -0,0 +1,9 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma warning(disable:4200) // nonstandard extension used : zero-sized array in struct/union
+#pragma warning(disable:4201) // nonstandard extension used : nameless struct/union
+#pragma warning(disable:4204) // nonstandard extension used : non-constant aggregate initializer
+#ifdef _PREFIX_
+#pragma warning(disable:4616) // #pragma warning : warning number 'number' not a valid compiler warning
+#endif
diff --git a/src/include/mspms.h b/src/include/mspms.h
new file mode 100644
index 0000000..abbccb4
--- /dev/null
+++ b/src/include/mspms.h
@@ -0,0 +1,209 @@
+/*++
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+ Licensed under the MIT License.
+
+Module:
+
+ mspms.h
+
+Abstract:
+
+ MSMPI process management service interface definitions.
+
+--*/
+
+#ifdef DEFINE_GUID
+
+DEFINE_GUID(
+ PM_SERVICE_INTERFACE_V1,
+ 0x5aa2c905,
+ 0xd8af,
+ 0x4cf2,
+ 0x92, 0x56, 0x80, 0xf3, 0xbc, 0xa0, 0x0f, 0x94
+ );
+
+DEFINE_GUID(
+ PM_MANAGER_INTERFACE_V1,
+ 0x4b1130e6,
+ 0x724d,
+ 0x43b8,
+ 0xb7, 0x63, 0x75, 0x72, 0xcb, 0xe6, 0x00, 0xfa
+ );
+
+DEFINE_GUID(
+ PM_SERVICE_INTERFACE_LAUNCH,
+ 0x2a89cc11,
+ 0x21ff,
+ 0x47d5,
+ 0x94, 0xf4, 0x28, 0x52, 0x9f, 0x7f, 0x4f, 0x5e
+ );
+
+#endif
+
+#ifndef _MSPMS_H_
+#define _MSPMS_H_
+
+#ifndef MSMPI_NO_SAL
+#include <sal.h>
+#endif
+
+#include <winsock2.h>
+#include <ws2ipdef.h>
+
+
+/*---------------------------------------------------------------------------*/
+/* SAL ANNOTATIONS */
+/*---------------------------------------------------------------------------*/
+/*
+* Define SAL annotations if they aren't defined yet.
+*/
+#ifndef _In_
+#define _In_
+#endif
+#ifndef _In_z_
+#define _In_z_
+#endif
+#ifndef _Inout_
+#define _Inout_
+#endif
+#ifndef _Out_
+#define _Out_
+#endif
+#ifndef _Outptr_
+#define _Outptr_
+#endif
+
+
+#define MSPMS_MAX_NAME_LENGTH 256
+
+//
+// The launch type allows launch managers to specify their security context
+// requirements for their launch callback.
+// PmiLaunchTypeSelf:
+// The launch callback is invoked without any impersonation or client context.
+// This is used to launch processes under the same credentials as the launch manager.
+// PmiLaunchTypeImpersonate:
+// The client security context is impersonated for the launch callback.
+// PmiLaunchTypeUserSid:
+// The client's user identifier is passed to the launch callback.
+//
+enum PmiLaunchType
+{
+ PmiLaunchTypeSelf,
+ PmiLaunchTypeImpersonate,
+ PmiLaunchTypeUserSid
+};
+
+typedef HRESULT (WINAPI FN_PmiLaunch)(
+ _In_z_ const char* App, //smpd.exe
+ _In_z_ const char* Args, //args to smpd.exe
+ _In_z_ const char* Context //job context string
+);
+
+typedef HRESULT (WINAPI FN_PmiLaunchUserSid)(
+ _In_opt_ PSID Sid,
+ _In_z_ const char* App, //smpd.exe
+ _In_z_ const char* Args, //args to smpd.exe
+ _In_z_ const char* Context //job context string
+);
+
+typedef struct _PmiManagerInterface
+{
+ size_t Size;
+ union
+ {
+ FN_PmiLaunch* AsSelf;
+ FN_PmiLaunch* Impersonate;
+ FN_PmiLaunchUserSid* UserSid;
+ } Launch;
+ enum PmiLaunchType LaunchType;
+} PmiManagerInterface;
+
+typedef struct _PmiServiceInitData
+{
+ size_t Size;
+ const char* Name;
+} PmiServiceInitData;
+
+//
+// Initialize the service
+//
+typedef HRESULT (WINAPI FN_PmiServiceInitialize)(
+ _In_ const PmiServiceInitData* InitData // Init data
+);
+
+//
+// Cause the calling thread to listen on the supplied address for requests
+// from Mpiexec.exe to launch the manager process.
+//
+typedef HRESULT (WINAPI FN_PmiServiceListen)(
+ _In_ const SOCKADDR_INET* Address, // INET address on which to listen
+ _In_ const PmiManagerInterface* Manager, // Interface to use to launch the smpd manager
+ _In_ REFGUID Version // Version GUID of the PmiManagerInterface
+);
+
+//
+// Signal to the thread that it is time to stop processing completions.
+//
+typedef HRESULT (WINAPI FN_PmiServicePostStop)();
+
+//
+// Finalize the service
+//
+typedef VOID (WINAPI FN_PmiServiceFinalize)();
+
+typedef struct _PmiServiceInterface
+{
+ size_t Size;
+ FN_PmiServiceInitialize* Initialize;
+ FN_PmiServiceListen* Listen;
+ FN_PmiServicePostStop* PostStop;
+ FN_PmiServiceFinalize* Finalize;
+} PmiServiceInterface;
+
+
+HRESULT
+WINAPI
+MSMPI_Get_pm_interface(
+ _In_ REFGUID RequestedVersion,
+ _Inout_ PmiServiceInterface* Interface
+);
+
+
+HRESULT
+WINAPI
+MSMPI_pm_query_interface(
+ _In_ REFGUID RequestedVersion,
+ _Inout_ void** Interface
+);
+
+
+typedef HRESULT(WINAPI FN_PmiCreateLaunchCtx)(
+ _In_ HANDLE clientToken,
+ _In_z_ const void* launchCtx,
+ _In_z_ const char* jobCtx
+ );
+
+typedef HRESULT(WINAPI FN_PmiStartLaunchCtx)(_In_ const void* launchCtx);
+typedef HRESULT(WINAPI FN_PmiEndLaunchCtx)(_In_ const void* launchCtx);
+typedef HRESULT(WINAPI FN_PmiCleanupLaunchCtx)(_In_ const void* launchCtx);
+typedef void (WINAPI FN_PmiGetLaunchInfo)(
+ _In_ const void* launchCtx,
+ _Outptr_opt_ const char** ppJobObjName,
+ _Outptr_opt_ const char** ppPwd,
+ _Out_opt_ BOOL* pSaveCreds
+ );
+
+typedef struct _PmiServiceLaunchInterface
+{
+ FN_PmiCreateLaunchCtx* CreateLaunchCtx;
+ FN_PmiStartLaunchCtx* StartLaunchCtx;
+ FN_PmiEndLaunchCtx* EndLaunchCtx;
+ FN_PmiCleanupLaunchCtx* CleanupLaunchCtx;
+ FN_PmiGetLaunchInfo* GetLaunchInfo;
+}PmiServiceLaunchInterface;
+
+
+
+#endif // _MSPMS_H_
diff --git a/src/include/oacr.h b/src/include/oacr.h
new file mode 100644
index 0000000..26aa343
--- /dev/null
+++ b/src/include/oacr.h
@@ -0,0 +1,22 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#ifndef OACR_INC_H
+#define OACR_INC_H
+
+#define HRESULT_NOT_CHECKED 25031
+#define COMPARING_HRESULT_TO_INT 6221
+#define RETVAL_IGNORED_FUNC_COULD_FAIL 6031
+#define NONCONST_BUFFER_PARAM 25033
+#define EXCEPT_BLOCK_EMPTY 6322
+#define PRINTF_FORMAT_STRING_PARAM_NEEDS_REVIEW 25141
+#define UNSAFE_STRING_FUNCTION 25025
+#define USE_WIDE_API 25068
+#define DIFFERENT_PARAM_TYPE_SIZE 25054
+
+#define OACR_REVIEWED_CALL( reviewer, functionCall ) functionCall
+#define OACR_WARNING_SUPPRESS( cWarning, comment ) __pragma ( warning( suppress: cWarning ) )
+#define OACR_WARNING_ENABLE( cWarning, comment ) __pragma ( warning( default: cWarning ) )
+#define OACR_USE_PTR(p) __noop
+#define OACR_WARNING_DISABLE( cWarning, comment ) __pragma(warning(disable:cWarning))
+#endif
\ No newline at end of file
diff --git a/src/include/pmidbg.h b/src/include/pmidbg.h
new file mode 100644
index 0000000..2d58ec5
--- /dev/null
+++ b/src/include/pmidbg.h
@@ -0,0 +1,500 @@
+#pragma once
+#ifndef _PmiDbg_H
+#define _PmiDbg_H
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+//
+// Summary:
+// This header provides the extensions interface for the MSMPI Process
+// Management Interface (PMI) for debuggers and profiling tools.
+//
+// Loading and Initialization:
+// The PMI infrastructure will load the registered extensions in each
+// process created to support a job. Each created PMI process will
+// enumerate all extensions and load these extensions. Once all
+// extensions are loaded, the PMIDBG_NOTIFY_INITIALIZE notification
+// will be sent to all loaded extensions using the mechanism described
+// in the Notifications section described below.
+// NOTE:
+// There are no implied or explicit ordering guarantees to the loading
+// or notification system. The only guarantee provided is that the
+// initialization function PmiDbgInitExtension provided by the
+// extension will be the first function that is called and the
+// notifications (if any) will be sent to all the loaded
+// extension via the Notification mechanism
+//
+// Registration and Installation:
+// The actual extension DLLs may be installed anywhere. A 32 bit and a
+// 64 bit version should be provided. To register the extension to be
+// loaded, the user, either manually or through some installer, should
+// create a registry key under
+// HKLM\Software\Microsoft\Mpi\PmiExtensions. The default value of
+// this key must be the path to the extension DLL. If just the DLL
+// name is provided, standard DLL load paths will apply.
+//
+// Notifications, Interrogation, and Injection:
+// Each extension will receive notifications through the "Notify"
+// callback during the various phases of execution. This "Notify"
+// callback should be provided by the extension. During these
+// callbacks, the extension can use the provided Control callback to
+// exchange information with the host process.
+//
+// Versioning:
+// The PMIDBG version is defined as a 16bit value where the high 8
+// bits represent the major and the low 8 bits minor version. All
+// versions are incremental and only additive. When the extension is
+// loaded, it is given the local version information supported by the
+// host process. The extension must do a min() of the two to determine
+// the common version. For extensions supporting a lower version than
+// the host, nothing special is required. For extensions supporting a
+// higher version than the host, they must restrict their
+// extensions use of the PMIDBG APIs to those based on the actual
+// version of the host.
+//
+//-----------------------------------------------------------------------------
+
+#ifndef MSMPI_NO_SAL
+#include
+#endif
+
+
+/*---------------------------------------------------------------------------*/
+/* SAL ANNOTATIONS */
+/*---------------------------------------------------------------------------*/
+/*
+* Define SAL annotations if they aren't defined yet.
+*/
+#ifndef __in
+#define __in
+#endif
+#ifndef __inout_bcount
+#define __inout_bcount( x )
+#endif
+#ifndef __out
+#define __out
+#endif
+
+
+#define PMIDBG_VERSION_1 MAKEWORD(1,0)
+#define PMIDBG_VERSION_1_1 MAKEWORD(1,1)
+#ifndef PMIDBG_VERSION
+# define PMIDBG_VERSION PMIDBG_VERSION_1_1
+#endif
+
+//
+// To register an extension, create a key under the below path in HKLM
+// with a default value of the path to the dll. The 64 bit registry
+// root should point to the 64 bit version of the extension, and the
+// 32 bit registry root should point to the 32 bit version.
+//
+#define PMIDBG_REG_PATH_A "Software\\Microsoft\\Mpi\\PmiExtensions"
+#define PMIDBG_REG_PATH_W L##PMIDBG_REG_PATH_A
+#define PMIDBG_REG_PATH TEXT(PMIDBG_REG_PATH_A)
+
+//
+// Summary:
+// Enumeration values used to identify the role of the host process
+// that has loaded the extension.
+//
+typedef enum _PMIDBG_HOST_TYPE
+{
+ //
+ // Signifies that the extension is being loaded by the controller
+ // (MPIEXEC). This extension will remain loaded until the entire
+ // task completes or aborts. The extension will be loaded once per
+ // task on a single machine.
+ //
+ PMIDBG_HOST_CONTROLLER = 1,
+
+ //
+ // Signifies that the extension is being loaded by the node
+ // manager (SMPD). This extension will remain loaded until the
+ // entire task completes or aborts. The extension will be loaded
+ // once on each machine that is involved in the job.
+ //
+ PMIDBG_HOST_MANAGER,
+
+} PMIDBG_HOST_TYPE;
+
+
+
+//
+// Summary:
+// Enumeration values used to identify the notification event that is
+// occurring. This value is sent through the notification callback
+// provided by the extension.
+//
+typedef enum _PMIDBG_NOTIFY_TYPE
+{
+ //
+ // This notification is sent in all host types after all
+ // extensions have been loaded into the host process. Extensions
+ // should use this to collect global information and open any
+ // communication ports that are required.
+ //
+ PMIDBG_NOTIFY_INITIALIZE = 0,
+
+
+ //
+ // This notification is sent when the controller is about to tell
+ // all managers to create the worker processes. Extensions can
+ // use this get to the list of machines and world size
+ // information.
+ //
+ PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES,
+
+ //
+ // This notification is sent when the controller has received
+ // confirmation that all worker processes have been created
+ // successfully.
+ //
+ PMIDBG_NOTIFY_AFTER_CREATE_PROCESSES,
+
+
+ //
+ // This notification is sent when the manager is about to create
+ // the worker process. Extensions can obtain the program,
+ // arguments, and rank information of the process that is about to
+ // be created.
+ //
+ PMIDBG_NOTIFY_BEFORE_CREATE_PROCESS,
+
+ //
+ // This notification is sent when the manager has created a
+ // suspended worker process. Extensions can obtain the
+ // program, arguments, startup info, and rank information of the
+ // process that is about to be created. Additionally, they can
+ // also get the process and thread handles and ids and override
+ // the default behavior to call ResumeThread on the new thread
+ // handle.
+ //
+ PMIDBG_NOTIFY_AFTER_CREATE_PROCESS,
+
+ //
+ // This notification is sent before unloading the extension in any
+ // role. This notification is sent either at the end of the task,
+ // or immediately following a notification where the Unload
+ // callback was invoked because of an error.
+ //
+ PMIDBG_NOTIFY_FINALIZE,
+
+} PMIDBG_NOTIFY_TYPE;
+
+
+//
+// Summary:
+// Identifies the various interrogation routines that can be performed
+// by extensions.
+//
+typedef enum _PMIDBG_OPCODE_TYPE
+{
+ //
+ // This operation may be sent during any notification on any host
+ // type. The pBuffer argument must point to a PMIDBG_SYSTEM_INFO*
+ // and cbBuffer must be greater than or equal to
+ // sizeof(PMIDBG_SYSTEM_INFO*).
+ //
+ PMIDBG_OPCODE_GET_SYSTEM_INFO = 0,
+
+ //
+ // This operation may be sent during any notification on any host
+ // type. The pBuffer argument must point to a char* and cbBuffer
+ // must be greater than or equal to sizeof(char*).
+ //
+ PMIDBG_OPCODE_GET_JOB_CONTEXT,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES notification on the
+ // controller for the job. The pBuffer argument must point to a
+ // UINT and cbBuffer must be greater than or equal to
+ // sizeof(UINT).
+ //
+ PMIDBG_OPCODE_GET_WORLD_SIZE,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES notification on the
+ // controller for the job. The pBuffer argument must point to a
+ // PMIDBG_ENUM_WORLD_NODES structure and cbBuffer must be greater
+ // than or equal to sizeof(PMIDBG_ENUM_WORLD_NODES). To start the
+ // enumeration, set the Context field of the
+ // PMIDBG_ENUM_WORLD_NODES structure to PMIDBG_ENUM_BEGIN. The
+ // name of the machine can be obtained from the Hostname field of
+ // the PMIDBG_ENUM_WORLD_NODES structure on return of the Control
+ // callback. The Context field will be set to PMIDBG_ENUM_END
+ // when there are no more items in the list.
+ //
+ PMIDBG_OPCODE_ENUM_WORLD_NODES,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESS and
+ // PMIDBG_NOTIFY_AFTER_CREATE_PROCESS notifications on the manager
+ // for a machine. The pBuffer argument must point to a char* and
+ // cbBuffer must be greater than or equal to sizeof(char*).
+ //
+ PMIDBG_OPCODE_GET_PROCESS_COMMAND,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESS and
+ // PMIDBG_NOTIFY_AFTER_CREATE_PROCESS notifications on the manager
+ // for a machine. The pBuffer argument must point to a char* and
+ // cbBuffer must be greater than or equal to sizeof(char*).
+ //
+ PMIDBG_OPCODE_GET_PROCESS_ARGUMENTS,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESS and
+ // PMIDBG_NOTIFY_AFTER_CREATE_PROCESS notifications on the manager
+ // for a machine. The pBuffer argument must point to a int and
+ // cbBuffer must be greater than or equal to sizeof(int).
+ //
+ PMIDBG_OPCODE_GET_PROCESS_RANK,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_AFTER_CREATE_PROCESS notification on the manager
+ // for a machine. The pBuffer argument must point to a
+ // PROCESS_INFORMATION* and cbBuffer must be greater than or equal
+ // to sizeof(PROCESS_INFORMATION*).
+ //
+ PMIDBG_OPCODE_GET_PROCESS_INFORMATION,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_AFTER_CREATE_PROCESS notification on the manager
+ // for a machine to prevent ResumeThread from being called on the
+ // process that was just created. This leaves the process in a
+ // suspended state. The extension can then use the handle of the
+ // thread to control the startup of the worker process. The
+ // pBuffer and cbBuffer arguments are unused.
+ //
+ PMIDBG_OPCODE_OVERRIDE_PROCESS_RESUME,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES notification on the
+ // controller for the job. The pBuffer argument must point to an
+ // int* and cbBuffer must be greater than or equal to sizeof(int*).
+ //
+ PMIDBG_OPCODE_GET_PROCSIZE_ADDR,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES notification on the
+ // controller for the job. The pBuffer argument must point to a
+ // MPIR_PROCDESC* and cbBuffer must be greater
+ // than or equal to sizeof(MPIR_PROCDESC*).
+ //
+ PMIDBG_OPCODE_GET_PROCTABLE_ADDR,
+
+ //
+ // This operation may only be sent during the
+ // PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES notification on the
+ // controller for the job. The pBuffer argument must point to an
+ // int and cbBuffer must be greater than or equal to sizeof(int).
+ // The possible returned values are described in the
+ // MPIDBG_DBG_MODE enum
+ //
+ PMIDBG_OPCODE_GET_DEBUG_MODE,
+
+} PMIDBG_OPCODE_TYPE;
+
+
+//
+// Summary:
+// This is the callback function provided by the system to extensions
+// to allow the extension to get and set information from within host
+// process.
+//
+// Parameters:
+// type - The type of operation requested by the extension.
+// pData - The pointer to the data buffer provided in the
+// notification callback.
+// Note: This must be the pData argument from the Notify
+// callback.
+// pBuffer - This is a pointer to a buffer that is used by the
+// operation type. See the details of the specific
+// operation to know the type of data to pass here.
+// cbBuffer - This is the size of the buffer pointed to by pBuffer
+//
+// Returns:
+// An HRESULT indicating status. Callers should use SUCCEEDED and
+// FAILED macros to test for error conditions.
+//
+typedef HRESULT ( __stdcall FN_PmiDbgControl ) (
+ __in PMIDBG_OPCODE_TYPE type,
+ __in void* pData,
+ __inout_bcount(cbBuffer) void* pBuffer,
+ __in SIZE_T cbBuffer
+ );
+
+
+//
+// Summary:
+// This callback function can be used during notification events to
+// signify that the extension has entered an error state and needs to
+// be unloaded. Once the notification returns, the extension will be
+// finalized and unloaded.
+//
+typedef HRESULT ( __stdcall FN_PmiDbgUnload)();
+
+
+//
+// Summary:
+// This callback is provided by the extension to receive notification
+// from the host process.
+//
+// Parameters:
+// type - The type of notification that is being sent.
+// pData - The opaque data buffer that can be used for
+// Control operations.
+//
+typedef VOID ( __stdcall FN_PmiDbgNotify)(
+ __in PMIDBG_NOTIFY_TYPE type,
+ __in void* pData
+ );
+
+
+//
+// Summary:
+// This structure provides the information about the host process.
+//
+// Fields:
+// Version - The current supported version of the host process
+// Host - The role of the current host process.
+// AppName - The simple text name for the host process.
+// LocalName - The hostname of the local machine.
+// Control - The callback function to get and set information
+// within the host process
+// Unload - The callback function used to trigger an
+// unload of the current extension
+//
+typedef struct _PMIDBG_SYSTEM_INFO
+{
+ ULONG Version;
+ PMIDBG_HOST_TYPE Host;
+ const char* AppName;
+ const char* LocalName;
+ FN_PmiDbgControl* Control;
+ FN_PmiDbgUnload* Unload;
+
+} PMIDBG_SYSTEM_INFO;
+
+
+//
+// Summary:
+// This structure provides the information about the extension and
+// its supported callbacks.
+//
+// Fields:
+// Version - The min version of the host process and the extension.
+// Notify - The notification callback invoked when events occur
+// in the host process.
+//
+// Remarks:
+// The value specified in the Version field is the maximum supported
+// version by the extension, it may not be fully supported by the
+// PMI Host process, so the extension must inspect the Version field
+// of the PMIDBG_SYSTEM_INFO struct during the Initialize callback to
+// determine the actual version of the interface being supported.
+//
+typedef struct _PMIDBG_FUNCTIONS
+{
+ ULONG Version;
+ FN_PmiDbgNotify* Notify;
+
+} PMIDBG_FUNCTIONS;
+
+
+//
+// Summary:
+// This is the export provided by all extensions. It is called after
+// the DLL is loaded into the host process. If the extension returns
+// FALSE, the extension is immediately removed from the list and
+// unloaded.
+//
+typedef BOOL ( __stdcall FN_PmiDbgInitExtension)(
+ __in HKEY hKey,
+ __in const PMIDBG_SYSTEM_INFO* pInfo,
+ __out PMIDBG_FUNCTIONS* pFunctions
+ );
+
+#define PMIDBG_INIT_EXTENSION_FN_NAME "PmiDbgInitExtension"
+
+
+//
+// Summary:
+// This structure is used during the PMIDBG_OPCODE_ENUM_WORLD_NODES
+// control operation to access the list of nodes involved in a job.
+//
+// Fields:
+// Context - Opaque context value to identify the current
+// element in the list. To begin the iteration,
+// set the value to PMIDBG_ENUM_BEGIN. This field
+// will be set to PMIDBG_ENUM_END when there are
+// no more items in the list.
+// Hostname - On return from the Control callback, this
+// contains the hostname of the node.
+//
+typedef struct _PMIDBG_ENUM_WORLD_NODES
+{
+ LONG_PTR Context;
+ const char* Hostname;
+
+} PMIDBG_ENUM_WORLD_NODES;
+
+
+//
+// Summary:
+// This structure is used during the PMIDBG_OPCODE_GET_PROCTABLE_ADDR
+// operation to access the information about the MPI processes.
+//
+// Fields:
+// host_name - The name of the host where the process lives
+// executable_name - The executable name of the process
+// pid - The pid of the process
+//
+typedef struct
+{
+ char* host_name;
+ char* executable_name;
+ int pid;
+} MPIR_PROCDESC;
+
+
+//
+// Summary:
+// This enum describes the possible returned values for the
+// PMIDBG_OPCODE_GET_DEBUG_MODE operation
+//
+// Values:
+// MPIDBG_DBG_LAUNCH - The job was launched under the debugger
+// MPIDBG_DBG_ATTACH - The job was not launched under the debugger.
+// The debugger has attached to the processes
+// MPIDBG_DBG_DUMP - The job is a debugging job for dump files
+//
+typedef enum _MPIDBG_DBG_MODE
+{
+ MPIDBG_DBG_LAUNCH = 0,
+ MPIDBG_DBG_ATTACH,
+ MPIDBG_DBG_DUMP
+} MPIDBG_DBG_MODE;
+
+
+//
+// Values for the debug_state, this seems to be all we need at the moment
+// but that may change...
+//
+#define MPIR_NULL 0
+#define MPIR_DEBUG_SPAWNED 1
+#define MPIR_DEBUG_ABORTING 2
+
+
+#define PMIDBG_ENUM_BEGIN ((LONG_PTR)0)
+#define PMIDBG_ENUM_END ((LONG_PTR)-1)
+
+#endif //#ifndef _PmiDbg_H
diff --git a/src/include/x64/mpifptr.h b/src/include/x64/mpifptr.h
new file mode 100644
index 0000000..1d4a288
--- /dev/null
+++ b/src/include/x64/mpifptr.h
@@ -0,0 +1,9 @@
+! -*- Mode: F77; F90; -*-
+!
+! Copyright(c) Microsoft Corporation.All rights reserved.
+! Licensed under the MIT License.
+!
+ INTEGER MPI_AINT
+ PARAMETER (MPI_AINT=z'4c00083b')
+ INTEGER MPI_ADDRESS_KIND
+ PARAMETER(MPI_ADDRESS_KIND = 8)
diff --git a/src/include/x86/mpifptr.h b/src/include/x86/mpifptr.h
new file mode 100644
index 0000000..1028a1d
--- /dev/null
+++ b/src/include/x86/mpifptr.h
@@ -0,0 +1,9 @@
+! -*- Mode: F77; F90; -*-
+!
+! Copyright(c) Microsoft Corporation.All rights reserved.
+! Licensed under the MIT License.
+!
+ INTEGER MPI_AINT
+ PARAMETER (MPI_AINT=z'4c00043b')
+ INTEGER MPI_ADDRESS_KIND
+ PARAMETER(MPI_ADDRESS_KIND = 4)
diff --git a/src/launchSvc/LaunchSvc.h b/src/launchSvc/LaunchSvc.h
new file mode 100644
index 0000000..298c344
--- /dev/null
+++ b/src/launchSvc/LaunchSvc.h
@@ -0,0 +1,39 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include
+#include "MsmpiLaunchSvc.h"
+
+//
+// Service settings
+//
+#define SERVICE_NAME L"MsmpiLaunchSvc"
+#define SERVICE_START_TYPE SERVICE_AUTO_START
+
+//
+// A singleton class that will run as a windows service application.
+// It handles the interaction with SCM and manages the launch service.
+//
+class WindowsSvc
+{
+private:
+ SERVICE_TABLE_ENTRYW m_ctrlDispatchTable[2];
+ SERVICE_STATUS_HANDLE m_serviceStatusHandle;
+ SERVICE_STATUS m_serviceStatus;
+
+private:
+ WindowsSvc();
+ HRESULT ChangeState(_In_ DWORD newState);
+
+public:
+ MsmpiLaunchService m_launcher;
+
+ HRESULT Start();
+
+ static WindowsSvc ms_windowsSvc;
+
+ static VOID WINAPI ServiceMain(_In_ DWORD argc, _In_ LPWSTR * argv);
+ static VOID WINAPI ServiceCtrlHandler(_In_ DWORD ctrl);
+};
diff --git a/src/launchSvc/LaunchSvcMain.cpp b/src/launchSvc/LaunchSvcMain.cpp
new file mode 100644
index 0000000..a18d64e
--- /dev/null
+++ b/src/launchSvc/LaunchSvcMain.cpp
@@ -0,0 +1,244 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "LaunchSvc.h"
+#include
+
+EventLogger gEventLogger;
+WindowsSvc WindowsSvc::ms_windowsSvc;
+
+MsmpiLaunchService& Launcher()
+{
+ return WindowsSvc::ms_windowsSvc.m_launcher;
+}
+
+//
+// Entry point
+//
+int __cdecl main(int /*argc*/, const char* /*argv[]*/)
+{
+ if (!gEventLogger.Open(SERVICE_NAME))
+ {
+ return GetLastError();
+ }
+
+ gEventLogger.WriteEvent(EVENTLOG_INFORMATION_TYPE, SVC_CATEGORY, SERVICE_EVENT, L"Starting Launch Service");
+
+ //
+ // Start MsMpi Launch Service
+ //
+ HRESULT result = WindowsSvc::ms_windowsSvc.Start();
+
+ if (FAILED(result))
+ {
+ gEventLogger.WriteEvent(
+ EVENTLOG_ERROR_TYPE,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Failed to start launch service. Error=0x%x\n",
+ result);
+ return result;
+ }
+
+ gEventLogger.WriteEvent(
+ EVENTLOG_INFORMATION_TYPE,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Ended Launch Service. Result=0x%x\n",
+ result);
+
+ return result;
+}
+
+
+/*---------------------------------------------------------------------------*/
+/* WindowsSvc Class Member Functions */
+/*---------------------------------------------------------------------------*/
+
+WindowsSvc::WindowsSvc()
+{
+ m_serviceStatus.dwCheckPoint = 0;
+ m_serviceStatus.dwControlsAccepted = 0;
+ m_serviceStatus.dwCurrentState = SERVICE_START_PENDING;
+ m_serviceStatus.dwServiceSpecificExitCode = 0;
+ m_serviceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ m_serviceStatus.dwWaitHint = 0;
+ m_serviceStatus.dwWin32ExitCode = 0;
+
+ m_serviceStatusHandle = nullptr;
+
+ m_ctrlDispatchTable[0] = { SERVICE_NAME, ServiceMain };
+ m_ctrlDispatchTable[1] = { nullptr, nullptr };
+}
+
+
+HRESULT WindowsSvc::ChangeState(_In_ DWORD newState)
+{
+ switch (newState)
+ {
+ case SERVICE_RUNNING:
+ m_serviceStatus.dwCurrentState = SERVICE_RUNNING;
+ m_serviceStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
+ break;
+ default:
+ m_serviceStatus.dwCurrentState = newState;
+ break;
+ }
+
+ if (!SetServiceStatus(m_serviceStatusHandle, &m_serviceStatus))
+ {
+ HRESULT result = HRESULT_FROM_WIN32(GetLastError());
+ gEventLogger.WriteEvent(
+ EVENTLOG_ERROR_TYPE,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Failed to change service state to %d. Error=0x%x\n",
+ newState,
+ result);
+ return result;
+ }
+
+ return S_OK;
+}
+
+
+//
+// Does service specific initializations and registers service loop and control handlers
+//
+//
+// Does service specific initializations and registers service loop and control
+// handlers. Returns S_OK once the control dispatcher exits, or a failure
+// HRESULT if privilege adjustment, launcher load, or SCM registration fails.
+//
+HRESULT WindowsSvc::Start()
+{
+    HANDLE processToken;
+    HRESULT result;
+
+    //
+    // Adjust process privileges so it is able to load user profiles
+    //
+    if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &processToken))
+    {
+        return HRESULT_FROM_WIN32(GetLastError());
+    }
+
+    result = SecurityUtils::GrantPrivilege(processToken, SE_BACKUP_NAME, TRUE);
+    if (FAILED(result))
+    {
+        CloseHandle(processToken);
+        return result;
+    }
+
+    result = SecurityUtils::GrantPrivilege(processToken, SE_RESTORE_NAME, TRUE);
+
+    CloseHandle(processToken);
+
+    if (FAILED(result))
+    {
+        return result;
+    }
+
+    //
+    // Load msmpi service launcher
+    //
+    result = m_launcher.Load();
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"msmpi.dll did not load properly. Error=0x%x\n",
+            result);
+
+        return result;
+    }
+
+    //
+    // Register to SCM. On failure, capture the error BEFORE logging: the
+    // original code logged the stale (successful) 'result' and read
+    // GetLastError() only after WriteEvent, which may clobber last-error.
+    //
+    if (!StartServiceCtrlDispatcherW(m_ctrlDispatchTable))
+    {
+        result = HRESULT_FROM_WIN32(GetLastError());
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to start launch service. Error=0x%x\n",
+            result);
+        return result;
+    }
+
+    return S_OK;
+}
+
+
+//
+// Handles service control requests
+//
+VOID WINAPI WindowsSvc::ServiceCtrlHandler(_In_ DWORD ctrl)
+{
+ switch (ctrl)
+ {
+ //case SERVICE_CONTROL_SHUTDOWN ?
+ case SERVICE_CONTROL_STOP:
+ ms_windowsSvc.m_launcher.Stop();
+ break;
+ default:
+ break;
+ }
+}
+
+
+//
+// Main service loop
+//
+VOID WINAPI WindowsSvc::ServiceMain(_In_ DWORD argc, _In_ LPWSTR *argv)
+{
+ ms_windowsSvc.m_serviceStatusHandle =
+ RegisterServiceCtrlHandlerW(SERVICE_NAME, ServiceCtrlHandler);
+
+ if (ms_windowsSvc.m_serviceStatusHandle == nullptr)
+ {
+ gEventLogger.WriteEvent(
+ EVENTLOG_ERROR_TYPE,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Failed to register service control handler. Error=0x%x\n",
+ HRESULT_FROM_WIN32(GetLastError()));
+
+ return;
+ }
+
+ if (!ms_windowsSvc.m_launcher.ParseOptions(argc, argv))
+ {
+ ms_windowsSvc.m_serviceStatus.dwWin32ExitCode = ERROR_SERVICE_SPECIFIC_ERROR;
+ ms_windowsSvc.m_serviceStatus.dwServiceSpecificExitCode = ERROR_INVALID_PARAMETER;
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Don't care about the status of state change - no possible recovery.");
+ ms_windowsSvc.ChangeState(SERVICE_STOPPED);
+ return;
+ }
+
+ //
+ // Start running launch service
+ //
+
+ HRESULT result = ms_windowsSvc.ChangeState(SERVICE_RUNNING);
+ if (FAILED(result))
+ {
+ return;
+ }
+
+ result = ms_windowsSvc.m_launcher.Run();
+ if (FAILED(result))
+ {
+ gEventLogger.WriteEvent(
+ EVENTLOG_ERROR_TYPE,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Failed to start listening to PMI clients. Error=0x%x\n",
+ result);
+
+ ms_windowsSvc.m_serviceStatus.dwWin32ExitCode = ERROR_SERVICE_SPECIFIC_ERROR;
+ ms_windowsSvc.m_serviceStatus.dwServiceSpecificExitCode = result;
+ }
+
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Don't care about the status of state change - no possible recovery.");
+ ms_windowsSvc.ChangeState(SERVICE_STOPPED);
+}
diff --git a/src/launchSvc/MsmpiLaunchSvc.cpp b/src/launchSvc/MsmpiLaunchSvc.cpp
new file mode 100644
index 0000000..8776197
--- /dev/null
+++ b/src/launchSvc/MsmpiLaunchSvc.cpp
@@ -0,0 +1,1466 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include
+#include
+#include
+#include
+#include "assertutil.h"
+#include "MsmpiLaunchSvc.h"
+#include "kernel32util.h"
+#include "mpiutil.h"
+#include "util.h"
+#include
+
+extern EventLogger gEventLogger;
+extern MsmpiLaunchService& Launcher();
+
+/*--------------------------------------------------------------------------*/
+/* LaunchContext */
+/*--------------------------------------------------------------------------*/
+
+LaunchContext::LaunchContext()
+ : m_pmiHandle(nullptr)
+ , m_parentThread(nullptr)
+ , m_mgrProcess(nullptr)
+ , m_primaryToken(nullptr)
+ , m_userToken(nullptr)
+ , m_userProfile(nullptr)
+{
+}
+
+
+LaunchContext::~LaunchContext()
+{
+ Dispose();
+}
+
+
+void LaunchContext::Dispose()
+{
+ CloseHandle(m_parentThread);
+ CloseHandle(m_mgrProcess);
+ CloseHandle(m_primaryToken);
+ UnloadUserProfile(m_userToken, m_userProfile);
+ CloseHandle(m_userToken);
+
+ ZeroMemory(this, sizeof(*this));
+}
+
+
+/*--------------------------------------------------------------------------*/
+/* ContextPool */
+/*--------------------------------------------------------------------------*/
+
+ContextPool::ContextPool()
+ : m_activeContextCount(0)
+{
+ InitializeSRWLock(&m_lock);
+
+ for (DWORD i = 0; i < _countof(m_activeIndices); ++i)
+ {
+ m_activeIndices[i] = i;
+ }
+}
+
+
+ContextPool::~ContextPool()
+{
+ AcquireSRWLockExclusive(&m_lock);
+
+ WORD eType = m_activeContextCount == 0 ? EVENTLOG_INFORMATION_TYPE : EVENTLOG_WARNING_TYPE;
+ gEventLogger.WriteEvent(
+ eType,
+ SVC_CATEGORY,
+ SERVICE_EVENT,
+ L"Service has %d active launch contexts",
+ m_activeContextCount);
+
+ for (DWORD i = 0; i < m_activeContextCount; ++i)
+ {
+ m_contexts[m_activeIndices[i]].Dispose();
+ }
+
+ ReleaseSRWLockExclusive(&m_lock);
+}
+
+
+LaunchContext* ContextPool::CreateNewContext()
+{
+ Assert(m_activeContextCount <= _countof(m_contexts));
+
+ LaunchContext* pNewContext = nullptr;
+
+ AcquireSRWLockExclusive(&m_lock);
+
+ if (m_activeContextCount == _countof(m_contexts))
+ {
+ goto exit_fn;
+ }
+
+ pNewContext = m_contexts + m_activeIndices[m_activeContextCount];
+ ++m_activeContextCount;
+
+exit_fn:
+ ReleaseSRWLockExclusive(&m_lock);
+ return pNewContext;
+}
+
+
+BOOL ContextPool::DestroyContext(_In_opt_ LaunchContext* pDeleteContext)
+{
+ BOOL result = TRUE;
+ if (pDeleteContext == nullptr)
+ {
+ return result;
+ }
+
+ AcquireSRWLockExclusive(&m_lock);
+
+ ptrdiff_t idx = pDeleteContext - m_contexts;
+
+ if (m_activeContextCount == 0
+ || idx < 0 || static_cast(idx) >= _countof(m_contexts))
+ {
+ Assert(false);
+ result = FALSE;
+ goto exit_fn;
+ }
+
+ m_contexts[idx].Dispose();
+
+ DWORD idxActive = 0;
+ for (; idxActive < m_activeContextCount; ++idxActive)
+ {
+ if (m_activeIndices[idxActive] == static_cast(idx))
+ {
+ break;
+ }
+ }
+ Assert(idxActive < m_activeContextCount);
+
+ --m_activeContextCount;
+ DWORD destroyedIdx = m_activeIndices[idxActive];
+ m_activeIndices[idxActive] = m_activeIndices[m_activeContextCount];
+ m_activeIndices[m_activeContextCount] = destroyedIdx;
+
+exit_fn:
+ ReleaseSRWLockExclusive(&m_lock);
+ return result;
+}
+
+
+//
+// Returns the first context that satisfies the given match function
+//
+template
+LaunchContext*
+ContextPool::FindContext(
+ _In_ IsMatch compareFunct,
+ _In_ const T* pData
+ )
+{
+ AcquireSRWLockShared(&m_lock);
+
+ LaunchContext* pCursor = nullptr;
+
+ for (DWORD i = 0; i < m_activeContextCount; ++i)
+ {
+ if (compareFunct(m_contexts + m_activeIndices[i], pData))
+ {
+ pCursor = m_contexts + m_activeIndices[i];
+ break;
+ }
+ }
+
+ ReleaseSRWLockShared(&m_lock);
+ return pCursor;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/* LaunchContext match functions */
+/*--------------------------------------------------------------------------*/
+
+//
+// Returns true if the launch ctx's thread matches given thread handle
+//
+BOOL IsMatchCtxThread(_In_ const LaunchContext* pCtx, _In_ const HANDLE* pThread)
+{
+ if (pCtx == nullptr || pThread == nullptr)
+ {
+ return FALSE;
+ }
+
+ return pCtx->m_parentThread == *pThread;
+}
+
+
+//
+// Returns true if the launch ctx matches given data
+//
+BOOL IsMatchCtxHandle(_In_ const LaunchContext* pCtx, _In_ const void* pData)
+{
+ return (pCtx != nullptr) && (pCtx->m_pmiHandle == pData);
+}
+
+
+//
+// Returns true if the launch context has a valid but empty job object
+//
+BOOL IsMatchMgrProcess(_In_ const LaunchContext* pCtx, _In_ const HANDLE* pProcHandle)
+{
+ return (pCtx != nullptr) && (pCtx->m_mgrProcess == *pProcHandle);
+}
+
+
+/*--------------------------------------------------------------------------*/
+/* ProcessQueue */
+/*--------------------------------------------------------------------------*/
+
+ProcessQueue::ProcessQueue()
+ : m_newProcessEvent(nullptr)
+ , m_count(0)
+ , m_thread(nullptr)
+ , m_run(FALSE)
+{
+ InitializeCriticalSection(&m_lock);
+}
+
+
+ProcessQueue::~ProcessQueue()
+{
+ Stop();
+ DeleteCriticalSection(&m_lock);
+ CloseHandle(m_newProcessEvent);
+}
+
+
+HRESULT ProcessQueue::Initialize()
+{
+ if (m_newProcessEvent != nullptr)
+ {
+ return HRESULT_FROM_WIN32(ERROR_ALREADY_INITIALIZED);
+ }
+
+ m_newProcessEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ if (m_newProcessEvent == nullptr)
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ m_runningProcesses[NEW_PROCESS_EVENT_IDX] = m_newProcessEvent;
+ m_count = 1;
+
+ return S_OK;
+}
+
+
+//
+// Safely adds a new process to running processes list and signals new process event
+//
+HRESULT ProcessQueue::AddProcess(_In_ HANDLE newProcess)
+{
+ EnterCriticalSection(&m_lock);
+
+ Assert(m_count < _countof(m_runningProcesses));
+
+ m_runningProcesses[m_count] = newProcess;
+ ++m_count;
+
+ LeaveCriticalSection(&m_lock);
+
+ SetEvent(m_newProcessEvent);
+
+ return S_OK;
+}
+
+
+DWORD ProcessQueue::GetCountSafe()
+{
+ EnterCriticalSection(&m_lock);
+
+ DWORD countSafe = m_count;
+
+ LeaveCriticalSection(&m_lock);
+
+ return countSafe;
+}
+
+
+//
+// Safely updates the running processes list and returns the handle of the deleted process
+//
+HANDLE ProcessQueue::DeleteProcess(_In_ DWORD idx)
+{
+ EnterCriticalSection(&m_lock);
+
+ Assert(idx < m_count);
+
+ HANDLE deletedProcess = m_runningProcesses[idx];
+
+ --m_count;
+ m_runningProcesses[idx] = m_runningProcesses[m_count];
+
+ LeaveCriticalSection(&m_lock);
+
+ return deletedProcess;
+}
+
+
+HRESULT ProcessQueue::Start()
+{
+ m_run = TRUE;
+
+ m_thread = CreateThread(
+ nullptr,
+ 0,
+ WaitProcesses,
+ this,
+ 0,
+ nullptr);
+
+ if (m_thread == nullptr)
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ return S_OK;
+}
+
+
+HRESULT ProcessQueue::Stop()
+{
+ m_run = FALSE;
+
+ SetEvent(m_newProcessEvent);
+
+ DWORD result = WaitForSingleObject(m_thread, SHUTDOWN_TIMEOUT);
+
+ switch (result)
+ {
+ case WAIT_OBJECT_0:
+ result = NO_ERROR;
+ break;
+ case WAIT_TIMEOUT:
+ result = ERROR_APP_HANG;
+ break;
+ case WAIT_FAILED:
+ result = GetLastError();
+ break;
+ case WAIT_ABANDONED:
+ default:
+ result = ERROR_INVALID_STATE;
+ break;
+ }
+
+ CloseHandle(m_thread);
+
+ return HRESULT_FROM_WIN32(result);
+}
+
+
+//
+// Thread procedure: waits on all queued process handles plus the
+// new-process event (slot NEW_PROCESS_EVENT_IDX) and reaps manager
+// processes as they terminate. Runs until Stop() clears m_run and
+// signals the event.
+//
+// NOTE(review): the cast target type was lost in extraction; this should
+// read static_cast<ProcessQueue*>(pData). Also, the handle array is read
+// by WaitForMultipleObjects without holding m_lock (only the count is
+// snapshotted under the lock) — confirm that concurrent AddProcess /
+// DeleteProcess cannot leave the wait observing a stale slot.
+//
+DWORD ProcessQueue::WaitProcesses(_In_ LPVOID pData)
+{
+    if (pData == nullptr)
+    {
+        return ERROR_BAD_ARGUMENTS;
+    }
+
+    ProcessQueue* pProcessQueue = static_cast(pData);
+
+    while (pProcessQueue->m_run)
+    {
+        // Snapshot the count under the lock before waiting.
+        DWORD count = pProcessQueue->GetCountSafe();
+
+        DWORD signalIdx = WaitForMultipleObjects(
+            count,
+            pProcessQueue->m_runningProcesses,
+            FALSE,          // return when any one handle signals
+            INFINITE);
+
+        if (signalIdx == WAIT_FAILED)
+        {
+            return GetLastError();
+        }
+
+        signalIdx -= WAIT_OBJECT_0;
+        Assert(signalIdx < count);
+
+        if (signalIdx == NEW_PROCESS_EVENT_IDX)
+        {
+            //
+            // A new process was added. Update the count, and continue waiting
+            //
+            continue;
+        }
+
+        //
+        // A process terminated; delete it from the wait list and trigger its
+        // context cleanup.
+        //
+        HANDLE terminatedProcess = pProcessQueue->DeleteProcess(signalIdx);
+        Launcher().ManagerProcessTerminated(terminatedProcess);
+    }
+
+    return S_OK;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/* MsmpiLaunchService */
+/*--------------------------------------------------------------------------*/
+
+//
+// Initializes members to safe defaults; the MSPMS provider module is
+// loaded later by Load(), and the port/group may be overridden by
+// ParseOptions().
+//
+MsmpiLaunchService::MsmpiLaunchService()
+    : m_pmiModule(nullptr)
+    , m_servicePort(DEFAULT_SERVICE_PORT)
+    , m_pMemberGroupSid(nullptr)
+{
+    // The provider validates the structure size before filling it in.
+    m_pmiService.Size = sizeof(PmiServiceInterface);
+}
+
+
+//
+// Releases the member-group SID (allocated by ParseOptions) and unloads
+// the MSPMS provider module. Both calls tolerate never-initialized
+// members: free(nullptr) is a no-op and FreeLibrary(nullptr) fails
+// harmlessly.
+//
+MsmpiLaunchService::~MsmpiLaunchService()
+{
+    free(m_pMemberGroupSid);
+    FreeLibrary(m_pmiModule);
+    m_pmiModule = nullptr;
+}
+
+
+//
+// Loads the MSPMS provider and initializes PMI interfaces
+//
+// Sequence: initialize the manager-process queue, read the provider DLL
+// path from the registry, load the DLL, resolve its two exports, query
+// the launch and service interfaces, initialize the PMI service, and
+// install the launch-context callbacks. Every failure is written to the
+// event log and returned to the caller.
+//
+// NOTE(review): one cast target type below was lost in extraction; the
+// fnPmiQueryIf call should read reinterpret_cast<void**>(...).
+//
+HRESULT MsmpiLaunchService::Load()
+{
+    wchar_t msmpiModulePath[MAX_PATH];
+    DWORD cbPath = sizeof(msmpiModulePath);    // byte count, as ReadKey expects
+    HRESULT result;
+
+    //
+    // Initialize the queue that checks smpd manager process lifetimes
+    //
+    result = m_mgrQueue.Initialize();
+
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to initialize smpd manager queue. Error=0x%x\n",
+            result);
+
+        return result;
+    }
+
+    //
+    // MSPMS provider that is installed on the system is written in registry.
+    // Read module path.
+    //
+    result = RegistryUtils::ReadKey(
+        HKEY_LOCAL_MACHINE,
+        MSPMI_MODULE_PATH_KEY,
+        MSPMI_PROVIDER_VALUE,
+        &cbPath,
+        msmpiModulePath
+        );
+
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to read registry %s %s Error=0x%x\n",
+            MSPMI_MODULE_PATH_KEY,
+            MSPMI_PROVIDER_VALUE,
+            result);
+
+        return result;
+    }
+
+    //
+    // Load the registered MSPMS module.
+    //
+    OACR_REVIEWED_CALL(
+        mpicr,
+        m_pmiModule = LoadLibraryExW(msmpiModulePath, nullptr, 0));
+
+    if (m_pmiModule == nullptr)
+    {
+        DWORD error = GetLastError();
+
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to load library. Error=0x%x\n",
+            error);
+
+        return HRESULT_FROM_WIN32(error);
+    }
+
+    //
+    // Get interface function addresses and get interface structures
+    //
+    PFN_MSMPI_PM_QUERY_INTERFACE fnPmiQueryIf = (PFN_MSMPI_PM_QUERY_INTERFACE)
+        GetProcAddress(m_pmiModule, MSPMI_PROC_PMI_QUERY_IF);
+    if (fnPmiQueryIf == nullptr)
+    {
+        DWORD error = GetLastError();
+
+        // %S in wide format string means parameter is 8b char
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to get process address %S. Error=0x%x\n",
+            MSPMI_PROC_PMI_QUERY_IF,
+            error);
+
+        return HRESULT_FROM_WIN32(error);
+    }
+
+    // Obtain the launch interface used to drive context creation/cleanup.
+    result = fnPmiQueryIf(
+        PM_SERVICE_INTERFACE_LAUNCH,
+        reinterpret_cast(&m_pMspmiServiceLaunch));
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to get PmiServiceLaunchInterface. Error=0x%x\n",
+            result);
+
+        return result;
+    }
+
+    PFN_MSMPI_GET_PM_INTERFACE fnGetPmi = (PFN_MSMPI_GET_PM_INTERFACE)
+        GetProcAddress(m_pmiModule, MSPMI_PROC_GET_PMI);
+    if (fnGetPmi == nullptr)
+    {
+        DWORD error = GetLastError();
+
+        // %S in wide format string means parameter is 8b char
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to get process address %S. Error=0x%x\n",
+            MSPMI_PROC_GET_PMI,
+            error);
+
+        return HRESULT_FROM_WIN32(error);
+    }
+
+    // Fill m_pmiService with the provider's service entry points.
+    result = fnGetPmi(PM_SERVICE_INTERFACE_V1, &m_pmiService);
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to get PmiServiceInterface. Error=0x%x\n",
+            result);
+
+        return result;
+    }
+
+    //
+    // Initialize interface structures
+    //
+    PmiServiceInitData pmiSvcInitData;
+
+    pmiSvcInitData.Size = sizeof(pmiSvcInitData);
+    pmiSvcInitData.Name = "MSMPI Launch Service";
+
+    result = m_pmiService.Initialize(&pmiSvcInitData);
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to initialize PMI service. Error=0x%x\n",
+            result);
+
+        return result;
+    }
+
+    // Hand our static callbacks to the provider; it invokes them around
+    // each client launch request.
+    m_pMspmiServiceLaunch->CreateLaunchCtx = ServiceCreateLaunchCtx;
+    m_pMspmiServiceLaunch->StartLaunchCtx = ServiceStartLaunchCtx;
+    m_pMspmiServiceLaunch->CleanupLaunchCtx = ServiceCleanupLaunchCtx;
+
+    return result;
+}
+
+
+//
+// Parses the service command line.
+//
+// Supported options (case-insensitive), each taking one argument:
+//   -p / -port  <port>    TCP port to listen on (1..65535)
+//   -g / -group <name>    group that clients must be a member of
+//
+// Unknown options are ignored (unchanged behavior). Returns TRUE when all
+// recognized options are valid, FALSE otherwise (details in the event log).
+//
+BOOL MsmpiLaunchService::ParseOptions(_In_ DWORD argc, _In_ LPWSTR *argv)
+{
+    WCHAR logBuffer[MAX_LOG_TEXT] = { 0 };
+    HRESULT hr = S_OK;
+
+    //
+    // Echo the raw parameters to the event log. Check each concatenation
+    // separately: previously the separator's HRESULT overwrote the
+    // argument's, so truncation of an argument went undetected.
+    //
+    for (DWORD i = 0; i < argc && SUCCEEDED(hr); i += 1)
+    {
+        hr = StringCchCatW(logBuffer, _countof(logBuffer), argv[i]);
+        if (SUCCEEDED(hr))
+        {
+            hr = StringCchCatW(logBuffer, _countof(logBuffer), L" ");
+        }
+    }
+
+    gEventLogger.WriteEvent(
+        EVENTLOG_INFORMATION_TYPE,
+        SVC_CATEGORY,
+        SERVICE_EVENT,
+        L"Service parameters are :\n %s",
+        logBuffer
+        );
+
+    for (DWORD i = 1; i < argc; i += 2)
+    {
+        if (i + 1 == argc)
+        {
+            // Option present but its value is missing.
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                SVC_CATEGORY,
+                SERVICE_EVENT,
+                L"Missing arguments in options.\n"
+                );
+            return FALSE;
+        }
+
+        if ((_wcsicmp(argv[i], L"-p") == 0)
+            || (_wcsicmp(argv[i], L"-port") == 0))
+        {
+            //
+            // Server port: Service listens to clients on specified port
+            //
+            int port = _wtoi(argv[i + 1]);
+            if (port <= 0 || port > USHRT_MAX)
+            {
+                gEventLogger.WriteEvent(
+                    EVENTLOG_ERROR_TYPE,
+                    SVC_CATEGORY,
+                    SERVICE_EVENT,
+                    L"Invalid value of server port is set for service : %s\n",
+                    argv[i+1]
+                    );
+                return FALSE;
+            }
+            // Range-checked above, so the narrowing cast is safe.
+            m_servicePort = static_cast<USHORT>(port);
+        }
+        else if ((_wcsicmp(argv[i], L"-g") == 0)
+            || (_wcsicmp(argv[i], L"-group") == 0))
+        {
+            //
+            // Client group membership: Clients must be member of this group to be
+            // able to connect
+            //
+            HRESULT result = SecurityUtils::GetSidForAccount(
+                nullptr,
+                argv[i + 1],
+                &m_pMemberGroupSid);
+            if (FAILED(result))
+            {
+                gEventLogger.WriteEvent(
+                    EVENTLOG_ERROR_TYPE,
+                    SVC_CATEGORY,
+                    SERVICE_EVENT,
+                    L"Invalid group for client membership : %s\nError=0x%x",
+                    argv[i + 1],
+                    result
+                    );
+                return FALSE;
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+
+//
+// Runs the service: starts the manager-process reaper thread, then blocks
+// inside the PMI listen loop until Stop() posts a stop request. Returns
+// the listen result after finalizing the PMI service.
+//
+HRESULT MsmpiLaunchService::Run()
+{
+    //
+    // Start manager process lifetime management
+    //
+    HRESULT result = m_mgrQueue.Start();
+    if (FAILED(result))
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            SVC_CATEGORY,
+            SERVICE_EVENT,
+            L"Failed to start smpd manager queue. Error=0x%x\n",
+            result);
+        return result;
+    }
+
+    //
+    // Describe how manager processes are created: impersonation-based
+    // launch through our static callback.
+    //
+    PmiManagerInterface pmiManager;
+    pmiManager.Size = sizeof(PmiManagerInterface);
+    pmiManager.LaunchType = PmiLaunchTypeImpersonate;
+    pmiManager.Launch.Impersonate = ServiceCreateManagerProcess;
+
+    //
+    // Listen on all IPv4 interfaces at the configured port
+    // (_byteswap_ushort converts to network byte order).
+    //
+    SOCKADDR_INET svcAddr;
+    svcAddr.si_family = AF_INET;
+    svcAddr.Ipv4.sin_port = _byteswap_ushort(m_servicePort);
+    svcAddr.Ipv4.sin_addr.S_un.S_addr = INADDR_ANY;
+
+    result = m_pmiService.Listen(&svcAddr, &pmiManager, PM_MANAGER_INTERFACE_V1);
+
+    m_pmiService.Finalize();
+
+    return result;
+}
+
+
+OACR_WARNING_DISABLE(HRESULT_NOT_CHECKED, "Don't care about the result of stop commands - no possible recovery.");
+//
+// Stops the service: posts a stop to the PMI listen loop (which unblocks
+// Run()) and shuts down the manager-process queue thread. Both results
+// are deliberately ignored — there is no recovery path at shutdown.
+//
+VOID MsmpiLaunchService::Stop()
+{
+    m_pmiService.PostStop();
+    m_mgrQueue.Stop();
+}
+OACR_WARNING_ENABLE(HRESULT_NOT_CHECKED, "Don't care about the result of stop commands - no possible recovery.");
+
+
+//
+// Called by the process queue when a manager process handle signals.
+// Finds and destroys the launch context tracking that process.
+//
+VOID MsmpiLaunchService::ManagerProcessTerminated(_In_ HANDLE mgrProcess)
+{
+    LaunchContext* pCtx = m_contextPool.FindContext(IsMatchMgrProcess, &mgrProcess);
+
+    Assert(pCtx != nullptr);
+
+    if (pCtx == nullptr)
+    {
+        //
+        // No context tracks this process. Assert compiles out in release
+        // builds, so guard explicitly instead of destroying a null context.
+        //
+        return;
+    }
+
+    //
+    // Log before destroying so we never reference a released pool entry
+    // (previously the pointer was logged after DestroyContext).
+    //
+    gEventLogger.WriteEvent(
+        EVENTLOG_INFORMATION_TYPE,
+        1,
+        100,
+        L"SmpdMgr terminated for launch context 0x%p",
+        pCtx);
+
+    // Use the member directly, consistent with the FindContext call above.
+    m_contextPool.DestroyContext(pCtx);
+}
+
+
+//
+// MSPMS passes us multibyte arguments for backcompat purposes. The
+// function should convert multibyte args into wchar_t because they
+// might contain true unicode.
+//
+// Creates the smpd manager process under the client's primary token:
+// resolves the launch context registered for the current thread, opens
+// the job object named in the launch info (if any), builds the user's
+// environment block and start directory, converts app/args from UTF-8 to
+// UTF-16, and starts the process suspended so it can be assigned to the
+// job object before it runs. On any failure the launch context is
+// destroyed before returning.
+//
+// NOTE(review): several tokens below were garbled in extraction — the
+// FindContext argument should be '&currentThread' and each empty-looking
+// static_cast should carry a <DWORD> target type. Also, the nullptr
+// checks after 'new wchar_t[...]' are dead with the default throwing
+// operator new — confirm whether the project uses a non-throwing new.
+//
+HRESULT WINAPI
+MsmpiLaunchService::ServiceCreateManagerProcess(
+    _In_z_ PCSTR app,
+    _In_z_ PCSTR args,
+    _In_z_ PCSTR /*context*/
+    )
+{
+    STARTUPINFOW si;
+    PROCESS_INFORMATION pi;
+    HRESULT result;
+    HANDLE currentThread = GetCurrentThread();
+    LPCSTR pJobObjName = nullptr;
+    HANDLE jobObj = nullptr;
+    PVOID pEnvBlock = nullptr;
+    LaunchContext* pCtx =
+        Launcher().m_contextPool.FindContext(IsMatchCtxThread, ¤tThread);
+    wchar_t currentDirectory[MAX_PATH] = L"";
+    const wchar_t* pCurrentDirectoryPointer;
+    wchar_t* appW = nullptr;
+    wchar_t* argsW = nullptr;
+    wchar_t* pJobObjNameW = nullptr;
+
+    if (pCtx == nullptr)
+    {
+        // No launch context was registered for this thread.
+        return E_HANDLE;
+    }
+
+    //
+    // Query the (optional) job object name recorded for this launch.
+    //
+    Launcher().m_pMspmiServiceLaunch->GetLaunchInfo(pCtx->m_pmiHandle, &pJobObjName, nullptr, nullptr);
+    if (pJobObjName != nullptr && pJobObjName[0] != 0)
+    {
+        DWORD len = static_cast(strlen( pJobObjName ) + 1);
+        pJobObjNameW = new wchar_t[len];
+        if( pJobObjNameW == nullptr )
+        {
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Cannot convert job object name to unicode .\nError=0x%x\nJobName %S",
+                E_OUTOFMEMORY,
+                pJobObjName);
+            result = E_OUTOFMEMORY;
+            goto exit_fn;
+        }
+
+        // UTF-8 -> UTF-16; at most one UTF-16 unit per UTF-8 byte.
+        if( MultiByteToWideChar(
+            CP_UTF8,
+            MB_ERR_INVALID_CHARS,
+            pJobObjName,
+            -1,
+            pJobObjNameW,
+            len ) == 0 )
+        {
+            DWORD gle = GetLastError();
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Cannot convert job object name to unicode .\nError=0x%x\nJobName %S",
+                gle,
+                pJobObjName);
+            result = HRESULT_FROM_WIN32(gle);
+            goto exit_fn;
+        }
+
+        // The new process is added to this job after creation (below).
+        jobObj = OpenJobObjectW(JOB_OBJECT_ASSIGN_PROCESS, TRUE, pJobObjNameW);
+
+        if (jobObj == nullptr)
+        {
+            DWORD gle = GetLastError();
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Cannot open job object.\nError=0x%x\nJobName %s",
+                gle,
+                pJobObjNameW);
+            result = HRESULT_FROM_WIN32(gle);
+            goto exit_fn;
+        }
+    }
+
+    //
+    // Build the user's environment block from the primary token captured
+    // in ServiceStartLaunchCtx.
+    //
+    BOOL success = CreateEnvironmentBlock(&pEnvBlock, pCtx->m_primaryToken, TRUE);
+    if (!success)
+    {
+        DWORD gle = GetLastError();
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"CreateEnvironmentBlock Error=0x%x\nCtx 0x%p - Token 0x%p",
+            gle,
+            pCtx,
+            pCtx->m_primaryToken);
+        result = HRESULT_FROM_WIN32(gle);
+        goto exit_fn;
+    }
+
+    //
+    // Start the manager in the user's profile directory.
+    //
+    success = ExpandEnvironmentStringsForUserW(
+        pCtx->m_primaryToken,
+        L"%USERPROFILE%",
+        currentDirectory,
+        _countof(currentDirectory)
+        );
+    if (!success)
+    {
+        DWORD gle = GetLastError();
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"ExpandEnvironmentStringsForUser Error=0x%x\nCtx 0x%p - Token 0x%p",
+            gle,
+            pCtx,
+            pCtx->m_primaryToken);
+        result = HRESULT_FROM_WIN32(gle);
+        goto exit_fn;
+    }
+
+    // Fall back to the service's own directory when expansion was empty.
+    if (currentDirectory[0] != L'\0')
+    {
+        pCurrentDirectoryPointer = currentDirectory;
+    }
+    else
+    {
+        pCurrentDirectoryPointer = nullptr;
+    }
+
+    //
+    // MSPMS passes us multibyte arguments for backcompat purposes.
+    // We need to convert the multibyte into wchar_t because they might
+    // contain true unicode
+    //
+    DWORD len = static_cast( strlen( app ) + 1 );
+
+    //
+    // At most n UTF-16 characters are required per n UTF-8 characters
+    //
+    appW = new wchar_t[len];
+    if( appW == nullptr )
+    {
+        result = E_OUTOFMEMORY;
+        goto exit_fn;
+    }
+
+    if( MultiByteToWideChar(
+        CP_UTF8,
+        MB_ERR_INVALID_CHARS,
+        app,
+        -1,
+        appW,
+        len ) == 0 )
+    {
+        DWORD gle = GetLastError();
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"MultiByteToWideChar Error=0x%x\nCtx 0x%p - String %S",
+            gle,
+            pCtx,
+            app);
+        result = HRESULT_FROM_WIN32( gle );
+        goto exit_fn;
+    }
+
+    // Same conversion for the command line.
+    len = static_cast( strlen( args ) + 1 );
+    argsW = new wchar_t[len];
+    if( argsW == nullptr )
+    {
+        result = E_OUTOFMEMORY;
+        goto exit_fn;
+    }
+
+    if( MultiByteToWideChar(
+        CP_UTF8,
+        MB_ERR_INVALID_CHARS,
+        args,
+        -1,
+        argsW,
+        len ) == 0 )
+    {
+        DWORD gle = GetLastError();
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"MultiByteToWideChar Error=0x%x\nCtx 0x%p - String %S",
+            gle,
+            pCtx,
+            args);
+        result = HRESULT_FROM_WIN32( gle );
+        goto exit_fn;
+    }
+
+    GetStartupInfoW(&si);
+    // Run the manager on an invisible desktop.
+    si.lpDesktop = L"";
+
+    //
+    // Create the process suspended so it can be assigned to the job object
+    // before it executes any code.
+    //
+    BOOL mgrCreated = OACR_REVIEWED_CALL(
+        mpicr,
+        CreateProcessAsUserW(
+            pCtx->m_primaryToken,
+            appW,                    // Application Name
+            argsW,                   // Command Line
+            nullptr,                 // Process Security Attributes,
+            nullptr,                 // Thread Security Attributes,
+            TRUE,                    // Inherit Parent Handles,
+            CREATE_NO_WINDOW |       // Process CreationFlags,
+            CREATE_UNICODE_ENVIRONMENT |
+            CREATE_SUSPENDED,
+            pEnvBlock,               // lpEnvironment,
+            pCurrentDirectoryPointer,// lpCurrentDirectory,
+            &si,                     // lpStartupInfo,
+            &pi                      // lpProcessInformation
+            ));
+
+    if (!mgrCreated)
+    {
+        DWORD gle = GetLastError();
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"CreateProcessAsUser Error=0x%x\nCtx 0x%p - Token 0x%p",
+            gle,
+            pCtx,
+            pCtx->m_primaryToken);
+        result = HRESULT_FROM_WIN32(gle);
+        goto exit_fn;
+    }
+
+    // Let the manager run; we only keep the process handle for tracking.
+    ResumeThread(pi.hThread);
+    CloseHandle(pi.hThread);
+    pCtx->m_mgrProcess = pi.hProcess;
+    pCtx->m_parentThread = nullptr;
+    pCtx->m_pmiHandle = nullptr;
+
+    if (jobObj != nullptr)
+    {
+        if (!AssignProcessToJobObject(jobObj, pi.hProcess))
+        {
+            DWORD gle = GetLastError();
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Failed to assign process to job object.\nError=0x%x JobObj=%s\n",
+                gle,
+                pJobObjNameW);
+
+            // The process must not run outside its job: kill it.
+            TerminateProcess(pi.hProcess, gle);
+            result = HRESULT_FROM_WIN32(gle);
+            goto exit_fn;
+        }
+    }
+
+    // Hand the process to the reaper queue for lifetime tracking.
+    result = Launcher().m_mgrQueue.AddProcess(pi.hProcess);
+
+exit_fn:
+    if (pEnvBlock != nullptr)
+    {
+        DestroyEnvironmentBlock(pEnvBlock);
+    }
+    if (jobObj != nullptr)
+    {
+        CloseHandle(jobObj);
+    }
+    delete[] appW;
+    delete[] argsW;
+    delete[] pJobObjNameW;
+    if (FAILED(result))
+    {
+        // The context is useless without a manager process.
+        Launcher().m_contextPool.DestroyContext(pCtx);
+    }
+    return result;
+}
+
+
+//
+// PMI callback: creates a launch context for a connecting client.
+// Verifies (optional) group membership, allocates a pool slot, records
+// the client token and PMI handle, and loads the user's profile.
+// Runs in the service's own security context (system account).
+//
+// Returns S_OK, ERROR_MEMBER_NOT_IN_GROUP (as HRESULT) when the client
+// fails the -g/-group check, RPC_S_SERVER_TOO_BUSY when the pool is
+// full, or the underlying failure code.
+//
+HRESULT
+MsmpiLaunchService::ServiceCreateLaunchCtx(
+    _In_ HANDLE clientToken,
+    _In_ const void* launchCtx,
+    _In_z_ const char* /*jobCtx*/
+    )
+{
+    //
+    // This function is called in process's security context; i.e. under system account
+    //
+
+    PROFILEINFOW userProfile;
+    WCHAR user[MAX_PATH] = { 0 };
+    WCHAR domain[MAX_PATH] = { 0 };
+    DWORD cchUser = _countof(user);
+    DWORD cchDomain = _countof(domain);
+    LaunchContext* pNewContext = nullptr;
+
+    // Resolve the client's account name (used for logging and profile load).
+    HRESULT result = SecurityUtils::GetTokenUser(clientToken, user, &cchUser, domain, &cchDomain);
+    if (FAILED(result))
+    {
+        return result;
+    }
+
+    if (Launcher().m_pMemberGroupSid != nullptr)
+    {
+        //
+        // Check if client is member of required group
+        //
+        BOOL isMember;
+        if (!CheckTokenMembership(clientToken, Launcher().m_pMemberGroupSid, &isMember))
+        {
+            result = HRESULT_FROM_WIN32(GetLastError());
+
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Cannot verify the membership of %s\\%s.\nError=0x%x",
+                domain,
+                user,
+                result);
+
+            return result;
+        }
+
+        if (!isMember)
+        {
+            gEventLogger.WriteEvent(
+                EVENTLOG_ERROR_TYPE,
+                1,
+                100,
+                L"Client %s\\%s is not a member of required group.",
+                domain,
+                user);
+
+            return HRESULT_FROM_WIN32(ERROR_MEMBER_NOT_IN_GROUP);
+        }
+    }
+
+    // Allocate a pool slot; nullptr means the pool is exhausted.
+    pNewContext = Launcher().m_contextPool.CreateNewContext();
+    if (pNewContext == nullptr)
+    {
+        gEventLogger.WriteEvent(
+            EVENTLOG_WARNING_TYPE,
+            1,
+            100,
+            L"Server is busy. Rejecting new clients"
+            );
+
+        return HRESULT_FROM_WIN32(RPC_S_SERVER_TOO_BUSY);
+    }
+
+    // A freshly created context must be fully zeroed.
+    Assert(pNewContext->m_mgrProcess == nullptr && pNewContext->m_pmiHandle == nullptr &&
+        pNewContext->m_parentThread == nullptr && pNewContext->m_primaryToken == nullptr &&
+        pNewContext->m_userProfile == nullptr && pNewContext->m_userToken == nullptr);
+
+    pNewContext->m_pmiHandle = launchCtx;
+    pNewContext->m_userToken = clientToken;
+
+    // Load the user's profile so %USERPROFILE% etc. resolve for the
+    // manager process created later.
+    ZeroMemory(&userProfile, sizeof(PROFILEINFOW));
+    userProfile.dwSize = sizeof(PROFILEINFOW);
+    userProfile.lpUserName = user;
+
+    if (!LoadUserProfileW(clientToken, &userProfile))
+    {
+        result = HRESULT_FROM_WIN32(GetLastError());
+        gEventLogger.WriteEvent(
+            EVENTLOG_ERROR_TYPE,
+            1,
+            100,
+            L"Failed to load user profile for %s\\%s. Error: 0x%x",
+            domain,
+            user,
+            result);
+
+        goto exit_fn;
+    }
+
+    pNewContext->m_userProfile = userProfile.hProfile;
+
+    gEventLogger.WriteEvent(
+        EVENTLOG_INFORMATION_TYPE,
+        1,
+        100,
+        L"Created new launch context 0x%p",
+        pNewContext);
+
+exit_fn:
+    if (FAILED(result))
+    {
+        // Release the half-initialized context on failure.
+        Launcher().m_contextPool.DestroyContext(pNewContext);
+    }
+    return result;
+}
+
+
+//
+// PMI callback: completes a launch context by deriving the primary token
+// that will be used to create the manager process. Runs in the client's
+// security context (the client's impersonation token is on this thread).
+//
+// When the client token is interactive and no password was supplied, the
+// impersonation token is duplicated into a primary token; otherwise the
+// (UTF-8) password from the launch info is converted to UTF-16 and
+// DoLogonUser performs an interactive logon.
+//
+HRESULT WINAPI MsmpiLaunchService::ServiceStartLaunchCtx(_In_ const void* launchCtx)
+{
+    //
+    // This function is called in client's security context;
+    // i.e. uses client's impersonation token
+    //
+
+    HANDLE hToken = nullptr;
+    HANDLE hPrimaryToken = nullptr;
+    HANDLE hCurrentThread = GetCurrentThread();
+    HRESULT result;
+    BOOL tokenIsInteractive;
+    WORD logType = EVENTLOG_INFORMATION_TYPE;
+    LPCSTR pPwd = nullptr;
+    BOOL saveCreds = FALSE;
+    LaunchContext* pCtx =
+        Launcher().m_contextPool.FindContext(IsMatchCtxHandle, launchCtx);
+
+    if (pCtx == nullptr)
+    {
+        result = E_HANDLE;
+        goto fail_fn;
+    }
+
+    // Remember the thread so ServiceCreateManagerProcess can find this
+    // context again (it is invoked on the same thread).
+    pCtx->m_parentThread = hCurrentThread;
+
+    if (!OpenThreadToken(
+        hCurrentThread,
+        TOKEN_ALL_ACCESS,
+        TRUE,
+        &hToken))
+    {
+        result = HRESULT_FROM_WIN32(GetLastError());
+        goto fail_fn;
+    }
+
+    result = SecurityUtils::IsGroupMember(WinInteractiveSid, hToken, &tokenIsInteractive);
+    if (FAILED(result))
+    {
+        goto fail_fn;
+    }
+
+    gEventLogger.WriteEvent(
+        EVENTLOG_INFORMATION_TYPE,
+        1,
+        100,
+        L"CreateManagerProcess request user interactive: %d.\n",
+        tokenIsInteractive
+        );
+
+    //
+    // MSPMS passes us multibyte arguments for backcompat purposes.
+    // pPwd will need to be converted to wchar_t before usage
+    //
+    Launcher().m_pMspmiServiceLaunch->GetLaunchInfo(launchCtx, nullptr, &pPwd, &saveCreds);
+
+    if (tokenIsInteractive && (pPwd == nullptr || pPwd[0] == '\0'))
+    {
+        //
+        // User did not provide a pwd but the token is interactive and we can use it
+        //
+        if (!DuplicateTokenEx(
+            hToken,
+            TOKEN_ALL_ACCESS,
+            nullptr,
+            SecurityImpersonation,
+            TokenPrimary,
+            &hPrimaryToken)
+            )
+        {
+            result = HRESULT_FROM_WIN32(GetLastError());
+            goto fail_fn;
+        }
+    }
+    else
+    {
+        wchar_t* pPwdW = nullptr;
+        if( pPwd != nullptr )
+        {
+            // Oversized buffer: at most one UTF-16 unit per UTF-8 byte,
+            // the factor of two leaves headroom.
+            DWORD len = static_cast<DWORD>( strlen( pPwd ) + 1 );
+            pPwdW = new wchar_t[len * 2];
+            if( pPwdW == nullptr )
+            {
+                result = E_OUTOFMEMORY;
+                goto fail_fn;
+            }
+
+            if( MultiByteToWideChar(
+                CP_UTF8,
+                MB_ERR_INVALID_CHARS,
+                pPwd,
+                -1,
+                pPwdW,
+                len * 2) == 0 )
+            {
+                delete[] pPwdW;
+                result = HRESULT_FROM_WIN32( GetLastError() );
+                goto fail_fn;
+            }
+        }
+
+        //
+        // Token does not have the sufficient rights. Need to do LogonUser.
+        //
+        result = Launcher().DoLogonUser(pPwdW, saveCreds, &hPrimaryToken);
+        delete[] pPwdW;
+        if (FAILED(result))
+        {
+            goto fail_fn;
+        }
+    }
+
+    pCtx->m_primaryToken = hPrimaryToken;
+    result = S_OK;
+
+    goto exit_fn;
+
+fail_fn:
+    //
+    // Failure path: log as an error (previously this re-assigned the
+    // informational type, which was a no-op) and tear down the context.
+    //
+    logType = EVENTLOG_ERROR_TYPE;
+    if (hPrimaryToken != nullptr)
+    {
+        CloseHandle(hPrimaryToken);
+    }
+    if (pCtx != nullptr)
+    {
+        Launcher().m_contextPool.DestroyContext(pCtx);
+    }
+
+exit_fn:
+
+    gEventLogger.WriteEvent(logType, 1, 100, L"StartLaunchCtx 0x%x", result);
+
+    if (hToken != nullptr)
+    {
+        CloseHandle(hToken);
+    }
+    return result;
+}
+
+
+//
+// PMI callback: releases the launch context associated with launchCtx,
+// unless a manager process was started from it — in that case the
+// process-termination handler owns the cleanup.
+//
+HRESULT WINAPI MsmpiLaunchService::ServiceCleanupLaunchCtx(_In_ const void* launchCtx)
+{
+    LaunchContext* pContext =
+        Launcher().m_contextPool.FindContext(IsMatchCtxHandle, launchCtx);
+
+    if (pContext == nullptr)
+    {
+        // Unknown handle: nothing to clean up.
+        return E_HANDLE;
+    }
+
+    if (pContext->m_mgrProcess != 0)
+    {
+        //
+        // A manager process was launched from this context; leave the
+        // cleanup to the process-termination path.
+        //
+        return S_OK;
+    }
+
+    Launcher().m_contextPool.DestroyContext(pContext);
+
+    gEventLogger.WriteEvent(
+        EVENTLOG_INFORMATION_TYPE,
+        1,
+        100,
+        L"Cleaned up launch context 0x%p",
+        pContext);
+
+    return S_OK;
+}
+
+
+//
+// Performs an interactive logon for the current user and returns the
+// resulting primary token in *pLogonHandle.
+//
+// When pPwd is empty, a previously cached (DPAPI-encrypted) password is
+// read from the registry and decrypted. When saveCreds is set and the
+// caller supplied the password, it is encrypted and cached after a
+// successful logon. Returns NTE_UI_REQUIRED whenever the failure means
+// the user must be asked for a (new) password.
+//
+// NOTE(review): several cast target types in this function were lost in
+// extraction (the static_cast/reinterpret_cast/const_cast calls with
+// empty-looking argument lists); restore them before compiling.
+//
+HRESULT MsmpiLaunchService::DoLogonUser(
+    _In_ PCWSTR pPwd,
+    _In_ BOOL saveCreds,
+    _Outptr_ PHANDLE pLogonHandle
+    )
+{
+    DWORD cbValue = MAX_PATH;
+    LPWSTR pValue = nullptr;
+    DATA_BLOB credsEncrypted = { 0, nullptr };
+    DATA_BLOB credsDecrypted = { 0, nullptr };
+    WCHAR user[MAX_PATH] = { 0 };
+    WCHAR domain[MAX_PATH] = { 0 };
+    DWORD cchUser = _countof(user);
+    DWORD cchDomain = _countof(domain);
+    DWORD cbPwd = 0;
+    BYTE* pFree = nullptr;          // buffer to scrub/free on exit
+    BOOL usingCachedPwd = FALSE;    // password came from the registry cache
+    BOOL pwdRequested = FALSE;      // map result to NTE_UI_REQUIRED on exit
+
+    HRESULT result = SecurityUtils::GetCurrentUser(user, &cchUser, domain, &cchDomain);
+    if (FAILED(result))
+    {
+        goto exit_fn;
+    }
+
+    if (pPwd == nullptr || pPwd[0] == L'\0')
+    {
+        //
+        // User did not provide a pwd, check if it is cached in the system
+        //
+        usingCachedPwd = TRUE;
+
+        // Grow the buffer until the value fits (ReadCurrentUserKey
+        // updates cbValue with the required size).
+        result = HRESULT_FROM_WIN32(ERROR_MORE_DATA);
+        while (result == HRESULT_FROM_WIN32(ERROR_MORE_DATA))
+        {
+            free(pValue);
+            pValue = static_cast(malloc(cbValue * sizeof (wchar_t)));
+            if (pValue == nullptr)
+            {
+                result = E_OUTOFMEMORY;
+                goto exit_fn;
+            }
+
+            result = RegistryUtils::ReadCurrentUserKey(
+                MSPMI_MODULE_PATH_KEY,
+                MSPMI_CREDENTIAL_VALUE,
+                &cbValue,
+                pValue
+                );
+        }
+
+        if (FAILED(result))
+        {
+            //
+            // If pwd is not cached before, result is ERROR_FILE_NOT_FOUND
+            //
+            pwdRequested = (result == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND));
+            goto exit_fn;
+        }
+
+        credsEncrypted.cbData = cbValue;
+        credsEncrypted.pbData = reinterpret_cast(pValue);
+
+        if (!CryptUnprotectData(&credsEncrypted, nullptr, nullptr, nullptr, nullptr, 0, &credsDecrypted))
+        {
+            //
+            // If pwd was reset after previous encryption, result is ERROR_INVALID_PASSWORD
+            //
+            result = HRESULT_FROM_WIN32(GetLastError());
+            pwdRequested = (GetLastError() == ERROR_INVALID_PASSWORD);
+            goto exit_fn;
+        }
+
+        // Use the decrypted password; remember the buffer for scrubbing.
+        pPwd = reinterpret_cast(credsDecrypted.pbData);
+        cbPwd = credsDecrypted.cbData;
+        pFree = credsDecrypted.pbData;
+    }
+
+    if (!LogonUserW(
+        user,
+        domain,
+        pPwd,
+        LOGON32_LOGON_INTERACTIVE,
+        LOGON32_PROVIDER_DEFAULT,
+        pLogonHandle
+        ))
+    {
+        //
+        // If pwd is not correct, result is ERROR_LOGON_FAILURE.
+        // If cached pwd is used, this means user changed the pwd after it was cached,
+        // we need to request the new pwd.
+        // Otherwise, it means user input a wrong pwd.
+        //
+        result = HRESULT_FROM_WIN32(GetLastError());
+        pwdRequested = usingCachedPwd && (GetLastError() == ERROR_LOGON_FAILURE);
+        goto exit_fn;
+    }
+
+    if (saveCreds && !usingCachedPwd)
+    {
+        // Cache the caller-supplied password (DPAPI, machine scope).
+        cbPwd = static_cast((wcslen(pPwd) + 1) * sizeof(wchar_t));
+
+        credsDecrypted.pbData = const_cast(
+            reinterpret_cast(pPwd));
+        credsDecrypted.cbData = cbPwd;
+
+        if (!CryptProtectData(
+            &credsDecrypted,
+            nullptr,
+            nullptr,
+            nullptr,
+            nullptr,
+            CRYPTPROTECT_LOCAL_MACHINE,
+            &credsEncrypted))
+        {
+            result = HRESULT_FROM_WIN32(GetLastError());
+            goto exit_fn;
+        }
+        pFree = credsEncrypted.pbData;
+
+        result = RegistryUtils::WriteCurrentUserKey(
+            MSPMI_MODULE_PATH_KEY,
+            MSPMI_CREDENTIAL_VALUE,
+            credsEncrypted.cbData,
+            reinterpret_cast(credsEncrypted.pbData)
+            );
+
+        if (FAILED(result))
+        {
+            goto exit_fn;
+        }
+    }
+
+exit_fn:
+    if (usingCachedPwd)
+    {
+        // Scrub the decrypted password before releasing the buffer.
+        SecureZeroMemory(pFree, cbPwd);
+    }
+    LocalFree(pFree);
+    free(pValue);
+    if (pwdRequested)
+    {
+        // Tell the caller that fresh credentials are required.
+        result = NTE_UI_REQUIRED;
+    }
+    return result;
+}
diff --git a/src/launchSvc/MsmpiLaunchSvc.h b/src/launchSvc/MsmpiLaunchSvc.h
new file mode 100644
index 0000000..c103f50
--- /dev/null
+++ b/src/launchSvc/MsmpiLaunchSvc.h
@@ -0,0 +1,186 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include
+#include
+#include "SvcUtils.h"
+#include "mspms.h"
+#include "launchSvcMsg.h"
+
+
+//
+// MSMPI PMI settings
+//
+// Registry locations for the MSPMS provider and cached credentials, the
+// exported proc names resolved from the provider DLL, and service limits.
+//
+#define MSPMI_MODULE_PATH_KEY   L"Software\\Microsoft\\MPI"
+#define MSPMI_PROVIDER_VALUE    L"MSPMSProvider"
+#define MSPMI_PROC_GET_PMI      "MSMPI_Get_pm_interface"
+#define MSPMI_PROC_PMI_QUERY_IF "MSMPI_pm_query_interface"
+#define MSPMI_CREDENTIAL_VALUE  L"MSMPI_Credentials"
+// TCP port the service listens on unless overridden with -p/-port.
+#define DEFAULT_SERVICE_PORT    8677
+// One wait slot is reserved for the process queue's new-process event.
+#define MAXIMUM_CONTEXTS        (MAXIMUM_WAIT_OBJECTS - 1)
+// Milliseconds ProcessQueue::Stop() waits for its thread to exit.
+#define SHUTDOWN_TIMEOUT        60000
+// Index of the new-process event in ProcessQueue's wait array.
+#define NEW_PROCESS_EVENT_IDX   0
+
+
+/*--------------------------------------------------------------------------*/
+/* LaunchContext */
+/*--------------------------------------------------------------------------*/
+
+//
+// Holds information that is related to a launch context, between start and
+// end of launch context calls from PMI
+//
+class LaunchContext
+{
+public:
+    // Opaque PMI handle identifying this launch request.
+    const void* m_pmiHandle;
+    // Thread on which the start-launch callback ran (used to find the
+    // context again when the manager process is created).
+    HANDLE m_parentThread;
+    // Primary token used to create the manager process.
+    HANDLE m_primaryToken;
+
+    // Manager process started for this context (nullptr until created).
+    HANDLE m_mgrProcess;
+    // Client token captured when the context was created.
+    HANDLE m_userToken;
+    // Profile handle returned by LoadUserProfileW.
+    HANDLE m_userProfile;
+
+    LaunchContext();
+    ~LaunchContext();
+
+    // Releases the handles/resources held by this context.
+    void Dispose();
+};
+
+
+/*--------------------------------------------------------------------------*/
+/* ContextPool */
+/*--------------------------------------------------------------------------*/
+
+//
+// Thread-safe collection of active launch contexts
+//
+class ContextPool
+{
+private:
+    // Fixed storage for all contexts.
+    LaunchContext m_contexts[MAXIMUM_CONTEXTS];
+    // Presumably the indices of the in-use entries of m_contexts —
+    // confirm against the implementation.
+    DWORD m_activeIndices[MAXIMUM_CONTEXTS];
+    UINT m_activeContextCount;
+    // Guards the members above.
+    SRWLOCK m_lock;
+
+public:
+    // Predicate type used by FindContext to match a context against
+    // caller-supplied data. NOTE(review): the template parameter list was
+    // lost in extraction (presumably 'template <typename T>' here and on
+    // FindContext below).
+    template
+    using IsMatch = BOOL(*)(_In_ const LaunchContext* pCtx, _In_ const T* pData);
+
+    ContextPool();
+
+    ~ContextPool();
+
+    // Returns a fresh context, or nullptr when the pool is exhausted.
+    LaunchContext* CreateNewContext();
+
+    // Releases the given context back to the pool.
+    BOOL DestroyContext(_In_ LaunchContext* pDeleteContext);
+
+    // Returns the first active context for which compareFunct(ctx, pData)
+    // is TRUE, or nullptr when none matches.
+    template
+    LaunchContext*
+    FindContext(
+        _In_ IsMatch compareFunct,
+        _In_ const T* pData
+        );
+};
+
+
+/*--------------------------------------------------------------------------*/
+/* ProcessQueue */
+/*--------------------------------------------------------------------------*/
+
+//
+// Tracks running smpd manager processes. A dedicated thread waits on the
+// handle array and reaps processes as they terminate; slot
+// NEW_PROCESS_EVENT_IDX of the wait appears to carry m_newProcessEvent
+// (see WaitProcesses) — confirm against Initialize().
+//
+class ProcessQueue
+{
+private:
+    // Wait array passed to WaitForMultipleObjects.
+    HANDLE m_runningProcesses[MAXIMUM_WAIT_OBJECTS];
+    // Signaled when an entry is added (and by Stop() to wake the thread).
+    HANDLE m_newProcessEvent;
+    // Number of valid entries in m_runningProcesses.
+    volatile DWORD m_count;
+    // The WaitProcesses thread created by Start().
+    HANDLE m_thread;
+    // Cleared by Stop() to end the wait loop.
+    volatile BOOL m_run;
+    // Guards m_runningProcesses/m_count.
+    CRITICAL_SECTION m_lock;
+
+    // Thread proc: waits on the handles and reaps terminated processes.
+    static DWORD WINAPI WaitProcesses(_In_ LPVOID pData);
+
+    // Reads m_count under the lock.
+    DWORD GetCountSafe();
+
+    // Removes the entry at idx and returns its handle.
+    HANDLE DeleteProcess(_In_ DWORD idx);
+
+public:
+    ProcessQueue();
+
+    ~ProcessQueue();
+
+    HRESULT Initialize();
+
+    // Appends a process handle and wakes the wait thread.
+    HRESULT AddProcess(_In_ HANDLE newProcess);
+
+    // Starts the wait thread.
+    HRESULT Start();
+
+    // Stops and joins the wait thread (up to SHUTDOWN_TIMEOUT).
+    HRESULT Stop();
+};
+
+
+/*--------------------------------------------------------------------------*/
+/* MsmpiLaunchService */
+/*--------------------------------------------------------------------------*/
+
+//
+// The launch service: loads the MSPMS provider, listens for PMI clients,
+// and creates smpd manager processes under each client's identity.
+//
+class MsmpiLaunchService
+{
+    // Signature of the provider's MSMPI_Get_pm_interface export.
+    typedef
+    HRESULT
+    (WINAPI *PFN_MSMPI_GET_PM_INTERFACE)(
+        _In_ REFGUID RequestedVersion,
+        _Inout_ PmiServiceInterface* Interface
+        );
+
+    // Signature of the provider's MSMPI_pm_query_interface export.
+    typedef
+    HRESULT
+    (WINAPI *PFN_MSMPI_PM_QUERY_INTERFACE)(
+        _In_ REFGUID RequestedVersion,
+        _Inout_ void** Interface
+        );
+
+private:
+    HMODULE m_pmiModule;                               // MSPMS provider DLL
+    PmiServiceInterface m_pmiService;                  // service interface from provider
+    PmiServiceLaunchInterface* m_pMspmiServiceLaunch;  // launch-context callbacks
+    USHORT m_servicePort;                              // listen port (-p/-port)
+    PSID m_pMemberGroupSid;                            // required client group (-g/-group); may stay null
+    ContextPool m_contextPool;                         // active launch contexts
+    ProcessQueue m_mgrQueue;                           // running manager processes
+
+public:
+    MsmpiLaunchService();
+    ~MsmpiLaunchService();
+    // Loads the provider, resolves its interfaces, installs callbacks.
+    HRESULT Load();
+    // Parses -p/-port and -g/-group; FALSE on invalid arguments.
+    BOOL ParseOptions(_In_ DWORD argc, _In_ LPWSTR *argv);
+    // Starts the reaper thread and blocks in the PMI listen loop.
+    HRESULT Run();
+    // Posts a stop to the listen loop and shuts down the queue.
+    VOID Stop();
+    // Destroys the context tracking a terminated manager process.
+    VOID ManagerProcessTerminated(_In_ HANDLE mgrProcess);
+
+    // PMI callbacks (static: the provider calls them without an object).
+    static HRESULT
+    WINAPI ServiceCreateManagerProcess(
+        _In_z_ PCSTR app,
+        _In_z_ PCSTR args,
+        _In_z_ PCSTR context
+        );
+
+    static HRESULT
+    WINAPI ServiceCreateLaunchCtx(
+        _In_ HANDLE clientToken,
+        _In_ const void* launchCtx,
+        _In_z_ const char* jobCtx
+        );
+
+    static HRESULT WINAPI ServiceStartLaunchCtx(_In_ const void* launchCtx);
+    static HRESULT WINAPI ServiceCleanupLaunchCtx(_In_ const void* launchCtx);
+
+private:
+
+    // Interactive logon for the current user; handles the encrypted
+    // credential cache (see the implementation for details).
+    HRESULT
+    DoLogonUser(
+        _In_ LPCWSTR pPwd,
+        _In_ BOOL saveCreds,
+        _Outptr_ PHANDLE pLogonHandle
+        );
+};
diff --git a/src/launchSvc/launchSvcMsg.mc b/src/launchSvc/launchSvcMsg.mc
new file mode 100644
index 0000000..b378150
--- /dev/null
+++ b/src/launchSvc/launchSvcMsg.mc
@@ -0,0 +1,84 @@
+;
+; // Launch Service Message Texts
+;
+
+
+;
+; // Header
+;
+
+SeverityNames=(Success=0x0:STATUS_SEVERITY_SUCCESS
+ Informational=0x1:STATUS_SEVERITY_INFORMATIONAL
+ Warning=0x2:STATUS_SEVERITY_WARNING
+ Error=0x3:STATUS_SEVERITY_ERROR
+ )
+
+
+FacilityNames=(System=0x0:FACILITY_SYSTEM
+ Runtime=0x2:FACILITY_RUNTIME
+ Stubs=0x3:FACILITY_STUBS
+ Io=0x4:FACILITY_IO_ERROR_CODE
+ )
+
+LanguageNames=(English=0x409:MSG00409)
+
+
+;
+; // Event Categories
+;
+
+MessageIdTypedef=WORD
+
+MessageId=0x1
+SymbolicName=SVC_CATEGORY
+Language=English
+MsMpi Launch Service Events
+.
+
+MessageId=0x2
+SymbolicName=CLIENT_CATEGORY
+Language=English
+MsMpi Launch Service Client Events
+.
+
+
+;
+; // Message Definitions
+;
+
+MessageIdTypedef=DWORD
+
+MessageId=0x1
+Severity=Success
+Facility=Runtime
+SymbolicName=SERVICE_EVENT
+Language=English
+%1
+.
+
+MessageId=0x100
+Severity=Success
+Facility=Runtime
+SymbolicName=SERVICE_STARTED
+Language=English
+MsMpi Launch Service started successfully.
+.
+
+
+MessageId=0x101
+Severity=Informational
+Facility=Runtime
+SymbolicName=SERVICE_STATE_CHANGE
+Language=English
+MsMpi Launch Service state change from %1 to %2.
+.
+
+
+MessageId=0x102
+Severity=Error
+Facility=System
+;// !!!!!!!!!!!!!!!
+SymbolicName=MSG_BAD_FILE_CONTENTS
+Language=English
+File %1 contains content that is not valid.
+.
diff --git a/src/launchSvc/launchsvc.rc b/src/launchSvc/launchsvc.rc
new file mode 100644
index 0000000..75668d6
--- /dev/null
+++ b/src/launchSvc/launchsvc.rc
@@ -0,0 +1,11 @@
+#include
+#include
+
+#define VER_FILETYPE VFT_APP
+#define VER_FILESUBTYPE VFT2_UNKNOWN
+#define VER_FILEDESCRIPTION_STR "Microsoft MPI Launch Service"
+#define VER_INTERNALNAME_STR "msmpilaunchsvc"
+#define VER_ORIGINALFILENAME_STR "msmpilaunchsvc.exe"
+
+#include "common.ver"
+#include "launchsvcmsg.rc"
\ No newline at end of file
diff --git a/src/launchSvc/msmpiLaunchSvc.vcxproj b/src/launchSvc/msmpiLaunchSvc.vcxproj
new file mode 100644
index 0000000..60aa776
--- /dev/null
+++ b/src/launchSvc/msmpiLaunchSvc.vcxproj
@@ -0,0 +1,68 @@
+
+
+
+
+
+
+ msmpiLaunchSvc
+ {03cfde58-e72e-41d7-85ed-43150db0ca44}
+
+
+
+ Microsoft MPI Launch Service
+
+
+
+ false
+ Application
+
+
+
+
+
+
+ ProgramDatabase
+
+ %(AdditionalIncludeDirectories);
+ $(SrcRoot)\mpi\msmpi\include;
+ $(SrcRoot)\mpi\common;
+ .\$(O);
+
+
+
+
+
+ Console
+ mainCRTStartup
+
+ %(AdditionalDependencies);
+ $(PUBLIC_SDK_LIB)\kernel32.lib;
+ $(PUBLIC_SDK_LIB)\crypt32.lib;
+ $(PUBLIC_SDK_LIB)\advapi32.lib;
+ $(PUBLIC_SDK_LIB)\userenv.lib;
+ $(CRT_Libs);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(MPI_BIN_DESTINATION)
+ *.exe
+
+
+
+
diff --git a/src/launchSvc/msmpiLaunchSvcMc.vcxproj b/src/launchSvc/msmpiLaunchSvcMc.vcxproj
new file mode 100644
index 0000000..2a6952b
--- /dev/null
+++ b/src/launchSvc/msmpiLaunchSvcMc.vcxproj
@@ -0,0 +1,26 @@
+
+
+
+ {8E3235C8-9024-4915-BFA6-8F79FFAEC14A}
+
+
+
+
+ Microsoft MPI Launch Service
+
+
+ None
+ false
+ WindowsUserModeDriver10.0
+
+
+
+
+ true
+ $(SrcRoot)\launchsvc\$(O)
+ true
+ $(SrcRoot)\launchsvc\$(O)
+
+
+
+
\ No newline at end of file
diff --git a/src/mpi.props b/src/mpi.props
new file mode 100644
index 0000000..de2ff8e
--- /dev/null
+++ b/src/mpi.props
@@ -0,0 +1,168 @@
+
+
+
+
+
+
+
+ 10
+ 0
+ 12498
+ 5
+
+ <_MSMPI_VER_>$([MSBuild]::BitwiseOr(`$([MSBuild]::Multiply(`$(ProductMajorVersion)`, `256`))`, `$([MSBuild]::BitwiseAnd(`$(ProductMinorVersion)`, `255`))`))
+ <_MSMPI_VER_EX_>$([MSBuild]::BitwiseOr(`$([MSBuild]::Multiply(`$(_MSMPI_VER_)`, `65536`))`, `$([MSBuild]::BitwiseAnd(`$(BuildMinorVersion)`, `65535`))`))
+ <_MSMPI_BUILDNUM_>$(BuildMajorVersion)
+ <_MSMPI_FILEREV_>$(BuildMinorVersion)
+
+
+
+
+
+
+
+
+ %(PreprocessorDefinitions);
+ MSMPI_VER=$(_MSMPI_VER_);
+ MSMPI_VER_EX=$(_MSMPI_VER_EX_);
+ _BLDVERMAJOR=$(ProductMajorVersion);
+ _BLDVERMINOR=$(ProductMinorVersion);
+ _BLDNUMMAJOR=$(BuildMajorVersion);
+ _BLDNUMMINOR=$(BuildMinorVersion);
+
+
+
+
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+
+ libucrtd.lib;libcmtd.lib;libvcruntimed.lib
+
+
+ libucrt.lib;libcmt.lib;libvcruntime.lib
+
+
+
+
+ _WIN64=1;_AMD64_=1;AMD64=1;%(PreprocessorDefinitions)
+ %(AdditionalOptions) /favor:AMD64
+
+
+
+
+
+ _X86_=1;i386=1;%(PreprocessorDefinitions)
+
+
+
+
+
+ Disabled
+ DBG=1;%(PreprocessorDefinitions)
+
+
+
+
+
+ Full
+ true
+ NDEBUG=1;%(PreprocessorDefinitions)
+ Guard
+
+
+
+
+
+
+ %(PreprocessorDefinitions);
+ _WIN32_WINNT=_WIN32_WINNT_WIN10;
+
+
+ %(AdditionalIncludeDirectories);
+
+
+
+
+
+
+ false
+ true
+ MultiThreaded
+ true
+ true
+ true
+ true
+ true
+ false
+ 8Bytes
+ OldStyle
+ false
+
+ 4986;4987;4471;4369;4309;4603;4627;
+ 28251;28252;28253;
+ %(DisableSpecificWarnings)
+
+ Strict
+ stdcall
+
+ warning.h;
+ $(MPI_INC_ROOT)\mpiwarning.h
+
+
+ %(AdditionalIncludeDirectories);
+ $(MPI_INC_PATH);
+ $(MPI_SRC_ROOT)\common
+
+
+ %(PreprocessorDefinitions);
+ _WINSOCK_DEPRECATED_NO_WARNINGS;
+ VER_USE_OTHER_MAJOR_MINOR_VER;
+ HAVE_DEBUGGER_SUPPORT=1;
+ HAVE_FORTRAN_BINDING=1;
+ USE_HUMAN_READABLE_TOKENS=1;
+ _USE_DECLSPECS_FOR_SAL=1;
+ WIN32_LEAN_AND_MEAN=1;
+ CONDITION_HANDLING=1;
+ _CRT_SECURE_NO_WARNINGS=1;
+
+
+ %(PreprocessorDefinitions);
+ _MPICH_DLL_=1;
+
+
+
+ true
+ UseLinkTimeCodeGeneration
+
+
+
+
\ No newline at end of file
diff --git a/src/mpi/common/MpiLock.cpp b/src/mpi/common/MpiLock.cpp
new file mode 100644
index 0000000..ec7524e
--- /dev/null
+++ b/src/mpi/common/MpiLock.cpp
@@ -0,0 +1,270 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "MpiLock.h"
+//
+// This file defines function pointers to the core Win32 lock functions
+// that are used by the MSMPI stack. We store these as function pointers
+// so that we can replace them with "NOP" functions when we run in single
+// threaded mode.
+//
+
+
+
+//
+// Prototype functions for the platform locking functions we wrap.
+//
+
+typedef void WINAPI
+FN_InitializeCriticalSection(
+ _Out_ LPCRITICAL_SECTION lpCriticalSection
+ );
+
+typedef void WINAPI
+FN_DeleteCriticalSection(
+ _Inout_ LPCRITICAL_SECTION lpCriticalSection
+ );
+
+typedef void WINAPI
+FN_EnterCriticalSection(
+ _Inout_ LPCRITICAL_SECTION CriticalSection
+ );
+
+typedef void WINAPI
+FN_LeaveCriticalSection(
+ _Inout_ LPCRITICAL_SECTION CriticalSection
+ );
+
+typedef VOID WINAPI
+FN_InitializeSRWLock(
+ _Out_ PSRWLOCK SRWLock
+ );
+
+typedef VOID WINAPI
+FN_AcquireSRWLockExclusive(
+ _Inout_ PSRWLOCK SRWLock
+ );
+
+typedef VOID WINAPI
+FN_AcquireSRWLockShared(
+ _Inout_ PSRWLOCK SRWLock
+ );
+
+typedef VOID WINAPI
+FN_ReleaseSRWLockExclusive(
+ _Inout_ PSRWLOCK SRWLock
+ );
+
+typedef VOID WINAPI
+FN_ReleaseSRWLockShared(
+ _Inout_ PSRWLOCK SRWLock
+ );
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// Summary:
+// Empty lock function for SingleThread mode
+//
+static void WINAPI
+MpiLockNoThread(
+ _Inout_ LPCRITICAL_SECTION
+ )
+{
+ // Intentionally empty: substituted for all CRITICAL_SECTION entry
+ // points when MpiLockInitializeSingleThreadMode() disables locking.
+}
+
+
+//
+// Summary:
+// Empty read/writer lock function for SingleThread mode
+//
+static VOID WINAPI
+MpiRwLockNoThread(
+ _Inout_ PSRWLOCK
+ )
+{
+ // Intentionally empty: no-op stand-in for every SRWLOCK operation
+ // in single-threaded mode.
+}
+
+
+//
+// Global list of all Locking functions MPI will use in
+// both ST and MT scenarios (IE, Shared code paths).
+//
+static struct _MpiLockFunctions
+{
+ // CRITICAL_SECTION (exclusive lock) operations.
+ FN_InitializeCriticalSection* LockInitialize;
+ FN_DeleteCriticalSection* LockDelete;
+ FN_EnterCriticalSection* LockEnter;
+ FN_LeaveCriticalSection* LockLeave;
+
+ // SRWLOCK (slim reader/writer lock) operations.
+ FN_InitializeSRWLock* RwLockInitialize;
+ FN_AcquireSRWLockExclusive* RwLockAcquireExclusive;
+ FN_ReleaseSRWLockExclusive* RwLockReleaseExclusive;
+
+ FN_AcquireSRWLockShared* RwLockAcquireShared;
+ FN_ReleaseSRWLockShared* RwLockReleaseShared;
+
+} MpiLockFunctions =
+//
+// NOTE: We set the default locking behavior to be ON
+// this means that all locks function as expected by default
+// and can be disabled with a call to MpiLockInitializeSingleThreadMode
+//
+{
+ InitializeCriticalSection,
+ DeleteCriticalSection,
+ EnterCriticalSection,
+ LeaveCriticalSection,
+ InitializeSRWLock,
+ AcquireSRWLockExclusive,
+ ReleaseSRWLockExclusive,
+ AcquireSRWLockShared,
+ ReleaseSRWLockShared
+};
+
+
+//
+// Summary:
+// Initialize the local process to skip all locking functions.
+//
+VOID
+MpiLockInitializeSingleThreadMode()
+{
+ //
+ // Swap every lock entry point for a no-op. NOTE(review): nothing
+ // guards against calling this while a lock is already initialized
+ // or held; presumably callers invoke it once during startup before
+ // any lock is created -- confirm at call sites.
+ //
+ MpiLockFunctions.LockInitialize = MpiLockNoThread;
+ MpiLockFunctions.LockDelete = MpiLockNoThread;
+ MpiLockFunctions.LockEnter = MpiLockNoThread;
+ MpiLockFunctions.LockLeave = MpiLockNoThread;
+ MpiLockFunctions.RwLockInitialize = MpiRwLockNoThread;
+ MpiLockFunctions.RwLockAcquireExclusive = MpiRwLockNoThread;
+ MpiLockFunctions.RwLockReleaseExclusive = MpiRwLockNoThread;
+ MpiLockFunctions.RwLockAcquireShared = MpiRwLockNoThread;
+ MpiLockFunctions.RwLockReleaseShared = MpiRwLockNoThread;
+}
+
+
+//
+// Summary:
+// Initializes a MPI_LOCK (CRITICAL_SECTION).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiLockInitialize(
+ _Out_ MPI_LOCK* Lock
+ )
+{
+ MpiLockFunctions.LockInitialize(Lock);
+}
+
+
+//
+// Summary:
+// Deletes a MPI_LOCK (CRITICAL_SECTION).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiLockDelete(
+ _Inout_ MPI_LOCK* Lock
+ )
+{
+ MpiLockFunctions.LockDelete(Lock);
+}
+
+
+//
+// Summary:
+// Enters a MPI_LOCK (CRITICAL_SECTION).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiLockEnter(
+ _Inout_ MPI_LOCK* Lock
+ )
+{
+ MpiLockFunctions.LockEnter(Lock);
+}
+
+
+//
+// Summary:
+// Leaves a MPI_LOCK (CRITICAL_SECTION).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiLockLeave(
+ _Inout_ MPI_LOCK* Lock
+ )
+{
+ MpiLockFunctions.LockLeave(Lock);
+}
+
+
+//
+// Summary:
+// Initialize a MPI_RWLOCK (SRWLOCK).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiRwLockInitialize(
+ _Out_ MPI_RWLOCK* RwLock
+ )
+{
+ MpiLockFunctions.RwLockInitialize(RwLock);
+}
+
+
+//
+// Summary:
+// Acquire Exclusive access for a MPI_RWLOCK (SRWLOCK).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiRwLockAcquireExclusive(
+ _Inout_ MPI_RWLOCK* RwLock
+ )
+{
+ MpiLockFunctions.RwLockAcquireExclusive(RwLock);
+}
+
+
+//
+// Summary:
+// Release Exclusive access for a MPI_RWLOCK (SRWLOCK).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiRwLockReleaseExclusive(
+ _Inout_ MPI_RWLOCK* RwLock
+ )
+{
+ MpiLockFunctions.RwLockReleaseExclusive(RwLock);
+}
+
+
+//
+// Summary:
+// Acquire Shared access for a MPI_RWLOCK (SRWLOCK).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiRwLockAcquireShared(
+ _Inout_ MPI_RWLOCK* RwLock
+ )
+{
+ MpiLockFunctions.RwLockAcquireShared(RwLock);
+}
+
+
+//
+// Summary:
+// Release Shared access for a MPI_RWLOCK (SRWLOCK).
+// Dispatches through MpiLockFunctions; a no-op in single-thread mode.
+//
+VOID
+MpiRwLockReleaseShared(
+ _Inout_ MPI_RWLOCK* RwLock
+ )
+{
+ MpiLockFunctions.RwLockReleaseShared(RwLock);
+}
+
+
+#ifdef __cplusplus
+} //extern "C" {
+#endif
diff --git a/src/mpi/common/MpiLock.h b/src/mpi/common/MpiLock.h
new file mode 100644
index 0000000..7a1c39d
--- /dev/null
+++ b/src/mpi/common/MpiLock.h
@@ -0,0 +1,66 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+typedef SRWLOCK MPI_RWLOCK;
+typedef CRITICAL_SECTION MPI_LOCK;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+VOID
+MpiLockInitializeSingleThreadMode();
+
+VOID
+MpiLockInitialize(
+ _Out_ MPI_LOCK* Lock
+ );
+
+VOID
+MpiLockDelete(
+ _Inout_ MPI_LOCK* Lock
+ );
+
+VOID
+MpiLockEnter(
+ _Inout_ MPI_LOCK* Lock
+ );
+
+VOID
+MpiLockLeave(
+ _Inout_ MPI_LOCK* Lock
+ );
+
+#define MPI_RWLOCK_INIT SRWLOCK_INIT
+
+VOID
+MpiRwLockInitialize(
+ _Out_ MPI_RWLOCK* RwLock
+ );
+
+VOID
+MpiRwLockAcquireExclusive(
+ _Inout_ MPI_RWLOCK* RwLock
+ );
+
+VOID
+MpiRwLockReleaseExclusive(
+ _Inout_ MPI_RWLOCK* RwLock
+ );
+
+VOID
+MpiRwLockAcquireShared(
+ _Inout_ MPI_RWLOCK* RwLock
+ );
+
+VOID
+MpiRwLockReleaseShared(
+ _Inout_ MPI_RWLOCK* RwLock
+ );
+
+
+#ifdef __cplusplus
+} //extern "C" {
+#endif
\ No newline at end of file
diff --git a/src/mpi/common/SvcUtils.cpp b/src/mpi/common/SvcUtils.cpp
new file mode 100644
index 0000000..dc518c9
--- /dev/null
+++ b/src/mpi/common/SvcUtils.cpp
@@ -0,0 +1,546 @@
+/*
+* Copyright (c) Microsoft Corporation. All rights reserved.
+* Licensed under the MIT License.
+*
+* This file includes definitions of utility functions:
+* Windows Event Logger Utilities
+* Registry Utilities
+* Security Utilities
+*/
+
+#include "precomp.h"
+#include "SvcUtils.h"
+
+/*---------------------------------------------------------------------------*/
+/* Windows Event Logger Utility Class */
+/*---------------------------------------------------------------------------*/
+
+// Constructs a logger with no event source registered yet; Open() must
+// be called before WriteEvent() can report anywhere useful.
+EventLogger::EventLogger()
+ : m_eventLog(nullptr)
+{
+}
+
+
+// Releases the event source handle, if one was registered.
+EventLogger::~EventLogger()
+{
+ //
+ // Guard against Open() never having been called (or having failed):
+ // DeregisterEventSource expects a handle obtained from
+ // RegisterEventSource, not nullptr.
+ //
+ if (m_eventLog != nullptr)
+ {
+ DeregisterEventSource(m_eventLog);
+ }
+}
+
+
+BOOL EventLogger::Open(_In_z_ PCWSTR pEventSource)
+{
+ //
+ // Register this process as an event source on the local machine
+ // (first argument nullptr = local computer). On failure the error is
+ // reported to stdout, since the event log itself is not available.
+ //
+ m_eventLog = RegisterEventSourceW(nullptr, pEventSource);
+ if (m_eventLog == nullptr)
+ {
+ wprintf(
+ L"Cannot register event source %s with error 0x%x\n",
+ pEventSource,
+ GetLastError()
+ );
+
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+//
+// Writes a formatted string to the event log.
+//
+void
+EventLogger::WriteEvent(
+ _In_ WORD type,
+ _In_ WORD category,
+ _In_ DWORD eventId,
+ _Printf_format_string_ LPCWSTR pFormatStr,
+ ...
+ )
+{
+ // Fixed-size stack buffer; messages longer than MAX_LOG_TEXT are
+ // truncated (see STRSAFE_E_INSUFFICIENT_BUFFER handling below).
+ WCHAR buffer[MAX_LOG_TEXT];
+ va_list args;
+
+ va_start(args, pFormatStr);
+
+ HRESULT hr = StringCchVPrintfW(buffer, _countof(buffer), pFormatStr, args);
+
+ va_end(args);
+
+ LPCWSTR eventStr = buffer;
+
+ if(hr != S_OK && hr != STRSAFE_E_INSUFFICIENT_BUFFER)
+ {
+ //
+ // STRSAFE_E_INSUFFICIENT_BUFFER is acceptable since truncated messages
+ // are good enough.
+ //
+ eventStr = L"Failed to form event message.";
+ }
+
+ // Report a single-string event; return value intentionally ignored
+ // (logging is best-effort).
+ ReportEventW(
+ m_eventLog,
+ type,
+ category,
+ eventId,
+ nullptr,
+ 1,
+ 0,
+ &eventStr,
+ nullptr
+ );
+}
+
+
+/*---------------------------------------------------------------------------*/
+/* Registry Access Utility Class */
+/*---------------------------------------------------------------------------*/
+
+HRESULT
+RegistryUtils::ReadKey(
+ _In_ HKEY key,
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_z_ LPCWSTR pValueName,
+ _Inout_ LPDWORD pcbValue,
+ _Out_ LPWSTR pValue
+ )
+{
+ HKEY readKey;
+
+ // Open the subkey read-only, then fetch the value restricted to
+ // REG_SZ. pcbValue is in/out: buffer size in, bytes written out.
+ LONG result = RegOpenKeyExW(key, pSubKey, 0, KEY_READ, &readKey);
+
+ if( result != ERROR_SUCCESS )
+ {
+ return HRESULT_FROM_WIN32( result );
+ }
+
+ result = RegGetValueW(
+ readKey,
+ nullptr,
+ pValueName,
+ RRF_RT_REG_SZ,
+ nullptr,
+ pValue,
+ pcbValue
+ );
+
+ RegCloseKey(readKey);
+
+ return HRESULT_FROM_WIN32( result );
+}
+
+
+//
+// Reads a key in HKEY_CURRENT_USER for the user the current thread is impersonating.
+//
+HRESULT
+RegistryUtils::ReadCurrentUserKey(
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_z_ LPCWSTR pValueName,
+ _Inout_ LPDWORD pCapValue,
+ _Out_writes_z_(*pCapValue) LPWSTR pValue
+)
+{
+ HKEY currentUser = nullptr;
+ //
+ // RegOpenCurrentUser resolves HKEY_CURRENT_USER for the user this
+ // thread is impersonating (the HKEY_CURRENT_USER predefined handle
+ // would map to the process owner). The '&' here was mangled to a
+ // '\xA4' character by an earlier encoding pass; restored.
+ //
+ LONG result = RegOpenCurrentUser(KEY_READ, &currentUser);
+ if (result != ERROR_SUCCESS)
+ {
+ return HRESULT_FROM_WIN32(result);
+ }
+
+ result = RegGetValueW(
+ currentUser,
+ pSubKey,
+ pValueName,
+ RRF_RT_REG_SZ,
+ nullptr,
+ pValue,
+ pCapValue
+ );
+
+ RegCloseKey(currentUser);
+ return HRESULT_FROM_WIN32(result);
+}
+
+
+//
+// Writes to a key in HKEY_CURRENT_USER for the user the current thread is impersonating.
+// Creates the key if it does not exist.
+//
+HRESULT
+RegistryUtils::WriteCurrentUserKey(
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_ LPCWSTR pValueName,
+ _In_ DWORD capValue,
+ _In_ LPCWSTR pValue
+)
+{
+ HKEY currentUser = nullptr;
+ HKEY hWrite = nullptr;
+ LONG result = ERROR_SUCCESS;
+ DWORD disposition;
+
+ //
+ // Resolve HKCU for the impersonated user (restored mangled '&').
+ //
+ result = RegOpenCurrentUser(KEY_WRITE, &currentUser);
+ if (result != ERROR_SUCCESS)
+ {
+ goto exit_fn;
+ }
+
+ //
+ // Opens or creates registry key.
+ //
+ result = RegCreateKeyExW(
+ currentUser,
+ pSubKey,
+ 0,
+ nullptr,
+ 0,
+ KEY_WRITE,
+ nullptr,
+ &hWrite,
+ &disposition
+ );
+
+ if (result != ERROR_SUCCESS)
+ {
+ goto exit_fn;
+ }
+
+ //
+ // Fix: the value type must be REG_SZ. The original passed
+ // RRF_RT_REG_SZ, which is a RegGetValue restriction flag, not a
+ // registry value type. capValue is the size of pValue in bytes,
+ // including the terminating null.
+ //
+ result = RegSetValueExW(
+ hWrite,
+ pValueName,
+ 0,
+ REG_SZ,
+ reinterpret_cast<const BYTE*>(pValue),
+ capValue
+ );
+
+exit_fn:
+ // Only close handles that were actually opened.
+ if (hWrite != nullptr)
+ {
+ RegCloseKey(hWrite);
+ }
+ if (currentUser != nullptr)
+ {
+ RegCloseKey(currentUser);
+ }
+ return HRESULT_FROM_WIN32(result);
+}
+
+
+/*---------------------------------------------------------------------------*/
+/* Security Access Utility Class */
+/*---------------------------------------------------------------------------*/
+
+//
+// Creates a primary token that duplicates the access token of the calling thread.
+// Caller must CloseHandle when it is no longer needed.
+//
+HRESULT SecurityUtils::GetCurrentThreadPrimaryToken(_Out_ PHANDLE pPrimaryToken)
+{
+ HANDLE token;
+ HRESULT result = NO_ERROR;
+
+ // OpenAsSelf=TRUE: query the thread token using the process security
+ // context, so this works while the thread is impersonating.
+ if (!OpenThreadToken(
+ GetCurrentThread(),
+ TOKEN_ALL_ACCESS,
+ TRUE,
+ &token)
+ )
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // Duplicate the impersonation token into a primary token the caller
+ // can use (e.g. with CreateProcessAsUser). Caller owns *pPrimaryToken.
+ if (!DuplicateTokenEx(
+ token,
+ TOKEN_ALL_ACCESS,
+ nullptr,
+ SecurityImpersonation,
+ TokenPrimary,
+ pPrimaryToken)
+ )
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ CloseHandle(token);
+ return result;
+}
+
+
+//
+// Checks if given token is a member of the well known group.
+//
+HRESULT
+SecurityUtils::IsGroupMember(
+ _In_ WELL_KNOWN_SID_TYPE wellKnownSidType,
+ _In_ HANDLE token,
+ _Out_ PBOOL pIsMember
+ )
+{
+ SID* pGroupSid = nullptr;
+ DWORD cbSid = 0;
+ HRESULT result = NO_ERROR;
+
+ *pIsMember = FALSE;
+
+ //
+ // Get the size of group sid (expected to fail with
+ // ERROR_INSUFFICIENT_BUFFER and set cbSid).
+ //
+ if (!CreateWellKnownSid(wellKnownSidType, nullptr, pGroupSid, &cbSid))
+ {
+ DWORD gle = GetLastError();
+ if (gle != ERROR_INSUFFICIENT_BUFFER)
+ {
+ result = HRESULT_FROM_WIN32(gle);
+ goto exit_fn;
+ }
+ }
+
+ //
+ // Allocate necessary memory for group sid and create it.
+ // (Restored the static_cast target type lost to markup stripping.)
+ //
+ pGroupSid = static_cast<SID*>(malloc(cbSid));
+ if (pGroupSid == nullptr)
+ {
+ // Fix: return an HRESULT, not the Win32 code ERROR_OUTOFMEMORY.
+ result = E_OUTOFMEMORY;
+ goto exit_fn;
+ }
+
+ if (!CreateWellKnownSid(wellKnownSidType, nullptr, pGroupSid, &cbSid))
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ goto exit_fn;
+ }
+
+ //
+ // Check token membership (nullptr token = the caller's thread token).
+ //
+ if (!CheckTokenMembership(token, pGroupSid, pIsMember))
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ goto exit_fn;
+ }
+
+exit_fn:
+ free(pGroupSid);
+ return result;
+}
+
+
+HRESULT
+SecurityUtils::IsCurrentProcessInteractive(
+ _Out_ PBOOL pIsInteractive
+)
+{
+ if (pIsInteractive == nullptr)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // nullptr token makes API use an impersonation token of the calling thread
+ //
+ return IsGroupMember(WinInteractiveSid, nullptr, pIsInteractive);
+}
+
+
+//
+// Gets the user name and domain for given token.
+//
+HRESULT
+SecurityUtils::GetTokenUser(
+ _In_ HANDLE token,
+ _Out_opt_ LPWSTR pUser,
+ _Inout_ LPDWORD pcchUser,
+ _Out_opt_ LPWSTR pDomain,
+ _Inout_ LPDWORD pcchDomain
+ )
+{
+ PTOKEN_USER pTokenUser = nullptr;
+ DWORD cbTokenUser = 0;
+ HRESULT result = S_OK;
+ SID_NAME_USE snu;
+
+ //
+ // Grow-and-retry loop: the first call fails with
+ // ERROR_INSUFFICIENT_BUFFER and reports the needed size.
+ //
+ while (!GetTokenInformation(
+ token,
+ TokenUser,
+ pTokenUser,
+ cbTokenUser,
+ &cbTokenUser)
+ )
+ {
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ goto exit_fn;
+ }
+
+ free(pTokenUser);
+ // Restored the static_cast target type lost to markup stripping.
+ pTokenUser = static_cast<PTOKEN_USER>(malloc(cbTokenUser));
+ if (pTokenUser == nullptr)
+ {
+ result = E_OUTOFMEMORY;
+ goto exit_fn;
+ }
+ }
+
+ Assert(pTokenUser != nullptr);
+
+ // Translate the SID into account (user) and domain names; the
+ // pcch* sizes are updated by LookupAccountSidW on failure.
+ if (!LookupAccountSidW(
+ nullptr,
+ pTokenUser->User.Sid,
+ pUser,
+ pcchUser,
+ pDomain,
+ pcchDomain,
+ &snu))
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ }
+
+exit_fn:
+ free(pTokenUser);
+ return result;
+}
+
+
+//
+// Gets the user name and domain of the current user.
+//
+HRESULT
+SecurityUtils::GetCurrentUser(
+ _Out_opt_ LPWSTR pUser,
+ _Inout_ LPDWORD pcchUser,
+ _Out_opt_ LPWSTR pDomain,
+ _Inout_ LPDWORD pcchDomain
+ )
+{
+ HANDLE currentToken = nullptr;
+ HRESULT result;
+
+ // Prefer the thread (impersonation) token; restored the '&' that
+ // was mangled to a '\xA4' character by an earlier encoding pass.
+ if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE, &currentToken))
+ {
+ DWORD gle = GetLastError();
+ if (gle != ERROR_NO_TOKEN)
+ {
+ result = HRESULT_FROM_WIN32(gle);
+ goto exit_fn;
+ }
+
+ //
+ // If current thread does not have a token, use process token
+ //
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &currentToken))
+ {
+ result = HRESULT_FROM_WIN32(GetLastError());
+ goto exit_fn;
+ }
+ }
+
+ result = GetTokenUser(currentToken, pUser, pcchUser, pDomain, pcchDomain);
+
+exit_fn:
+ // Guard: both open calls may have failed, leaving no handle to close.
+ if (currentToken != nullptr)
+ {
+ CloseHandle(currentToken);
+ }
+ return result;
+}
+
+
+HRESULT
+SecurityUtils::GrantPrivilege(
+ _In_ HANDLE hToken,
+ _In_ LPCTSTR privilege,
+ _In_ BOOL enable
+)
+{
+ TOKEN_PRIVILEGES adjustedPrivs;
+ LUID luid;
+
+ // Resolve the privilege name (e.g. SE_* constants) to its LUID.
+ if (!LookupPrivilegeValue(
+ nullptr,
+ privilege,
+ &luid))
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ adjustedPrivs.PrivilegeCount = 1;
+ adjustedPrivs.Privileges[0].Luid = luid;
+ adjustedPrivs.Privileges[0].Attributes = (enable ? SE_PRIVILEGE_ENABLED : 0);
+
+ // The return value is deliberately based on GetLastError():
+ // AdjustTokenPrivileges can return TRUE yet set
+ // ERROR_NOT_ALL_ASSIGNED, which this code surfaces as a failure.
+ AdjustTokenPrivileges(
+ hToken,
+ FALSE,
+ &adjustedPrivs,
+ 0,
+ nullptr,
+ nullptr);
+
+ return HRESULT_FROM_WIN32(GetLastError());
+}
+
+
+//
+// Get the SID for a given account/group name.
+// Caller is responsible for invoking free(*ppSid).
+//
+HRESULT
+SecurityUtils::GetSidForAccount(
+ _In_opt_z_ LPCWSTR pSystemName,
+ _In_ LPCWSTR pAccountName,
+ _Outptr_ PSID* ppSid
+)
+{
+ PSID pSid = nullptr;
+ LPWSTR pRefDomain = nullptr;
+ DWORD cbRefDomain = 0;
+ DWORD cbSid = 0;
+ SID_NAME_USE eUse;
+
+ //
+ // First call sizes the buffers; success with empty buffers would
+ // mean the name mapped to nothing usable.
+ //
+ if (LookupAccountNameW(
+ pSystemName,
+ pAccountName,
+ 0,
+ &cbSid,
+ 0,
+ &cbRefDomain,
+ &eUse))
+ {
+ return HRESULT_FROM_WIN32(ERROR_NONE_MAPPED);
+ }
+
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // Restored the static_cast target types lost to markup stripping.
+ // cbSid is in bytes; cbRefDomain is in characters.
+ if (cbSid != 0)
+ {
+ pSid = static_cast<PSID>(malloc(cbSid));
+ if (pSid == nullptr)
+ {
+ return E_OUTOFMEMORY;
+ }
+ }
+
+ if (cbRefDomain != 0)
+ {
+ pRefDomain = static_cast<LPWSTR>(malloc(cbRefDomain * sizeof(wchar_t)));
+ if (pRefDomain == nullptr)
+ {
+ free(pSid);
+ return E_OUTOFMEMORY;
+ }
+ }
+
+ if (!LookupAccountNameW(
+ pSystemName,
+ pAccountName,
+ pSid,
+ &cbSid,
+ pRefDomain,
+ &cbRefDomain,
+ &eUse))
+ {
+ free(pRefDomain);
+ free(pSid);
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // Caller owns *ppSid and must free() it; the domain name is only
+ // needed by the lookup and is released here.
+ free(pRefDomain);
+ *ppSid = pSid;
+ return S_OK;
+}
diff --git a/src/mpi/common/SvcUtils.h b/src/mpi/common/SvcUtils.h
new file mode 100644
index 0000000..a4182d4
--- /dev/null
+++ b/src/mpi/common/SvcUtils.h
@@ -0,0 +1,133 @@
+/*
+* Copyright (c) Microsoft Corporation. All rights reserved.
+* Licensed under the MIT License.
+*
+* This file includes declarations of utility classes:
+* Windows Event Logger Utilities
+* Registry Utilities
+* Security Utilities
+*/
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+/*---------------------------------------------------------------------------*/
+/* Windows Event Logger Utility Class */
+/*---------------------------------------------------------------------------*/
+
+#define MAX_LOG_TEXT 1024
+
+class EventLogger
+{
+private:
+ // Handle from RegisterEventSourceW; nullptr until Open() succeeds.
+ HANDLE m_eventLog;
+
+public:
+ EventLogger();
+
+ // Registers pEventSource as a local event source; FALSE on failure.
+ BOOL Open(_In_z_ PCWSTR pEventSource);
+
+ // Formats (printf-style) and reports a single-string event;
+ // messages longer than MAX_LOG_TEXT are truncated.
+ void
+ WriteEvent(
+ _In_ WORD type,
+ _In_ WORD category,
+ _In_ DWORD eventId,
+ _Printf_format_string_ PCWSTR pFormatStr,
+ ...
+ );
+
+ ~EventLogger();
+};
+
+
+/*---------------------------------------------------------------------------*/
+/* Registry Access Utility Class */
+/*---------------------------------------------------------------------------*/
+
+class RegistryUtils
+{
+public:
+ // Reads a REG_SZ value under key\pSubKey; pcbValue is in/out bytes.
+ static HRESULT
+ ReadKey(
+ _In_ HKEY key,
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_z_ LPCWSTR pValueName,
+ _Inout_ LPDWORD pcbValue,
+ _Out_ LPWSTR pValue
+ );
+
+ // Reads a REG_SZ value from HKCU of the impersonated user.
+ static HRESULT
+ ReadCurrentUserKey(
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_z_ LPCWSTR pValueName,
+ _Inout_ LPDWORD pcbValue,
+ _Out_writes_z_(*pcbValue) LPWSTR pValue
+ );
+
+ // Writes a REG_SZ value to HKCU of the impersonated user, creating
+ // the key if needed; cbValue is the value size in bytes.
+ static HRESULT
+ WriteCurrentUserKey(
+ _In_opt_z_ LPCWSTR pSubKey,
+ _In_z_ LPCWSTR pValueName,
+ _In_ DWORD cbValue,
+ _In_z_ LPCWSTR pValue
+ );
+};
+
+
+/*---------------------------------------------------------------------------*/
+/* Security Access Utility Class */
+/*---------------------------------------------------------------------------*/
+
+class SecurityUtils
+{
+public:
+ // Duplicates the calling thread's token into a primary token;
+ // caller must CloseHandle(*pPrimaryToken).
+ static HRESULT GetCurrentThreadPrimaryToken(_Out_ PHANDLE pPrimaryToken);
+
+ // Checks membership of 'token' (nullptr = calling thread) in a
+ // well-known group.
+ static HRESULT
+ IsGroupMember(
+ _In_ WELL_KNOWN_SID_TYPE wellKnownSidType,
+ _In_ HANDLE token,
+ _Out_ PBOOL pIsMember
+ );
+
+ // Resolves the user and domain names for a token; pcch* sizes are
+ // in/out character counts.
+ static HRESULT
+ GetTokenUser(
+ _In_ HANDLE token,
+ _Out_opt_ LPWSTR pUser,
+ _Inout_ LPDWORD pcchUser,
+ _Out_opt_ LPWSTR pDomain,
+ _Inout_ LPDWORD pcchDomain
+ );
+
+ // Same as GetTokenUser for the current thread/process token.
+ static HRESULT
+ GetCurrentUser(
+ _Out_opt_ LPWSTR pUser,
+ _Inout_ LPDWORD pcchUser,
+ _Out_opt_ LPWSTR pDomain,
+ _Inout_ LPDWORD pcchDomain
+ );
+
+ // Enables or disables a named privilege on 'token'.
+ static HRESULT
+ GrantPrivilege(
+ _In_ HANDLE token,
+ _In_ LPCTSTR privilege,
+ _In_ BOOL enable
+ );
+
+ // Looks up the SID for an account/group; caller free()s *ppSid.
+ static HRESULT
+ GetSidForAccount(
+ _In_opt_z_ LPCWSTR pSystemName,
+ _In_z_ LPCWSTR pAccountName,
+ _Outptr_ PSID* ppSid
+ );
+
+ // TRUE when the calling thread's token is in the Interactive group.
+ static HRESULT
+ IsCurrentProcessInteractive(
+ _Out_ PBOOL pIsInteractive
+ );
+};
diff --git a/src/mpi/common/Util.cpp b/src/mpi/common/Util.cpp
new file mode 100644
index 0000000..05bb0c0
--- /dev/null
+++ b/src/mpi/common/Util.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "util.h"
+#include "kernel32util.h"
+
+//
+// Summary:
+// Ensure that the OS version is greater than or equal to the specified version.
+//
+// Parameters:
+// major - Windows major version
+// minor - Windows minor version
+//
+_Success_(return!=FALSE)
+BOOL
+CheckOSVersion(
+ _In_ DWORD major,
+ _In_ DWORD minor
+ )
+{
+ OSVERSIONINFOEX version = {};
+ ULONG mask = VER_MAJORVERSION | VER_MINORVERSION;
+ ULONGLONG conditions = 0;
+
+ version.dwOSVersionInfoSize = sizeof(version);
+ version.dwMajorVersion = major;
+ version.dwMinorVersion = minor;
+
+ // Build "reported version >= (major, minor)" condition mask.
+ // NOTE(review): on Windows 8.1+ VerifyVersionInfo is subject to the
+ // application's compatibility manifest -- presumably acceptable
+ // here; confirm the hosting binaries are manifested appropriately.
+ conditions = ::VerSetConditionMask( conditions, VER_MAJORVERSION, VER_GREATER_EQUAL );
+ conditions = ::VerSetConditionMask( conditions, VER_MINORVERSION, VER_GREATER_EQUAL );
+
+ return ::VerifyVersionInfo( &version, mask, conditions );
+}
+
+
+static const wchar_t * const AZURE_REGISTRY_VALUE = L"NodeLogicalName";
+static const wchar_t * const AZURE_REGISTRY_KEY = L"SOFTWARE\\MICROSOFT\\HPC";
+
+//
+// Summary:
+// Check if the smpd instance is running on azure and if so,
+// return the logical name of the node
+//
+// Input:
+// szBuffer: the size of the name buffer
+//
+// Output:
+// buffer : store the logical name. If null, name is not returned
+//
+// Return:
+// true if the node is on azure
+// false if the node is not on azure, or if the size of the buffer is
+// too small
+//
+//
+bool get_azure_node_logical_name(
+ _Out_opt_z_cap_(szBuffer) wchar_t* buffer,
+ _In_ DWORD szBuffer )
+{
+ HKEY key;
+ //
+ // NOTE(review): RegQueryValueExW takes 'size' as a BYTE count, while
+ // the annotation documents szBuffer as an element capacity -- confirm
+ // callers pass a byte-sized capacity. Also assumes szBuffer >= 1.
+ //
+ DWORD size = szBuffer - 1;
+ DWORD status = RegOpenKeyExW( HKEY_LOCAL_MACHINE,
+ AZURE_REGISTRY_KEY,
+ NULL,
+ KEY_READ,
+ &key );
+
+ if( status != ERROR_SUCCESS )
+ {
+ return false;
+ }
+
+ //
+ // Restored the reinterpret_cast target type (LPBYTE) lost to markup
+ // stripping. A null 'buffer' queries only for existence.
+ //
+ status = RegQueryValueExW( key,
+ AZURE_REGISTRY_VALUE,
+ NULL,
+ NULL,
+ reinterpret_cast<LPBYTE>(buffer),
+ &size );
+
+ RegCloseKey( key );
+
+ if( status != ERROR_SUCCESS )
+ {
+ return false;
+ }
+
+ if( buffer != NULL )
+ {
+ // Ensure termination; 'size' holds the bytes written by the query.
+ buffer[size] = L'\0';
+ }
+
+ return true;
+}
\ No newline at end of file
diff --git a/src/mpi/common/argstr.cpp b/src/mpi/common/argstr.cpp
new file mode 100644
index 0000000..3c66603
--- /dev/null
+++ b/src/mpi/common/argstr.cpp
@@ -0,0 +1,697 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * Copyright (C) 1997 University of Chicago.
+ * See COPYRIGHT notice in top-level directory.
+ */
+#include "precomp.h"
+
+
+#define MPIU_STR_TRUNCATED MPIU_STR_NOMEM
+
+/* Skips leading separator characters. Returns a pointer to the first
+ * token character, or nullptr if str is null, empty, or all separators. */
+_Success_(return != nullptr)
+_Ret_z_
+static const char *
+first_token(
+ _In_opt_z_ const char *str
+ )
+{
+ if (str == nullptr)
+ return nullptr;
+ /* isspace is defined only if isascii is true */
+ while (/*isascii(*str) && isspace(*str)*/ *str == MPIU_STR_SEPAR_CHAR)
+ str++;
+ if (*str == '\0')
+ return nullptr;
+ return str;
+}
+
+
+/* Advances past the token at 'str' (quoted string, DELIM character, or
+ * literal) and returns the start of the following token, or nullptr when
+ * no further token exists (including an unterminated quoted string). */
+_Ret_maybenull_z_
+static const char*
+next_token(
+ _In_opt_z_ const char *str
+ )
+{
+ if (str == nullptr)
+ return nullptr;
+ str = first_token(str);
+ if (str == nullptr)
+ return nullptr;
+ if (*str == MPIU_STR_QUOTE_CHAR)
+ {
+ /* move over string */
+ str++; /* move over the first quote */
+ if (*str == '\0')
+ return nullptr;
+ while (*str != MPIU_STR_QUOTE_CHAR)
+ {
+ /* move until the last quote, ignoring escaped characters */
+ if (*str == MPIU_STR_ESCAPE_CHAR)
+ {
+ str++;
+ }
+ str++;
+ if (*str == '\0')
+ return nullptr;
+ }
+ str++; /* move over the last quote */
+ }
+ else
+ {
+ if (*str == MPIU_STR_DELIM_CHAR)
+ {
+ /* move over the DELIM token */
+ str++;
+ }
+ else
+ {
+ /* move over literal */
+ while (/*(isascii(*str) &&
+ !isspace(*str)) &&*/
+ *str != MPIU_STR_SEPAR_CHAR &&
+ *str != MPIU_STR_DELIM_CHAR &&
+ *str != '\0')
+ str++;
+ }
+ }
+ return first_token(str);
+}
+
+/* Compares the token at 'token' against the whole string 'str'.
+ * Returns 0 on match, and a strcmp-style negative/positive value
+ * otherwise. Handles quoted tokens (with escapes), the single-character
+ * DELIM token, and plain literals delimited by separator/DELIM/NUL. */
+static int
+compare_token(
+ _In_opt_z_ const char *token,
+ _In_opt_z_ const char *str
+ )
+{
+ if (token == nullptr || str == nullptr)
+ return -1;
+
+ if (*token == MPIU_STR_QUOTE_CHAR)
+ {
+ /* compare quoted strings */
+ token++; /* move over the first quote */
+ /* compare characters until reaching the end of the string or the end quote character */
+ for(;;)
+ {
+ if (*token == MPIU_STR_ESCAPE_CHAR)
+ {
+ token++;
+ if (*token != *str)
+ break;
+ }
+ else
+ {
+ if (*token != *str || *token == MPIU_STR_QUOTE_CHAR)
+ break;
+ }
+ if (*str == '\0')
+ break;
+ token++;
+ str++;
+ }
+ if (*str == '\0' && *token == MPIU_STR_QUOTE_CHAR)
+ return 0;
+ if (*token == MPIU_STR_QUOTE_CHAR)
+ return 1;
+ if (*str < *token)
+ return -1;
+ return 1;
+ }
+
+ /* compare DELIM token */
+ if (*token == MPIU_STR_DELIM_CHAR)
+ {
+ if (*str == MPIU_STR_DELIM_CHAR)
+ {
+ str++;
+ if (*str == '\0')
+ return 0;
+ return 1;
+ }
+ if (*token < *str)
+ return -1;
+ return 1;
+ }
+
+ /* compare literals */
+ while (*token == *str &&
+ *str != '\0' &&
+ *token != MPIU_STR_DELIM_CHAR &&
+ (/*isascii(*token) && !isspace(*token)*/ *token != MPIU_STR_SEPAR_CHAR) )
+ {
+ token++;
+ str++;
+ }
+ if ( (*str == '\0') &&
+ (*token == MPIU_STR_DELIM_CHAR ||
+ (/*isascii(*token) && isspace(*token)*/ *token == MPIU_STR_SEPAR_CHAR) || *token == '\0') )
+ return 0;
+ if (*token == MPIU_STR_DELIM_CHAR ||
+ (/*isascii(*token) && isspace(*token)*/ *token == MPIU_STR_SEPAR_CHAR) || *token < *str)
+ return -1;
+ return 1;
+}
+
+/* Copies the token at 'token' (unquoting/unescaping quoted tokens) into
+ * 'str' of capacity 'maxlen'. Returns MPIU_STR_SUCCESS, MPIU_STR_TRUNCATED
+ * on overflow (output still null-terminated), or MPIU_STR_FAIL on bad
+ * input such as an unterminated quoted token. */
+_Success_(return == MPIU_STR_SUCCESS)
+static int
+token_copy(
+ _In_opt_z_ const char *token,
+ _Out_writes_z_(maxlen) char *str,
+ _In_ size_t maxlen
+ )
+{
+ /* check parameters */
+ if (token == nullptr || str == nullptr)
+ return MPIU_STR_FAIL;
+
+ /* check special buffer lengths */
+ if (maxlen < 1)
+ return MPIU_STR_FAIL;
+ if (maxlen == 1)
+ {
+ *str = '\0';
+ return (token[0] == '\0') ? MPIU_STR_SUCCESS : MPIU_STR_TRUNCATED;
+ }
+
+ /* cosy up to the token */
+ token = first_token(token);
+ if (token == nullptr)
+ {
+ *str = '\0';
+ return MPIU_STR_SUCCESS;
+ }
+
+ if (*token == MPIU_STR_DELIM_CHAR)
+ {
+ /* copy the special delimiter token */
+ str[0] = MPIU_STR_DELIM_CHAR;
+ str[1] = '\0';
+ return MPIU_STR_SUCCESS;
+ }
+
+ if (*token == MPIU_STR_QUOTE_CHAR)
+ {
+ /* quoted copy */
+ token++; /* move over the first quote */
+
+ do
+ {
+ if (*token == MPIU_STR_QUOTE_CHAR)
+ {
+ *str = '\0';
+ return MPIU_STR_SUCCESS;
+ }
+
+ if (*token == MPIU_STR_ESCAPE_CHAR)
+ {
+ token++;
+ }
+
+ if( *token == '\0' )
+ {
+ //
+ // Bad input - we're expecting a closing quote
+ //
+ return MPIU_STR_FAIL;
+ }
+
+ *str = *token;
+ str++;
+ token++;
+
+ } while( --maxlen > 0 );
+
+ /* we've run out of destination characters so back up and null terminate the string */
+ str--;
+ *str = '\0';
+ return MPIU_STR_TRUNCATED;
+ }
+
+ /* literal copy */
+ while (*token != MPIU_STR_DELIM_CHAR &&
+ (/*isascii(*token) && !isspace(*token)*/ *token != MPIU_STR_SEPAR_CHAR) && *token != '\0' && maxlen)
+ {
+ *str = *token;
+ str++;
+ token++;
+ maxlen--;
+ }
+ if (maxlen)
+ {
+ *str = '\0';
+ return MPIU_STR_SUCCESS;
+ }
+ str--;
+ *str = '\0';
+ return MPIU_STR_TRUNCATED;
+}
+
+
+/*@ MPIU_Str_get_string_arg - Extract an option from a string with a maximum length
+
+ Input Parameters:
++ str - Source string
+. key - key
+- val_len - Maximum total length of 'val'
+
+ Output Parameter:
+. val - output string
+
+ Return value:
+ MPIU_STR_SUCCESS, MPIU_STR_NOMEM, MPIU_STR_FAIL
+
+ Notes:
+ This routine searches for a "key = value" entry in a string
+
+ Module:
+ Utility
+ @*/
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_get_string_arg(
+ _In_opt_z_ const char* str,
+ _In_opt_z_ const char* flag,
+ _Out_writes_z_(val_len) char* val,
+ _In_ size_t val_len
+ )
+{
+ if (val_len < 1)
+ return MPIU_STR_FAIL;
+
+ /* line up with the first token */
+ str = first_token(str);
+ if (str == nullptr)
+ return MPIU_STR_FAIL;
+
+ /* This loop will match the first instance of "flag = value" in the string. */
+ /* i.e. scan tokens for: <flag> <DELIM> <value>, then copy <value>. */
+ do
+ {
+ if (compare_token(str, flag) == 0)
+ {
+ str = next_token(str);
+ if (compare_token(str, MPIU_STR_DELIM_STR) == 0)
+ {
+ str = next_token(str);
+ if (str == nullptr)
+ return MPIU_STR_FAIL;
+ return token_copy(str, val, val_len);
+ }
+ }
+ else
+ {
+ str = next_token(str);
+ }
+ } while (str);
+ return MPIU_STR_FAIL;
+}
+
+
+/*@ MPIU_Str_get_int_arg - Extract an option from a string
+
+ Input Parameters:
++ str - Source string
+- key - key
+
+ Output Parameter:
+. val_ptr - pointer to the output integer
+
+ Return value:
+ MPIU_STR_SUCCESS, MPIU_STR_NOMEM, MPIU_STR_FAIL
+
+ Notes:
+ This routine searches for a "key = value" entry in a string and decodes the value
+ back to an int.
+
+ Module:
+ Utility
+ @*/
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_get_int_arg(
+ _In_z_ const char *str,
+ _In_z_ const char *flag,
+ _Out_ int *val_ptr
+ )
+{
+ int result;
+ /* 12 chars: enough for "-2147483648" plus the terminating null. */
+ char int_str[12];
+
+ result = MPIU_Str_get_string_arg(str, flag, int_str, _countof(int_str));
+ if (result == MPIU_STR_SUCCESS)
+ {
+ *val_ptr = atoi(int_str);
+ return MPIU_STR_SUCCESS;
+ }
+ return result;
+}
+
+/* quoted_printf does not nullptr terminate the string if maxlen is reached.
+ * Writes 'val' into 'str' wrapped in quote characters, escaping embedded
+ * quote/escape characters. Returns the number of characters written. */
+_Success_(return < maxlen)
+static
+int
+quoted_printf(
+ _Out_writes_z_(maxlen) char *str,
+ _In_ int maxlen,
+ _In_z_ const char *val)
+{
+ int count = 0;
+ if (maxlen < 1)
+ return 0;
+ *str = MPIU_STR_QUOTE_CHAR;
+ str++;
+ maxlen--;
+ count++;
+ while (maxlen)
+ {
+ if (*val == '\0')
+ break;
+ if (*val == MPIU_STR_QUOTE_CHAR || *val == MPIU_STR_ESCAPE_CHAR)
+ {
+ /* escape characters that would otherwise end/alter the quote */
+ *str = MPIU_STR_ESCAPE_CHAR;
+ str++;
+ maxlen--;
+ count++;
+ if (maxlen == 0)
+ return count;
+ }
+ *str = *val;
+ str++;
+ maxlen--;
+ count++;
+ val++;
+ }
+ if (maxlen)
+ {
+ *str = MPIU_STR_QUOTE_CHAR;
+ str++;
+ maxlen--;
+ count++;
+ if (maxlen == 0)
+ return count;
+ *str = '\0';
+ }
+ return count;
+}
+
+/*@ MPIU_Str_add_string - Add a string to a string
+
+ Input Parameters:
++ str_ptr - pointer to the destination string
+. maxlen_ptr - pointer to the maximum length of '*str_ptr'
+- val - string to add
+
+ Output Parameter:
++ str_ptr - The string pointer is updated to the next available location in the string
+- maxlen_ptr - maxlen is decremented by the amount str_ptr is incremented
+
+ Return value:
+ MPIU_STR_SUCCESS, MPIU_STR_NOMEM, MPIU_STR_FAIL
+
+ Notes:
+ This routine adds a string to a string in such a way that MPIU_Str_get_string can
+ retreive the same string back. It takes into account spaces and quote characters.
+ The string pointer is updated to the start of the next string in the string and maxlen
+ is updated accordingly.
+
+ Module:
+ Utility
+ @*/
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_string(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR* str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *val
+ )
+{
+ int num_chars;
+ char *str;
+ int maxlen;
+
+ str = *str_ptr;
+ maxlen = *maxlen_ptr;
+
+ /* values containing separator/quote/delim characters must be quoted
+ so MPIU_Str_get_string can parse them back out */
+ if (strchr(val, MPIU_STR_SEPAR_CHAR) ||
+ strchr(val, MPIU_STR_QUOTE_CHAR) ||
+ strchr(val, MPIU_STR_DELIM_CHAR))
+ {
+ num_chars = quoted_printf(str, maxlen, val);
+ if (num_chars == maxlen)
+ {
+ /* truncation, cleanup string */
+ *str = '\0';
+ return -1;
+ }
+ if (num_chars < maxlen - 1)
+ {
+ str[num_chars] = MPIU_STR_SEPAR_CHAR;
+ str[num_chars+1] = '\0';
+ num_chars++;
+ }
+ else
+ {
+ str[num_chars] = '\0';
+ }
+ }
+ else
+ {
+ if (*val == '\0')
+ {
+ /* an empty value is encoded as an empty quoted string */
+ num_chars = MPIU_Snprintf(str, maxlen, MPIU_STR_QUOTE_STR MPIU_STR_QUOTE_STR/*"\"\""*/);
+ }
+ else
+ {
+ num_chars = MPIU_Snprintf(str, maxlen, "%s%c", val, MPIU_STR_SEPAR_CHAR);
+ }
+ if ((num_chars < 0) || (num_chars == maxlen))
+ {
+ *str = '\0';
+ return -1;
+ }
+ }
+ /* advance the caller's cursor past what was written */
+ *str_ptr += num_chars;
+ *maxlen_ptr -= num_chars;
+ return 0;
+}
+
+/*@ MPIU_Str_get_string - Get the next string from a string
+
+ Input Parameters:
++ str_ptr - pointer to the destination string
+- val_len - to the maximum length of '*str_ptr'
+
+ Output Parameter:
++ str_ptr - location of the next string
+- val - location to store the string
+
+ Return Value:
+ The return value is 0 for success, -1 for insufficient buffer space, and -2 for failure.
+
+ Notes:
+ This routine gets a string that was previously added by MPIU_Str_add_string.
+ It takes into account spaces and quote characters. The string pointer is updated to the
+ start of the next string in the string.
+
+ Module:
+ Utility
+ @*/
+
+_Success_(return == 0)
+int
+MPIU_Str_get_string(
+ _Inout_ _Outptr_result_maybenull_z_ PCSTR* str_ptr,
+ _Out_writes_z_(val_len)char *val,
+ _In_ size_t val_len
+ )
+{
+ int result;
+ PCSTR str;
+
+ if (str_ptr == nullptr)
+ {
+ return -2;
+ }
+
+ if (val_len < 1)
+ {
+ return -2;
+ }
+
+ str = *str_ptr;
+ *val = '\0';
+
+ /* line up with the first token */
+ str = first_token(str);
+ if (str == nullptr)
+ {
+ /* no more tokens: success with an empty result; *str_ptr is
+ deliberately left unchanged */
+ return 0;
+ }
+
+ /* copy the token */
+ result = token_copy(str, val, val_len);
+ if (result == MPIU_STR_SUCCESS)
+ {
+ /* advance the caller's cursor to the following token */
+ str = next_token(str);
+ *str_ptr = str;
+ return 0;
+ }
+ else if (result == MPIU_STR_TRUNCATED)
+ {
+ return -1;
+ }
+
+ /* failure */
+ return -2;
+}
+
+/*@ MPIU_Str_add_string_arg - Add an option to a string with a maximum length
+
+ Input Parameters:
++ str_ptr - Pointer to the destination string
+. maxlen_ptr - Pointer to the maximum total length of '*str_ptr'
+. key - key
+- val - input string
+
+ Output Parameter:
++ str_ptr - The string pointer is updated to the next available location in the string
+- maxlen_ptr - maxlen is reduced by the number of characters written
+
+ Return value:
+ MPIU_STR_SUCCESS, MPIU_STR_NOMEM, MPIU_STR_FAIL
+
+ Notes:
+ This routine adds a string option to a string in the form "key = value".
+
+ Module:
+ Utility
+ @*/
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_string_arg(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR* str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *flag,
+ _In_z_ const char *val
+ )
+{
+ int num_chars;
+ char *orig_str_ptr;
+ int orig_maxlen;
+
+ if (maxlen_ptr == nullptr)
+ return MPIU_STR_FAIL;
+
+ /* remember the starting state so the whole "flag=value" entry can be
+ rolled back atomically if it does not fit */
+ orig_maxlen = *maxlen_ptr;
+ orig_str_ptr = *str_ptr;
+
+ if (*maxlen_ptr < 1)
+ return MPIU_STR_FAIL;
+
+ /* add the flag */
+/* printf("strstr flag\n"); */
+ if (strstr(flag, MPIU_STR_SEPAR_STR) || strstr(flag, MPIU_STR_DELIM_STR) || flag[0] == MPIU_STR_QUOTE_CHAR)
+ {
+ num_chars = quoted_printf(*str_ptr, *maxlen_ptr, flag);
+ }
+ else
+ {
+ num_chars = MPIU_Snprintf(*str_ptr, *maxlen_ptr, "%s", flag);
+ }
+
+ if(num_chars < 0)
+ {
+ /* MPIU_Snprintf returned an error/overflow indicator; treat the
+ buffer as full */
+ num_chars = *maxlen_ptr;
+ }
+
+ *maxlen_ptr = *maxlen_ptr - num_chars;
+ if (*maxlen_ptr < 1)
+ {
+ **str_ptr = '\0';
+ /*(*str_ptr)[num_chars-1] = '\0';*/
+ return MPIU_STR_NOMEM;
+ }
+ *str_ptr = *str_ptr + num_chars;
+
+ /* add the delimiter character */
+ **str_ptr = MPIU_STR_DELIM_CHAR;
+ *str_ptr = *str_ptr + 1;
+ *maxlen_ptr = *maxlen_ptr - 1;
+
+ /* add the value string */
+/* printf("strstr val\n"); */
+ if (strstr(val, MPIU_STR_SEPAR_STR) || strstr(val, MPIU_STR_DELIM_STR) || val[0] == MPIU_STR_QUOTE_CHAR)
+ {
+ num_chars = quoted_printf(*str_ptr, *maxlen_ptr, val);
+ }
+ else
+ {
+ if (*val == '\0')
+ {
+ num_chars = MPIU_Snprintf(*str_ptr, *maxlen_ptr, MPIU_STR_QUOTE_STR MPIU_STR_QUOTE_STR/*"\"\""*/);
+ }
+ else
+ {
+ num_chars = MPIU_Snprintf(*str_ptr, *maxlen_ptr, "%s", val);
+ }
+ }
+
+ if(num_chars < 0)
+ {
+ num_chars = *maxlen_ptr;
+ }
+
+ *str_ptr = *str_ptr + num_chars;
+ *maxlen_ptr = *maxlen_ptr - num_chars;
+ if (*maxlen_ptr < 2)
+ {
+ /* roll back: restore the caller's cursor and erase the partial entry */
+ *orig_str_ptr = '\0';
+ *str_ptr = orig_str_ptr;
+ *maxlen_ptr = orig_maxlen;
+ return MPIU_STR_NOMEM;
+ }
+
+ /* add the trailing space */
+ **str_ptr = MPIU_STR_SEPAR_CHAR;
+ *str_ptr = *str_ptr + 1;
+ **str_ptr = '\0';
+ *maxlen_ptr = *maxlen_ptr - 1;
+
+ return MPIU_STR_SUCCESS;
+}
+
+/*@ MPIU_Str_add_int_arg - Add an option to a string with a maximum length
+
+ Input Parameters:
++ str_ptr - Pointer to the destination string
+. maxlen_ptr - Pointer to the maximum total length of '*str_ptr'
+. key - key
+- val - input integer
+
+ Output Parameter:
++ str_ptr - The string pointer is updated to the next available location in the string
+- maxlen_ptr - maxlen is reduced by the number of characters written
+
+ Return value:
+ MPIU_STR_SUCCESS, MPIU_STR_NOMEM, MPIU_STR_FAIL
+
+ Notes:
+ This routine adds an integer option to a string in the form "key = value".
+
+ Module:
+ Utility
+ @*/
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_int_arg(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR* str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *flag,
+ _In_ int val
+ )
+{
+ /* 12 chars: enough for "-2147483648" plus the terminating null. */
+ char val_str[12];
+ MPIU_Snprintf(val_str, 12, "%d", val);
+ return MPIU_Str_add_string_arg(str_ptr, maxlen_ptr, flag, val_str);
+}
diff --git a/src/mpi/common/assertutil.h b/src/mpi/common/assertutil.h
new file mode 100644
index 0000000..7154d9f
--- /dev/null
+++ b/src/mpi/common/assertutil.h
@@ -0,0 +1,44 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+//
+//
+// Summary:
+// This file contains the assert utility macros.
+// Note: the "Invariant" versions are included in retail builds.
+//
+// InvariantAssert/Assert(exp_) :
+// This uses interrupt 2c, which enables new assert break and skip features.
+// If this assert fires, it will raise a fatal error and crash the host process.
+// If AeDebug key is set, this will cause the debugger to launch
+// When debugger attached, the 'ahi' command can be used to ignore the assertion and continue.
+// NOTE: Unlike the CRT assert, the strings for these asserts are stored in the symbols,
+// so asserts do not add strings to the image.
+//
+// InvariantAssertP/AssertP(exp_) :
+// This is the same as the InvariantAssert/Assert macro, except that it will only passively fire
+// when a debugger is actually attached.
+//
+//
+
+#define InvariantAssert(exp_) \
+ ((!(exp_)) ? \
+ (__annotation(L"Debug", L"AssertFail", L#exp_), \
+ __int2c(), FALSE) : \
+ TRUE)
+
+#define InvariantAssertP( exp_ ) \
+ ((!(exp_) && IsDebuggerPresent()) ? \
+ (__annotation(L"Debug", L"PassiveAssertFail", L#exp_), \
+ __int2c(), FALSE) : \
+ TRUE)
+
+#if DBG
+# define Assert(exp_) __analysis_assume(exp_);InvariantAssert(exp_)
+# define AssertP( exp_ ) __analysis_assume(exp_);InvariantAssertP(exp_)
+#else
+# define Assert( exp_ ) __analysis_assume(exp_)
+# define AssertP( exp_ ) __analysis_assume(exp_)
+#endif
diff --git a/src/mpi/common/baseerrnames.txt b/src/mpi/common/baseerrnames.txt
new file mode 100644
index 0000000..e366845
--- /dev/null
+++ b/src/mpi/common/baseerrnames.txt
@@ -0,0 +1,78 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#
+#
+# This file maps each MPI error class to a short name form.
+# This is used to support MPI_Error_string when applied to one of the
+# MPI classes rather than to an error code created by MPIR_Err_create_code.
+# By mapping the MPI codes to short generic names, we ensure that the
+# message strings for the defined MPI classes matches some message that
+# may be created by MPIR_Err_create_code.
+#
+# The format of this file is
+# mpi_err_xxx integer-value short-name
+# where "integer-value" is the same as in mpi.h (eventually, we should
+# generate this automatically).
+MPI_SUCCESS 0 **success
+# Communication argument parameters
+MPI_ERR_BUFFER 1 **buffer
+MPI_ERR_COUNT 2 **count
+MPI_ERR_TYPE 3 **dtype
+MPI_ERR_TAG 4 **tag
+MPI_ERR_COMM 5 **comm
+MPI_ERR_RANK 6 **rank
+MPI_ERR_ROOT 7 **root
+# MPI Objects (other than COMM)
+MPI_ERR_GROUP 8 **group
+MPI_ERR_OP 9 **op
+# Special topology argument parameters
+MPI_ERR_TOPOLOGY 10 **topology
+MPI_ERR_DIMS 11 **dims
+# All other arguments. This is a class with many kinds
+MPI_ERR_ARG 12 **arg
+# Other errors that are not simply an invalid argument
+MPI_ERR_UNKNOWN 13 **unknown
+MPI_ERR_TRUNCATE 14 **truncate
+MPI_ERR_OTHER 15 **other
+MPI_ERR_INTERN 16 **intern
+# Multiple completion has two special error classes
+MPI_ERR_IN_STATUS 17 **instatus
+MPI_ERR_PENDING 18 **inpending
+MPI_ERR_REQUEST 19 **request
+# New MPI-2 Error classes
+MPI_ERR_ACCESS 20 **fileaccess
+MPI_ERR_AMODE 21 **fileamode
+MPI_ERR_BAD_FILE 22 **filename
+MPI_ERR_CONVERSION 23 **conversion
+MPI_ERR_DUP_DATAREP 24 **datarepused
+MPI_ERR_FILE_EXISTS 25 **fileexist
+MPI_ERR_FILE_IN_USE 26 **fileinuse
+MPI_ERR_FILE 27 **file
+# MPI_ERR_INFO is NOT defined in the MPI-2 standard. I believe that
+# this is an oversight
+MPI_ERR_INFO 28 **info
+MPI_ERR_INFO_KEY 29 **infokey
+MPI_ERR_INFO_VALUE 30 **infoval
+MPI_ERR_INFO_NOKEY 31 **infonokey
+MPI_ERR_IO 32 **io
+MPI_ERR_NAME 33 **nameservice
+MPI_ERR_NO_MEM 34 **allocmem
+MPI_ERR_NOT_SAME 35 **notsame
+MPI_ERR_NO_SPACE 36 **filenospace
+MPI_ERR_NO_SUCH_FILE 37 **filenoexist
+MPI_ERR_PORT 38 **port
+MPI_ERR_QUOTA 39 **filequota
+MPI_ERR_READ_ONLY 40 **filerdonly
+MPI_ERR_SERVICE 41 **servicename
+MPI_ERR_SPAWN 42 **spawn
+MPI_ERR_UNSUPPORTED_DATAREP 43 **datarepunsupported
+MPI_ERR_UNSUPPORTED_OPERATION 44 **fileopunsupported
+MPI_ERR_WIN 45 **win
+MPI_ERR_BASE 46 **base
+MPI_ERR_LOCKTYPE 47 **locktype
+MPI_ERR_KEYVAL 48 **keyval
+MPI_ERR_RMA_CONFLICT 49 **rmaconflict
+MPI_ERR_RMA_SYNC 50 **rmasync
+MPI_ERR_SIZE 51 **rmasize
+MPI_ERR_DISP 52 **rmadisp
+MPI_ERR_ASSERT 53 **assert
diff --git a/src/mpi/common/dbg_printf.cpp b/src/mpi/common/dbg_printf.cpp
new file mode 100644
index 0000000..5604741
--- /dev/null
+++ b/src/mpi/common/dbg_printf.cpp
@@ -0,0 +1,134 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "precomp.h"
+
+#include "dbg_printf.h"
+
+//
+// This constant is used only by the debug extension to query about the minimum
+// version of the extension that this version of MSMPI can support.
+// When the debug extension attaches to a running MPI process, it does not know
+// whether the current MSMPI has the correct data structure layout to support
+// this current version of the debugger extension (or that the user would need
+// to upgrade to a newer version of the debug extension, as indicated by this
+// constant).
+//
+static const unsigned int MSMPI_DBGEXT_VER = 200;
+
+
+//
+// enable breaking on initialization
+//
+// -env MSMPI_INIT_BREAK preinit
+// breaks into all ranks before initialization
+//
+// -env MSMPI_INIT_BREAK all
+// breaks into all ranks after initialization
+//
+// -env MSMPI_INIT_BREAK a,b,d-f,x-z
+// breaks into ranks "a b d e f x y z" after initialization.
+// ranks are decimal numbers; separator is any character except '-'
+//
+
+_Success_(return==true)
+static inline
+bool
+get_break_env(
+ _Out_writes_z_(cchEnv) char* env,
+ _In_ DWORD cchEnv
+ )
+{
+ DWORD err = MPIU_Getenv( "MSMPI_INIT_BREAK",
+ env,
+ cchEnv );
+ if( err == ERROR_ENVVAR_NOT_FOUND )
+ {
+ err = MPIU_Getenv( "MPICH_INIT_BREAK",
+ env,
+ cchEnv );
+ }
+
+ return err == NOERROR;
+}
+
+
+void
+MPIU_dbg_preinit()
+{
+ char env[_countof("preinit")];
+ if( get_break_env( env, _countof(env) ) == false )
+ {
+ return;
+ }
+
+ if( CompareStringA( LOCALE_INVARIANT,
+ 0,
+ env,
+ -1,
+ "preinit",
+ -1 ) != CSTR_EQUAL )
+ {
+ return;
+ }
+
+ MPIU_Debug_break();
+}
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIU_dbg_init(
+ _In_ unsigned int rank,
+ _In_ unsigned int world_size
+ )
+{
+ char env[32767];
+ if( get_break_env(env, _countof(env) ) == false )
+ {
+ return MPI_SUCCESS;
+ }
+
+ //
+ // This one is called after MPIU_dbg_preinit but it parses the same
+ // environment variable so if we already parsed preinit earlier we
+ // will not do it again here.
+ //
+ if( CompareStringA( LOCALE_INVARIANT,
+ 0,
+ env,
+ -1,
+ "preinit",
+ -1 ) == CSTR_EQUAL )
+ {
+ return MPI_SUCCESS;
+ }
+
+ unsigned int unique_ranks;
+ bool isDebug;
+ int mpi_errno = MPI_SUCCESS;
+
+ mpi_errno = MPIU_Parse_rank_range( rank, env, world_size, &isDebug, &unique_ranks);
+
+ if ( mpi_errno == MPI_SUCCESS &&
+ isDebug == true )
+ {
+ MPIU_Debug_break();
+ }
+ return mpi_errno;
+}
+
+void
+MPIU_dbg_printf(
+ _Printf_format_string_ const char * str,
+ ...
+ )
+{
+ va_list list;
+ va_start(list, str);
+ vfprintf(stderr, str, list); fflush(stderr);
+ va_end(list);
+}
diff --git a/src/mpi/common/dbg_printf.h b/src/mpi/common/dbg_printf.h
new file mode 100644
index 0000000..d20876a
--- /dev/null
+++ b/src/mpi/common/dbg_printf.h
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+void MPIU_dbg_preinit();
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIU_dbg_init(
+ _In_ unsigned int rank,
+ _In_ unsigned int world_size
+ );
+
+void
+MPIU_dbg_printf(
+ _Printf_format_string_ const char * str,
+ ...
+ );
diff --git a/src/mpi/common/dump.cpp b/src/mpi/common/dump.cpp
new file mode 100644
index 0000000..01062b8
--- /dev/null
+++ b/src/mpi/common/dump.cpp
@@ -0,0 +1,260 @@
+/*++
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+ Licensed under the MIT License.
+
+dump.cpp - MPI minidump functionality
+
+--*/
+#include "precomp.h"
+#include "mpidump.h"
+#include <dbghelp.h>
+#include <strsafe.h>
+
+
+typedef BOOL
+(WINAPI *PFN_MiniDumpWriteDump)(
+ __in HANDLE hProcess,
+ __in DWORD ProcessId,
+ __in HANDLE hFile,
+ __in MINIDUMP_TYPE DumpType,
+ __in_opt PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
+ __in_opt PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
+ __in_opt PMINIDUMP_CALLBACK_INFORMATION CallbackParam
+ );
+
+
+MSMPI_DUMP_MODE GetDumpMode()
+{
+ wchar_t env[12];
+
+ DWORD err = MPIU_Getenv( L"MSMPI_DUMP_MODE", env, _countof(env) );
+ if( err != NOERROR )
+ {
+ return MsmpiDumpNone;
+ }
+
+ int val = _wtoi(env);
+ if( val > MsmpiDumpNone && val < MsmpiDumpMaximumValue )
+ {
+ return static_cast<MSMPI_DUMP_MODE>(val);
+ }
+
+ return MsmpiDumpNone;
+}
+
+
+void
+CreateFinalDumpFile(
+ _In_ HANDLE tempFileHandle,
+ _In_ int rank,
+ _In_z_ const wchar_t* dumpPath,
+ _In_ int jobid,
+ _In_ int taskid,
+ _In_ int taskinstid
+ )
+{
+ MPIU_Assert( tempFileHandle != INVALID_HANDLE_VALUE );
+ wchar_t tempFileName[MAX_PATH];
+ DWORD err = GetFinalPathNameByHandleW(
+ tempFileHandle,
+ tempFileName,
+ _countof( tempFileName ),
+ 0
+ );
+
+ if( err == 0 )
+ {
+ return;
+ }
+
+ wchar_t dPath[MAX_PATH];
+ HRESULT hr;
+
+ if( dumpPath != NULL && dumpPath[0] != L'\0' )
+ {
+ hr = StringCchCopyW( dPath, _countof( dPath ), dumpPath );
+ }
+ else
+ {
+ hr = StringCchCopyW( dPath, _countof( dPath ), L"%USERPROFILE%");
+ }
+
+ if( FAILED( hr ) )
+ {
+ return;
+ }
+
+ wchar_t name[MAX_PATH];
+
+ //
+ // For CCP, task instance id starts at 0. Thus, we only verify
+ // jobid and taskid
+ //
+ if( jobid == 0 || taskid == 0 )
+ {
+ //
+ // In the SDK environment use the sdk default dumpfile
+ //
+ hr = StringCchPrintfW(
+ name,
+ _countof( name ),
+ L"%s\\mpi_dump_%d.dmp",
+ dPath,
+ rank
+ );
+ }
+ else
+ {
+ //
+ // In the cluster environment use the cluster default dumpfile
+ // (incl. jobid.taskid.taskinstid)
+ //
+ hr = StringCchPrintfW(
+ name,
+ _countof( name ),
+ L"%s\\mpi_dump_%d.%d.%d.%d.dmp",
+ dPath,
+ jobid,
+ taskid,
+ taskinstid,
+ rank
+ );
+ }
+
+ if( FAILED( hr ) )
+ {
+ return;
+ }
+
+ DWORD ccPath = ExpandEnvironmentStringsW(
+ name,
+ dPath,
+ _countof( dPath )
+ );
+ if( ccPath == 0 )
+ {
+ return;
+ }
+
+ //
+ // For MPI Process, it is possible that it will be suspended
+ // by SMPD during the CopyFile operation. However, if this happens,
+ // this means this process is not the failing process (otherwise SMPD
+ // would not have suspended it). In the case of all processes generating
+ // dump files, SMPD will then write the dump for this process, which will
+ // result in a good dump file.
+ //
+ CopyFileW( tempFileName, dPath, FALSE );
+}
+
+
+HANDLE
+CreateTempDumpFile(
+ __in HANDLE hProcess,
+ __in DWORD pid,
+ __in MINIDUMP_TYPE dumpType,
+ __in const wchar_t* dumpPath,
+ __in_opt MINIDUMP_EXCEPTION_INFORMATION* pExrParam
+ )
+{
+ HANDLE hFile = INVALID_HANDLE_VALUE;
+
+ // Create target file
+ wchar_t path[MAX_PATH];
+
+ // Load dbghelp library.
+ DWORD ccPath = GetSystemDirectoryW( path, _countof(path) );
+ if( ccPath == 0 )
+ {
+ return INVALID_HANDLE_VALUE;
+ }
+
+ HRESULT hr = StringCchCopyW( &path[ccPath],
+ _countof(path) - ccPath,
+ L"\\dbghelp.dll" );
+ if( FAILED( hr ) )
+ {
+ return INVALID_HANDLE_VALUE;
+ }
+
+ HMODULE hDbgHelp;
+ OACR_REVIEWED_CALL(
+ mpicr,
+ hDbgHelp = LoadLibraryExW( path, nullptr, 0 ) );
+ if( hDbgHelp == NULL )
+ {
+ return INVALID_HANDLE_VALUE;
+ }
+
+ PFN_MiniDumpWriteDump pfnWriteDump = (PFN_MiniDumpWriteDump)GetProcAddress(
+ hDbgHelp,
+ "MiniDumpWriteDump"
+ );
+ if( pfnWriteDump == NULL )
+ {
+ goto free_dbghelp_and_exit;
+ }
+
+ if( dumpPath != NULL && dumpPath[0] != L'\0' )
+ {
+ ccPath = ExpandEnvironmentStringsW(
+ dumpPath,
+ path,
+ _countof(path)
+ );
+ }
+ else
+ {
+ ccPath = ExpandEnvironmentStringsW(
+ L"%USERPROFILE%",
+ path,
+ _countof(path)
+ );
+ }
+
+ if( ccPath == 0 )
+ {
+ goto free_dbghelp_and_exit;
+ }
+
+ wchar_t name[MAX_PATH];
+ int err = GetTempFileNameW(
+ path,
+ L"_mp",
+ 0,
+ name
+ );
+ if( err == 0 )
+ {
+ goto free_dbghelp_and_exit;
+ }
+
+ hFile = CreateFileW(
+ name,
+ GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE,
+ NULL
+ );
+ if( hFile == INVALID_HANDLE_VALUE )
+ {
+ goto free_dbghelp_and_exit;
+ }
+
+ pfnWriteDump(
+ hProcess,
+ pid,
+ hFile,
+ dumpType,
+ pExrParam,
+ NULL,
+ NULL
+ );
+
+free_dbghelp_and_exit:
+ FreeLibrary( hDbgHelp );
+ return hFile;
+}
diff --git a/src/mpi/common/errnames.txt b/src/mpi/common/errnames.txt
new file mode 100644
index 0000000..9b29159
--- /dev/null
+++ b/src/mpi/common/errnames.txt
@@ -0,0 +1,773 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#
+# Error messages for error classes referenced in baseerrnames.txt
+#
+**success:No MPI error
+**buffer:Invalid buffer pointer
+**count:Invalid count
+**dtype:Invalid datatype
+**tag:Invalid tag
+**comm:Invalid communicator
+**rank:Invalid rank
+**root:Invalid root
+**group:Invalid group
+**op:Invalid MPI_Op
+**request:Invalid MPI_Request
+**topology:Invalid topology
+**dims:Invalid dimension argument
+**arg:Invalid argument
+**unknown:Unknown error. Please file a bug report.
+**truncate:Message truncated
+**other:Other MPI error
+**intern:Internal MPI error!
+**instatus:See the MPI_ERROR field in MPI_Status for the error code
+**inpending:Pending request (no error)
+**fileaccess:Access denied to file
+**fileamode:Invalid amode value in MPI_File_open
+**filename:Invalid file name
+**conversion:An error occurred in a user-defined data conversion function
+**datarepused:The requested datarep name has already been specified to \
+ MPI_REGISTER_DATAREP
+**fileexist:File exists
+**fileinuse:File in use by some process
+**file:Invalid MPI_File
+**info:Invalid MPI_Info
+**infokey:Invalid key for MPI_Info
+**infoval:Invalid MPI_Info value
+**infonokey:MPI_Info key is not defined
+**io:Other I/O error
+**nameservice:Invalid service name (see MPI_Publish_name)
+**allocmem:Unable to allocate memory for MPI_Alloc_mem
+**notsame:Inconsistent arguments to collective routine
+**filenospace:Not enough space for file
+**filenoexist:File does not exist
+**port:Invalid port
+**filequota:Quota exceeded for files
+**filerdonly:Read-only file or filesystem name
+**servicename:Attempt to lookup an unknown service name
+**spawn:Error in spawn call
+**datarepunsupported:Unsupported datarep
+**datarepunsupported %s:Unsupported datarep passed to %s
+**fileopunsupported:Unsupported file operation
+**win:Invalid MPI_Win
+**winnotshm:A shared memory window is required but a local window was provided.
+**base:Invalid base address
+**locktype:Invalid lock type
+**keyval:Invalid keyval
+**rmaconflict:Conflicting accesses to window
+**rmasync:Wrong synchronization of RMA calls
+**rmasyncq %d %d %d %d:Wrong synchronization of RMA calls - rank %d, target %d, found lock state %d, expected lock state %d
+**rmasize:Invalid size argument in RMA call
+**rmadisp:Invalid displacement argument in RMA call
+**rmatype:Invalid datatype for RMA operation
+**assert:Invalid assert argument
+
+# NOTE: The following error messages are not defined in the standard,
+# but, are internally used.
+#
+**message:Invalid message handle
+
+
+**argnonpos %s %d:Invalid value for %s; must be positive but is %d
+**argneg %s %d:Invalid value for %s, must be non-negative but is %d
+**countneg %d:Negative count, value is %d
+**inittwice:Cannot call MPI_INIT or MPI_INIT_THREAD more than once
+**nomem:Out of memory
+**notimpl:Function not implemented
+**notimpl %s:Function %s not implemented
+**nullptr %s:Null pointer in parameter %s
+**nullptrtype %s:Null %s pointer
+**typenamelen %d:Specified datatype name is too long (%d characters)
+**commnamelen %d:Specified communicator name is too long (%d characters)
+**winnamelen %d:Specified window name is too long (%d characters)
+**keyvalobj %s:Keyval was not defined for %s objects
+**keyvalinvalid:Attribute key was MPI_KEYVAL_INVALID
+**permattr:Cannot set permanent attribute
+**noerrclasses:No more user-defined error classes
+**noerrcodes:No more user-defined error codes
+**rankdup %d %d %d:Duplicate ranks in rank array at index %d, has value %d which is \
+also the value at index %d
+**topotoolarge %d %d:Topology size %d is larger than communicator size (%d)
+**notcarttopo:No Cartesian topology associated with this communicator
+**dimszero:Communicator associated with zero-dimensional cartesian topology
+**notgraphtopo:No Graph topology associated with this communicator
+**notopology:No topology associated with this communicator
+**dimsmany %d %d:Number of dimensions %d is too large (maximum is %d)
+**neighborsmany %d %d:Number of neighbors %d is too large (maximum is %d)
+**dimspartition:Cannot partition nodes as requested
+**cartcoordinvalid %d %d %d:Cartesian coordinate for the %d coordinate \
+ is %d but must be between 0 and %d
+**cartdim %d %d:Size of the communicator (%d) is smaller than the size of the \
+ Cartesian topology (%d)
+**edgeoutrange %d %d %d:Edge index edges[%d] is %d but must be nonnegative \
+ and less than %d
+**nulledge %d %d:Edge for node %d (entry edges[%d]) is to itself
+**indexneg %d %d:Index value for index[%d] is %d but must be nonnegative
+**indexnonmonotone %d %d %d:Index values in graph topology must be monotone \
+ nondecreasing but index[%d] is %d but the next index value is %d
+**graphnnodes:Number of graph nodes exceeds size of communicator.
+**rangedup %d %d %d:The range array specifies duplicate entries; process %d \
+ specified in range array %d was previously specified in range array %d
+**rank %d %d:Invalid rank has value %d but must be nonnegative and less than %d
+**rank %d %g %d:Invalid rank %d in group %g but must be nonnegative and less than %d
+**stride %d %d %d:Range (start = %d, end = %d, stride = %d) does not terminate
+**stridezero:Zero stride is invalid
+**rangestartinvalid %d %d %d:The %dth element of a range array starts at %d \
+ but must be nonnegative and less than %d
+**rangeendinvalid %d %d %d:The %dth element of a range array ends at %d \
+ but must be nonnegative and less than %d
+**argrange %s %d %d:Argument %s has value %d but must be within [0,%d]
+**argarrayneg %s %d %d:Negative value in array %s[%d] (value is %d)
+**bufexists:Buffer already attached with MPI_BUFFER_ATTACH.
+**bsendbufsmall %d %d:Buffer size of %d is smaller than MPI_BSEND_OVERHEAD (%d)
+**notgenreq:Attempt to complete a request with MPI_GREQUEST_COMPLETE that \
+was not started with MPI_GREQUEST_START
+**cancelunknown:Attempt to cancel an unknown type of request
+**permop:Cannot free permanent MPI_Op
+**toomanycomm:Too many communicators
+**commperm %s:Cannot free permanent communicator %s
+**groupperm:Cannot free permanent group
+**groupnotincomm %d:Rank %d of the specified group is not a member of this communicator
+**commnotintra:An intracommunicator is required but an intercommunicator \
+ was provided.
+**commnotinter:An intercommunicator is required but an intracommunicator \
+ was provided.
+**nosplittype %d:Communicator cannot be split for type %d.
+**ranklocal %d %d:Error specifying local_leader; rank given was %d but must \
+be in the range 0 to %d
+**rankremote %d %d:Error specifying remote_leader; rank given was %d but must \
+be in the range 0 to %d
+**ranksdistinct:Local and remote leaders must be different processes
+**dupprocesses %d:Local and remote groups in MPI_Intercomm_create must not \
+ contain the same processes; both contain process %d
+**tag %d:Invalid tag, value is %d
+**count %d:Invalid count, value = %d
+**bufnull:Null buffer pointer
+**bufbsend %d %d:Insufficient space in Bsend buffer; requested %d; total \
+ buffer size is %d
+**namepubnotpub %s:Lookup failed for service name %s
+**nonamepub:No name publishing service available
+**namepubnotfound %s:Lookup failed for service name %s
+**namepubnotunpub %s:Failed to unpublish service name %s
+**sendbuf_inplace:sendbuf cannot be MPI_IN_PLACE
+**recvbuf_inplace:recvbuf cannot be MPI_IN_PLACE
+**buf_inplace:buffer cannot be MPI_IN_PLACE
+**typematchnoclass:The value of typeclass is not one of MPI_TYPECLASS_REAL, \
+MPI_TYPECLASS_INTEGER, or MPI_TYPECLASS_COMPLEX
+**typematchsize %s %d:No MPI datatype available for typeclass %s and size %d
+**f90typetoomany:Too many requests for unnamed, predefined f90 types
+**f90typeintnone %d: No integer type with %d digits of range is available
+**f90typerealnone %d %d: No REAL type with precision %d and %d digits of range is available
+**f90typecomplexnone %d %d: No COMPLEX type with precision %d and %d digits of range is available
+**getConnStringFailed:Failed to obtain connection info for this process group
+**version %d %d %d %d %d %d:Version mismatch in connection request, received version %d.%d.%d, expected %d.%d.%d. \
+Check cluster configuration and ensure MSMPI versions match.
+**comm_split_type %d:Split type %d is not valid.
+**overflow %s:Operation overflow in %s
+**intoverflow %l:The value %l passed to the internal operation is bigger than MAX_INT and may cause data corruption
+**invalidarg %s %s:Invalid argument value specified in parameter %s. %s
+**unweightedboth:Must specify MPI_UNWEIGHTED for both or neither weight arguments
+**argarrayrange %s %d %d %d:Argument %s[%d] has value %d but must be within [0,%d]
+
+# -- FIXME: Some (but not all) of the messages below this line have been used
+#---- The messages below this line haven't been used yet.
+#
+**bufalias %s %s:Buffer parameters %s and %s must not be aliased
+**dtypenull %s:Datatype for argument %s is a null datatype
+**dtypecommit:Datatype has not been committed
+**dtypeperm:Cannot free permanent data type
+**dtypecomm:Pack buffer not packed for this communicator.
+**dtypemismatch:Receiving data with a datatype whose signature does not match that of the sending datatype.
+**intercomm:Intercommunicator is not allowed
+**rankarray %d %d %d:Invalid rank in rank array at index %d; value is %d but must \
+be in the range 0 to %d
+**rankarraysize %d %d:Invalid rank array size %d: the number of ranks must be between 0 \
+and the size of the group (%d)
+**rangessize %d %d:Invalid number of ranges %d: the number of ranges must be between 0 \
+and the size of the group (%d)
+**root %d:Invalid root (value given was %d)
+**opundefined %s:MPI_Op %s operation not defined for this datatype
+**noopnotallowed:MPI_NO_OP operation is not allowed in this call
+**replacenotallowed:MPI_REPLACE operation is not allowed in this call
+**dims %d:Invalid dimension argument (value is %d)
+**arg %s:Invalid argument %s
+**argerrcode %d:Invalid error code %d
+**errhandler:Invalid errhandler
+**errhandnotcomm:Error handler is not a comm error handler
+**errhandnotfile:Error handler is not a file error handler
+**errhandnotwin:Error handler is not a win error handler
+**argarray %s %d %d:Invalid value in %s[%d] = %d
+**darraydist %d %d:For MPI_DISTRIBUTE_NONE, the value of array_of_psizes[%d] \
+ is %d but must have value 1
+**darrayunknown:Unknown distribution type
+**darrayblock %d:Value of m in block(m) distribution is %d but must be \
+ positive
+**darrayblock2 %d %d:m * nprocs is %d but must equal the array size %d and is \
+ not valid for block(m) distribution
+**darraycyclic %d:Value of m is %d but must be positive for a cyclic(m) \
+ distribution
+**argposneg %d:Value of position is %d but must be nonnegative
+**argpackbuf %l %l:Size of data to pack (%l bytes) is larger than remaining (%l bytes) \
+ space in pack buffer (%d bytes)
+**truncate %d %d:Message truncated; %d bytes received but buffer size is %d
+**truncate %d %d %d %d:Message from rank %d and tag %d truncated; \
+ %d bytes received but buffer size is %d
+**rsendnomatch %d %d %d:Ready send from source %d, for destination %d and \
+ with tag %d had no matching receive
+**rsendnomatch %d %d:Ready send from source %d and with tag %d had no matching receive
+**intern %s:Internal MPI error! %s
+**unknowngpid %d %d:Internal MPI error: Unknown gpid (%d)%d
+**request_invalid_kind %d:The supplied request was invalid (kind=%d)
+**requestnotpersist:Request is not persistent in MPI_Start or MPI_Startall.
+**requestpersistactive:Persistent request passed to MPI_Start or MPI_Startall is already active.
+**requestrmacancel:Cannot cancel RMA request
+**requestrmanotexpected:Request-based RMA operations are only valid within a passive target epoch
+**requestrmaoutofbounds:The requested displacement specifies memory outside of the RMA window
+**requestrmaremoteerror:RMA operation caused an error on the target process
+**fileamode %d:Invalid amode value of %d in MPI_File_open
+**fileamodeone:Exactly one of MPI_MODE_RDONLY, MPI_MODE_WRONLY, or \
+ MPI_MODE_RDWR must be specified
+**fileamoderead:Cannot use MPI_MODE_CREATE or MPI_MODE_EXCL with \
+ MPI_MODE_RDONLY
+**fileamodeseq:Cannot specify MPI_MODE_SEQUENTIAL with MPI_MODE_RDWR
+**infokeynull:Null key
+**infokeylong %s %d %d:Key %s is too long (length is %d but maximum allowed is %d)
+**infokeyempty:Empty or blank key
+**infovalnull:Null value
+**infovallong %s %d %d:Value %s is too long (length is %d but maximum length is %d)
+**infonokey %s:MPI_Info key %s is not defined
+**infonkey %d %d:Requested key %d but this MPI_Info only has %d keys
+**io %s:Other I/O error %s
+**ioetype:Only an integral number of etypes can be accessed
+**iosplitcoll:Only one active split collective I/O operation is allowed per file handle
+**iosplitcollnone:No split collective I/O operation is active
+**iofiletype:Filetype must be constructed out of one or more etypes
+**ioamodeseq %s:Cannot use function %s when the file is opened with amode \
+ MPI_MODE_SEQUENTIAL
+**iowronly:Cannot read from a file opened with amode MPI_MODE_WRONLY
+**iordonly:Cannot write to a file opened with amode MPI_MODE_RDONLY
+**iodispifseq:disp must be set to MPI_DISPLACEMENT_CURRENT since file \
+ was opened with MPI_MODE_SEQUENTIAL
+**iobaddisp:Invalid displacement argument
+**iobadoffset:Invalid offset argument
+**ionegoffset:Negative offset argument
+**iobadwhence:Invalid whence argument
+**iobadfh:Invalid file handle
+**ioagnomatch:No aggregators match
+**iobadsize:Invalid size argument
+**unsupporteddatarep:Only native data representation currently supported
+**iodatarepnomem:User must allocate memory for datarep
+**ioverlapping:Filetype specifies overlapping write regions
+**ioinfokey %s:Value for info key %s not same across processes
+**allocmem %d %d:Unable to allocate %d memory for MPI_Alloc_mem; only %d available
+**notsame %s %s:Inconsistent arguments %s to collective routine %s
+**rmasize %d:Invalid size argument in RMA call (value is %d)
+**winunlockrank %d %d:Invalid rank argument %d, should be %d
+**winlockall: A window locked with MPI_WIN_LOCK_ALL should be unlocked with MPI_WIN_UNLOCK_ALL
+**winrmaop: Unexpected type of RMA operation
+**notcstatignore:MPI_STATUS_IGNORE cannot be passed to MPI_Status_c2f()
+**notfstatignore:MPI_STATUS_IGNORE cannot be passed to MPI_Status_f2c()
+**user:user defined function returned an error code
+**userdel %d:user delete function returned error code %d
+**usercopy %d:user copy function returned error code %d
+**userquery %d:user request query function returned error code %d
+**usercancel %d:user request cancel function returned error code %d
+**userfree %d:user request free function returned error code %d
+**oremote_fail:open failed on a remote node
+**join_portname %s %s:local %s, remote %s
+**join_send %d:send on the socket failed (errno %d)
+**join_recv %d:recv from the socket failed (errno %d)
+**flag %d:invalid flag parameter (flag = %d)
+**badcase %d:INTERNAL ERROR: unexpected value in case statement (value=%d)
+**MapViewOfFile %d:MapViewOfFile failed, error %d
+**errcontextid:Creating Remote ContextID Failed
+**uuidgenfailed:Generating ContextID Failed
+**interuptiblenotsupported:Interruptible Waits not supported.
+
+#
+# Errors common to several devices
+#
+**dev|selfsenddeadlock:DEADLOCK: attempting to send a message to the local process without a prior matching receive
+
+#
+# CH3 errors
+#
+**ch3|badreqtype %d:request contained an invalid request type (%d)
+**ch3|unknownpkt %d:received unknown packet type (type=%d)
+**opnotpredefined %d:only predefined ops are valid (op = %d)
+**ch3|conn_parent:spawned process group was unable to connect back to the parent
+**ch3|conn_parent %s:spawned process group was unable to connect back to the parent on port <%s>
+
+#
+# CH3:sock errors
+#
+**ch3|sock|connrefused %s %d %s:[ch3:sock] failed to connect to process %s:%d (%s)
+**ch3|sock|connfailed %d %d:[ch3:sock] failed to connect to remote process %d:%d
+**ch3|sock|connfailed %g %d:[ch3:sock] failed to connect to remote process %g:%d
+**ch3|sock|badpacket %d:[ch3:sock] received packet of unknown type (%d)
+**ch3|sock|postread %p %p %p:attempt to post a read operation failed (rreq=%p,conn=%p,vc=%p)
+**ch3|sock|postwrite %p %p %p:attempt to post a write operation failed (sreq=%p,conn=%p,vc=%p)
+**ch3|sock|postconnect %d %d %s:[ch3:sock] rank %d unable to connect to rank %d using business card <%s>
+**ch3|sock|open_lrecv_data:[ch3:sock] failed to handle open lrecv data packet
+**ch3|sock|badhost %s %d %s:[ch3:sock] invalid host description, %s:%d (%s)
+**pglookup %g:unable to find the process group structure with id %g
+
+#
+# CH3:nd
+#
+**ch3|nd|startup %x:[ch3:nd] NdStartup failed with %x
+**ch3|nd|query_addr %x:[ch3:nd] NdQueryAddressList failed with %x
+**ch3|nd|badbuscard:[ch3:nd] Invalid business card
+**ch3|nd|not_here_fallback %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+There is no matching NetworkDirect adapter and fallback to the socket interconnect is disabled.\n\
+Check the local NetworkDirect configuration or set the MPICH_ND_ENABLE_FALLBACK environment variable to true.
+**ch3|nd|not_both_force %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+A matching NetworkDirect adapter is not available on either rank and the socket interconnect is disabled.\n\
+Check NetworkDirect configuration or clear the MPICH_DISABLE_SOCK environment variable.
+**ch3|nd|not_here_force %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+There is no matching NetworkDirect adapter and the socket interconnect is disabled.\n\
+Check the local NetworkDirect configuration or clear the MPICH_DISABLE_SOCK environment variable.
+**ch3|nd|not_there_fallback %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+There is no NetworkDirect information in the business card and fallback to the socket interconnect is disabled.\n\
+Check the remote NetworkDirect configuration or set the MPICH_ND_ENABLE_FALLBACK environment variable to true.
+**ch3|nd|not_there_force %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+There is no NetworkDirect information in the business card and the socket interconnect is disabled.\n\
+Check the remote NetworkDirect configuration or clear the MPICH_DISABLE_SOCK environment variable.
+**ch3|nd|no_path_fallback %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+The local and remote ranks have active NetworkDirect adapters but a route via NetworkDirect could not \
+be resolved and fallback to the socket interconnect is disabled.\n\
+Check NetworkDirect configuration or set the MPICH_ND_ENABLE_FALLBACK environment variable to true.
+**ch3|nd|no_path_force %d %s:[ch3:nd] Could not connect via NetworkDirect to rank %d with business card (%s).\n\
+The local and remote ranks have active NetworkDirect adapters but a route via NetworkDirect could not \
+be resolved and the socket interconnect is disabled.\n\
+Check NetworkDirect configuration or clear the MPICH_DISABLE_SOCK environment variable.
+**ch3|nd|open %x:[ch3:nd] NdOpenAdapter failed with %x
+**ch3|nd|query %x:[ch3:nd] IND2Adapter::Query failed with %x
+**ch3|nd|max_cq %d %d:[ch3:nd] Adapter's MaxCompletionQueueDepth: %d, need %d
+**ch3|nd|max_sq %d %d:[ch3:nd] Adapter's MaxInitiatorQueueDepth: %d, need %d
+**ch3|nd|max_rq %d %d:[ch3:nd] Adapter's MaxReceiveQueueDepth: %d, need %d
+**ch3|nd|ov_file %x:[ch3:nd] IND2Adapter::CreateOverlappedFile failed with %x
+**ch3|nd|create_cq %x:[ch3:nd] IND2Adapter::CreateCompletionQueue failed with %x
+**ch3|nd|notify %x:[ch3:nd] IND2CompletionQueue::Notify failed with %x
+**ch3|nd|create_mr %x:[ch3:nd] IND2Adapter::CreateMemoryRegion failed with %x
+**ch3|nd|regmem %x:[ch3:nd] IND2MemoryRegion::Register failed with %x
+**ch3|nd|deregmem %x:[ch3:nd] IND2MemoryRegion::Deregister failed with %x
+**ch3|nd|create_listen %x:[ch3:nd] IND2Adapter::CreateListener failed with %x
+**ch3|nd|bindlisten %x:[ch3:nd] IND2Listener::Bind failed with %x
+**ch3|nd|listen %x:[ch3:nd] IND2Listener::Listen failed with %x
+**ch3|nd|listen_addr %x:[ch3:nd] IND2Listener::GetLocalAddress failed with %x
+**ch3|nd|create_conn %x:[ch3:nd] IND2Adapter::CreateConnector failed with %x
+**ch3|nd|bindconn %x:[ch3:nd] IND2Connector::Bind failed with %x
+**ch3|nd|get_conn %x:[ch3:nd] IND2Listener::GetConnectionRequest failed with %x
+**ch3|nd|conn_data %x:[ch3:nd] IND2Connector::GetPrivateData failed with %x
+**ch3|nd|conn_data_len %d:[ch3:nd] IND2Connector::GetPrivateData returned insufficient private data (%d)
+**ch3|nd|conn_data %s %d %x:[ch3:nd] IND2Connector::GetPrivateData from %s:%d failed with %x
+**ch3|nd|conn_data_len %s %d %d:[ch3:nd] IND2Connector::GetPrivateData from %s:%d returned insufficient private data (%d)
+**ch3|nd|peer_addr %x:[ch3:nd] IND2Connector::GetPeerAddr failed with %x
+**ch3|nd|accept %x:[ch3:nd] IND2Listener::Accept failed with %x
+**ch3|nd|create_ep %x:[ch3:nd] IND2Adapter::CreateEndpoint failed with %x
+**ch3|nd|conn %x:[ch3:nd] IND2Connector::Connect failed with %x
+**ch3|nd|conn %s %d %x:[ch3:nd] IND2Connector::Connect to %s:%d failed with %x
+**ch3|nd|comp_conn %s %d %x:[ch3:nd] IND2Connector::CompleteConnect to %s:%d failed with %x
+**ch3|nd|recv %x:[ch3:nd] IND2Endpoint::Receive failed with %x
+**ch3|nd|recv_err %s %d %x:[ch3:nd] Recv from %s:%d completed in error with %x
+**ch3|nd|send %x:[ch3:nd] IND2Endpoint::Send failed with %x
+**ch3|nd|send %s %d %x:[ch3:nd] IND2Endpoint::Send to %s:%d failed with %x
+**ch3|nd|send_err %s %d %x:[ch3:nd] Send to %s:%d completed in error with %x
+**ch3|nd|dconn %x:[ch3:nd] IND2Connector::Disconnect failed with %x
+**ch3|nd|flush %x:[ch3:nd] IND2Endpoint::Flush failed with %x
+**ch3|nd|create_mw %x:[ch3:nd] IND2Adapter::CreateMemoryWindow failed with %x
+**ch3|nd|bind %x:[ch3:nd] IND2Endpoint::Bind failed with %x
+**ch3|nd|bind_err %x:[ch3:nd] Bind failed with %x
+**ch3|nd|unbind %x:[ch3:nd] IND2Endpoint::Invalidate failed with %x
+**ch3|nd|unbind_err %x:[ch3:nd] Invalidate failed with %x
+**ch3|nd|read %s %d %x:[ch3:nd] IND2Endpoint::Read from %s:%d failed with %x
+**ch3|nd|read_err %s %d %x:[ch3:nd] Read from %s:%d completed in error with %x
+
+#
+# CH3:ssm
+#
+**OpenProcess %d %d:OpenProcess failed for process %d, error %d
+**CreateFileMapping %d:CreateFileMapping failed, error %d
+**MapViewOfFileEx %d:MapViewOfFileEx failed, error %d
+**snprintf %d:snprintf returned %d
+**boot_attach %s:failed to attach to a bootstrap queue - %s
+**attach_to_mem:attach to shared memory segment failed
+**boot_send:sending bootstrap message failed
+**shmconnect_getmem:failed to allocate shared memory for a write queue
+**attach_to_mem %d:attach to shared memory returned error %d
+**ca %d:invalid completion action (%d)
+**vc_state %d:invalid vc state (%d)
+**argstr_port:no space for the listener port
+**argstr_port_name_tag:no space for port_name tag
+**argstr_no_port_name_tag:no port_name tag in MPI port. Make sure that port \
+ was created with MPI_Open_port
+**argstr_shmq:no space for the shared memory queue name
+**argstr_missinghost:Missing hostname or invalid host/port description in business card
+**argstr_missingport:Missing port or invalid host/port description in business card
+**buscard:unable to create a business card
+**buscard_len:no space left in the business card to add a parameter
+**desc_len:host description buffer too small
+**duphandle %d:unable to duplicate a handle (errno %d)
+**duphandle %s %d:unable to duplicate a handle, %s (errno %d)
+**fail:
+**fail %d:generic failure with errno = %d
+**fail %s:%s
+**fail %s %d:%s (errno %d)
+**gethostbyname %d:gethostbyname failed (errno %d)
+**gethostbyname %s %d:gethostbyname failed, %s (errno %d)
+**sock_connect %d:connect failed (errno %d)
+**sock_connect %s %d:connect failed - %s (errno %d)
+**sock_connect %s %d %d:unable to connect to %s on port %d, error %d
+**sock_connect %s %d %s:unable to connect to %s on port %d, %s
+**sock_connect %s %d %s %d:unable to connect to %s on port %d, %s (errno %d)
+**vcfailedstate %d:Failed to communicate with %d on previous attempts
+
+#
+# Sock
+#
+**sock|connclosed:connection closed by peer
+**sock|getport:failed to obtain port number of the listener
+
+#
+# mpi functions
+#
+**mpi_send %p %d %D %i %t %C:MPI_Send(buf=%p, count=%d, %D, dest=%i, tag=%t, %C) failed
+**mpi_recv %p %d %D %i %t %C %p:MPI_Recv(buf=%p, count=%d, %D, src=%i, tag=%t, %C, status=%p) failed
+**mpi_get_count %p %D %p:MPI_Get_count(status=%p, %D, count=%p) failed
+**mpi_bsend %p %d %D %i %t %C:MPI_Bsend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C) failed
+**mpi_ssend %p %d %D %i %t %C:MPI_Ssend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C) failed
+**mpi_rsend %p %d %D %i %t %C:MPI_Rsend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C) failed
+**mpi_buffer_attach %p %d:MPI_Buffer_attach(buf=%p, size=%d) failed
+**mpi_buffer_detach %p %p:MPI_Buffer_detach(buf=%p, size=%p) failed
+**mpi_isend %p %d %D %i %t %C %p:MPI_Isend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_ibsend %p %d %D %i %t %C %p:MPI_Ibsend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_issend %p %d %D %i %t %C %p:MPI_Issend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_irsend %p %d %D %i %t %C %p:MPI_Irsend(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_irecv %p %d %D %i %t %C %p:MPI_Irecv(buf=%p, count=%d, %D, src=%i, tag=%t, %C, request=%p) failed
+**mpi_wait %p %p:MPI_Wait(request=%p, status=%p) failed
+**mpi_test %p %p %p:MPI_Test(request=%p, flag=%p, status=%p) failed
+**mpi_request_free %p:MPI_Request_free(request=%p) failed
+**mpi_waitany %d %p %p %p:MPI_Waitany(count=%d, req_array=%p, index=%p, status=%p) failed
+**mpi_testany %d %p %p %p %p:MPI_Testany(count=%d, req_array=%p, index=%p, flag=%p, status=%p) failed
+**mpi_waitall %d %p %p:MPI_Waitall(count=%d, req_array=%p, status_array=%p) failed
+**mpi_testall %d %p %p %p:MPI_Testall(count=%d, req_array=%p, flag=%p, status_array=%p) failed
+**mpi_waitsome %d %p %p %p %p:MPI_Waitsome(count=%d, req_array=%p, out_count=%p, indices=%p, status_array=%p) failed
+**mpi_testsome %d %p %p %p %p:MPI_Testsome(count=%d, req_array=%p, out_count=%p, indices=%p, status_array=%p) failed
+**mpi_iprobe %i %t %C %p %p:MPI_Iprobe(src=%i, tag=%t, %C, flag=%p, status=%p) failed
+**mpi_probe %i %t %C %p:MPI_Probe(src=%i, tag=%t, %C, status=%p) failed
+**mpi_improbe %i %t %C %p %p %p:MPI_Improbe(src=%i, tag=%t, %C, flag=%p, message=%p, status=%p) failed
+**mpi_mprobe %i %t %C %p %p:MPI_Mprobe(src=%i, tag=%t, %C, message=%p, status=%p) failed
+**mpi_mrecv %p %d %D %p %p:MPI_Mrecv(buf=%p, count=%d, %D, message=%p, status=%p) failed
+**mpi_imrecv %p %d %D %p %p:MPI_Imrecv(buf=%p, count=%d, %D, message=%p, request=%p) failed
+**mpi_cancel %p:MPI_Cancel(request=%p) failed
+**mpi_test_cancelled %p %p:MPI_Test_cancelled(status=%p, flag=%p) failed
+**mpi_send_init %p %d %D %i %t %C %p:MPI_Send_init(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_bsend_init %p %d %D %i %t %C %p:MPI_Bsend_init(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_ssend_init %p %d %D %i %t %C %p:MPI_Ssend_init(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_rsend_init %p %d %D %i %t %C %p:MPI_Rsend_init(buf=%p, count=%d, %D, dest=%i, tag=%t, %C, request=%p) failed
+**mpi_recv_init %p %d %D %i %t %C %p:MPI_Recv_init(buf=%p, count=%d, %D, src=%i, tag=%t, %C, request=%p) failed
+**mpi_start %p:MPI_Start(request=%p) failed
+**mpi_startall %d %p:MPI_Startall(count=%d, req_array=%p) failed
+**mpi_sendrecv %p %d %D %i %t %p %d %D %i %t %C %p:MPI_Sendrecv(sbuf=%p, scount=%d, %D, dest=%i, stag=%t, rbuf=%p, rcount=%d, %D, src=%i, rtag=%t, %C, status=%p) failed
+**mpi_sendrecv_replace %p %d %D %i %t %i %t %C %p:MPI_Sendrecv_replace(buf=%p, count=%d, %D, dest=%i, stag=%t, src=%i, rtag=%t, %C, status=%p) failed
+**mpi_type_contiguous %d %D %p:MPI_Type_contiguous(count=%d, %D, new_type_p=%p) failed
+**mpi_type_vector %d %d %d %D %p:MPI_Type_vector(count=%d, blocklength=%d, stride=%d, %D, new_type_p=%p) failed
+**mpi_type_indexed %d %p %p %D %p:MPI_Type_indexed(count=%d, blocklens=%p, indices=%p, %D, new_type_p=%p) failed
+**mpi_type_size %D %p:MPI_Type_size(%D, size=%p) failed
+**mpi_type_size_x %D %p:MPI_Type_size_x(%D, size=%p) failed
+**mpi_type_commit %p:MPI_Type_commit(datatype_p=%p) failed
+**mpi_type_free %p:MPI_Type_free(datatype_p=%p) failed
+**mpi_get_elements %p %D %p:MPI_Get_elements(status=%p, %D, count=%p) failed
+**mpi_get_elements_x %p %D %p:MPI_Get_elements_x(status=%p, %D, count=%p) failed
+**mpi_pack %p %d %D %p %d %p %C:MPI_Pack(inbuf=%p, incount=%d, %D, outbuf=%p, outcount=%d, position=%p, %C) failed
+**mpi_unpack %p %d %p %p %d %D %C:MPI_Unpack(inbuf=%p, insize=%d, position=%p, outbuf=%p, outcount=%d, %D, %C) failed
+**mpi_pack_size %d %D %C %p:MPI_Pack_size(count=%d, %D, %C, size=%p) failed
+**mpi_barrier %C:MPI_Barrier(%C) failed
+**mpi_bcast %p %d %D %d %C:MPI_Bcast(buf=%p, count=%d, %D, root=%d, %C) failed
+**mpi_gather %p %d %D %p %d %D %d %C:MPI_Gather(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, root=%d, %C) failed
+**mpi_gatherv %p %d %D %p %p %p %D %d %C:MPI_Gatherv(sbuf=%p, scount=%d, %D, rbuf=%p, rcnts=%p, displs=%p, %D, root=%d, %C) failed
+**mpi_scatter %p %d %D %p %d %D %d %C:MPI_Scatter(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, root=%d, %C) failed
+**mpi_scatterv %p %p %p %D %p %d %D %d %C:MPI_Scatterv(sbuf=%p, scnts=%p, displs=%p, %D, rbuf=%p, rcount=%d, %D, root=%d, %C) failed
+**mpi_allgather %p %d %D %p %d %D %C:MPI_Allgather(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, %C) failed
+**mpi_allgatherv %p %d %D %p %p %p %D %C:MPI_Allgatherv(sbuf=%p, scount=%d, %D, rbuf=%p, rcounts=%p, displs=%p, %D, %C) failed
+**mpi_alltoall %p %d %D %p %d %D %C:MPI_Alltoall(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, %C) failed
+**mpi_alltoallv %p %p %p %D %p %p %p %D %C:MPI_Alltoallv(sbuf=%p, scnts=%p, sdispls=%p, %D, rbuf=%p, rcnts=%p, rdispls=%p, %D, %C) failed
+**mpi_reduce %p %p %d %D %O %d %C:MPI_Reduce(sbuf=%p, rbuf=%p, count=%d, %D, %O, root=%d, %C) failed
+**mpi_reduce_local %p %p %d %D %O:MPI_Reduce_local(inbuf=%p, inoutbuf=%p, count=%d, %D, %O) failed
+**mpi_iallgather %p %d %D %p %d %D %C %p:MPI_Iallgather(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, %C, request=%p) failed
+**mpi_iallgatherv %p %d %D %p %p %p %D %C %p:MPI_Iallgatherv(sbuf=%p, scount=%d, %D, rbuf=%p, rcounts=%p, displs=%p, %D, %C, request=%p) failed
+**mpi_iallreduce %p %p %d %D %O %C %p:MPI_Iallreduce(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C, request=%p) failed
+**mpi_ialltoall %p %d %D %p %d %D %C %p:MPI_Ialltoall(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, %C, request=%p) failed
+**mpi_ialltoallv %p %p %p %D %p %p %p %D %C %p:MPI_Ialltoallv(sbuf=%p, scnts=%p, sdispls=%p, %D, rbuf=%p, rcnts=%p, rdispls=%p, %D, %C, request=%p) failed
+**mpi_ialltoallw %p %p %p %p %p %p %p %p %C %p:MPI_Ialltoallw(sbuf=%p, scnts=%p, sdispls=%p, stypes=%p, rbuf=%p, rcnts=%p, rdispls=%p, rtypes=%p, %C, request=%p) failed
+**mpi_ibarrier %C %p:MPI_Ibarrier(%C, request=%p) failed
+**mpi_ibcast %p %d %D %d %C %p:MPI_Ibcast(buf=%p, count=%d, %D, root=%d, %C, request=%p) failed
+**mpi_iexscan %p %p %d %D %O %C %p:MPI_Iexscan(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C, request=%p) failed
+**mpi_igather %p %d %D %p %d %D %d %C %p:MPI_Igather(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, root=%d, %C, request=%p) failed
+**mpi_igatherv %p %d %D %p %p %p %D %d %C %p:MPI_Igatherv(sbuf=%p, scount=%d, %D, rbuf=%p, rcnts=%p, displs=%p, %D, root=%d, %C, request=%p) failed
+**mpi_ireduce %p %p %d %D %O %d %C %p:MPI_Ireduce(sbuf=%p, rbuf=%p, count=%d, %D, %O, root=%d, %C, request=%p) failed
+**mpi_ireduce_scatter %p %p %p %D %O %C %p:MPI_Ireduce_scatter(sbuf=%p, rbuf=%p, rcnts=%p, %D, %O, %C, request=%p) failed
+**mpi_ireduce_scatter_block %p %p %d %D %O %C %p:MPI_Ireduce_scatter_block(sbuf=%p, rbuf=%p, rcnt=%d, %D, %O, %C, request=%p) failed
+**mpi_iscan %p %p %d %D %O %C %p:MPI_Iscan(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C, request=%p) failed
+**mpi_iscatter %p %d %D %p %d %D %d %C %p:MPI_Iscatter(sbuf=%p, scount=%d, %D, rbuf=%p, rcount=%d, %D, root=%d, %C, request=%p) failed
+**mpi_iscatterv %p %p %p %D %p %d %D %d %C %p:MPI_Iscatterv(sbuf=%p, scnts=%p, displs=%p, %D, rbuf=%p, rcount=%d, %D, root=%d, %C, request=%p) failed
+**mpi_op_commutative %O:MPI_Op_commutative(%O) failed
+**mpi_op_create %p %d %p:MPI_Op_create(fn=%p, commute=%d, op=%p) failed
+**mpi_op_free %p:MPI_Op_free(op=%p) failed
+**mpi_allreduce %p %p %d %D %O %C:MPI_Allreduce(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C) failed
+**mpi_reduce_scatter %p %p %p %D %O %C:MPI_Reduce_scatter(sbuf=%p, rbuf=%p, rcnts=%p, %D, %O, %C) failed
+**mpi_reduce_scatter_block %p %p %d %D %O %C:MPI_Reduce_scatter_block(sbuf=%p, rbuf=%p, rcnt=%d, %D, %O, %C) failed
+**mpi_scan %p %p %d %D %O %C:MPI_Scan(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C) failed
+**mpi_group_size %G %p:MPI_Group_size(%G, size=%p) failed
+**mpi_group_rank %G %p:MPI_Group_rank(%G, rank=%p) failed
+**mpi_group_translate_ranks %G %d %p %G %p:MPI_Group_translate_ranks(%G, n=%d, ranks1=%p, %G, ranks2=%p) failed
+**mpi_group_compare %G %G %p:MPI_Group_compare(%G, %G, result=%p) failed
+**mpi_comm_group %C %p:MPI_Comm_group(%C, group=%p) failed
+**mpi_group_union %G %G %p:MPI_Group_union(%G, %G, new_group=%p) failed
+**mpi_group_intersection %G %G %p:MPI_Group_intersection(%G, %G, new_group=%p) failed
+**mpi_group_difference %G %G %p:MPI_Group_difference(%G, %G, new_group=%p) failed
+**mpi_group_incl %G %d %p %p:MPI_Group_incl(%G, n=%d, ranks=%p, new_group=%p) failed
+**mpi_group_excl %G %d %p %p:MPI_Group_excl(%G, n=%d, ranks=%p, new_group=%p) failed
+**mpi_group_range_incl %G %d %p %p:MPI_Group_range_incl(%G, n=%d, ranges=%p, new_group=%p) failed
+**mpi_group_range_excl %G %d %p %p:MPI_Group_range_excl(%G, n=%d, ranges=%p, new_group=%p) failed
+**mpi_group_free %p:MPI_Group_free(group=%p) failed
+**mpi_comm_size %C %p:MPI_Comm_size(%C, size=%p) failed
+**mpi_comm_rank %C %p:MPI_Comm_rank(%C, rank=%p) failed
+**mpi_comm_compare %C %C %p:MPI_Comm_compare(%C, %C, result=%p) failed
+**mpi_comm_dup %C %p:MPI_Comm_dup(%C, new_comm=%p) failed
+**mpi_comm_create %C %G %p:MPI_Comm_create(%C, %G, new_comm=%p) failed
+**mpi_comm_split %C %d %d %p:MPI_Comm_split(%C, color=%d, key=%d, new_comm=%p) failed
+**mpi_comm_split_type %C %d %d %I %p:MPI_Comm_split_type(%C, split_type=%d, key=%d, info=%I, new_comm=%p) failed
+**mpi_comm_free %p:MPI_Comm_free(comm=%p) failed
+**mpi_comm_test_inter %C %p:MPI_Comm_test_inter(%C, flag=%p) failed
+**mpi_comm_remote_size %C %p:MPI_Comm_remote_size(%C, size=%p) failed
+**mpi_comm_remote_group %C %p:MPI_Comm_remote_group(%C, group=%p) failed
+**mpi_intercomm_create %C %d %C %d %d %p:MPI_Intercomm_create(%C, local_leader=%d, %C, remote_leader=%d, tag=%d, newintercomm=%p) failed
+**mpi_intercomm_merge %C %d %p:MPI_Intercomm_merge(%C, high=%d, newintracomm=%p) failed
+**mpi_topo_test %C %p:MPI_Topo_test(%C, topo_type=%p) failed
+**mpi_cart_create %C %d %p %p %d %p:MPI_Cart_create(%C, ndims=%d, dims=%p, periods=%p, reorder=%d, comm_cart=%p) failed
+**mpi_dims_create %d %d %p:MPI_Dims_create(nnodes=%d, ndims=%d, dims=%p) failed
+**mpi_graph_create %C %d %p %p %d %p:MPI_Graph_create(%C, nnodes=%d, index=%p, edges=%p, reorder=%d, comm_graph=%p) failed
+**mpi_graphdims_get %C %p %p:MPI_Graphdims_get(%C, nnodes=%p, nedges=%p) failed
+**mpi_graph_get %C %d %d %p %p:MPI_Graph_get(%C, maxindex=%d, maxedges=%d, index=%p, edges=%p) failed
+**mpi_cartdim_get %C %p:MPI_Cartdim_get(%C, ndims=%p) failed
+**mpi_cart_get %C %d %p %p %p:MPI_Cart_get(%C, maxdims=%d, dims=%p, periods=%p, coords=%p) failed
+**mpi_cart_rank %C %p %p:MPI_Cart_rank(%C, coords=%p, rank=%p) failed
+**mpi_cart_coords %C %d %d %p:MPI_Cart_coords(%C, rank=%d, maxdims=%d, coords=%p) failed
+**mpi_graph_neighbors_count %C %d %p:MPI_Graph_neighbors_count(%C, rank=%d, nneighbors=%p) failed
+**mpi_graph_neighbors %C %d %d %p:MPI_Graph_neighbors(%C, rank=%d, maxneighbors=%d, neighbors=%p) failed
+**mpi_cart_shift %C %d %d %p %p:MPI_Cart_shift(%C, direction=%d, displ=%d, source=%p, dest=%p) failed
+**mpi_cart_sub %C %p %p:MPI_Cart_sub(%C, remain_dims=%p, comm_new=%p) failed
+**mpi_cart_map %C %d %p %p %p:MPI_Cart_map(%C, ndims=%d, dims=%p, periods=%p, newrank=%p) failed
+**mpi_graph_map %C %d %p %p %p:MPI_Graph_map(%C, nnodes=%d, index=%p, edges=%p, newrank=%p) failed
+**mpi_dist_graph_neighbors_count %C:MPI_Dist_graph_neighbors_count(%C) failed
+**mpi_dist_graph_neighbors %C %d %d:MPI_Dist_graph_neighbors(%C, maxindegree=%d, maxoutdegree=%d) failed
+**mpi_dist_graph_create_adjacent %C %d %p %p %d %p %p %d %d:MPI_Dist_graph_create_adjacent(%C, indegree=%d, sources=%p, sourceweights=%p, outdegree=%d, destinations=%p, destweights=%p, info=%d, reorder=%d) failed
+**mpi_dist_graph_create %C %d %p %p %p %p %d %d:MPI_Dist_graph_create(%C, n=%d, sources=%p, degrees=%p, destinations=%p, weights=%p, info=%d, reorder=%d) failed
+**mpi_get_processor_name %p %p:MPI_Get_processor_name(name=%p, resultlen=%p) failed
+**mpi_get_version %p %p:MPI_Get_version(version=%p, subversion=%p) failed
+**mpi_get_library_version %p %p:MPI_Get_library_version(version=%p, resultlen=%p) failed
+**mpi_errhandler_free %p:MPI_Errhandler_free(errhandler=%p) failed
+**mpi_error_string %d %s %p:MPI_Error_string(errorcode=%d, string=%s, resultlen=%p) failed
+**mpi_error_class %d %p:MPI_Error_class(errorcode=%d, errorclass=%p) failed
+**mpi_init %p %p:MPI_Init(argc_p=%p, argv_p=%p) failed
+**mpi_finalize:MPI_Finalize failed
+**mpi_initialized %p:MPI_Initialized(flag=%p) failed
+**mpi_abort %C %d:MPI_Abort(%C, errorcode=%d) failed
+**mpi_close_port %s:MPI_Close_port(port=\"%s\") failed
+**mpi_comm_accept %s %I %d %C %p:MPI_Comm_accept(port=\"%s\", %I, root=%d, %C, newcomm=%p) failed
+**mpi_comm_connect %s %I %d %C %p:MPI_Comm_connect(port=\"%s\", %I, root=%d, %C, newcomm=%p) failed
+**mpi_comm_disconnect %C:MPI_Comm_disconnect(comm=%C) failed
+**mpi_comm_get_parent %p:MPI_Comm_get_parent(comm=%p) failed
+**mpi_comm_join %d %p:MPI_Comm_join(fd=%d, intercomm=%p) failed
+**mpi_comm_spawn %s %p %d %I %d %C %p %p:MPI_Comm_spawn(cmd=\"%s\", argv=%p, maxprocs=%d, %I, root=%d, %C, intercomm=%p, errors=%p) failed
+**mpi_comm_spawn_multiple %d %p %p %p %p %d %C %p %p:MPI_Comm_spawn_multiple(count=%d, cmds=%p, argvs=%p, maxprocs=%p, infos=%p, root=%d, %C, intercomm=%p, errors=%p) failed
+**mpi_lookup_name %s %I %p:MPI_Lookup_name(service=\"%s\", %I, port=%p) failed
+**mpi_open_port %I %p:MPI_Open_port(%I, port=%p) failed
+**mpi_publish_name %s %I %s:MPI_Publish_name(service=\"%s\", %I, port=\"%s\") failed
+**mpi_unpublish_name %s %I %s:MPI_Unpublish_name(service=\"%s\", %I, port=\"%s\") failed
+**mpi_accumulate %p %d %D %d %d %d %D %O %W:MPI_Accumulate(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %O, %W) failed
+**mpi_raccumulate %p %d %D %d %d %d %D %O %W %p:MPI_Raccumulate(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %O, %W, request=%p) failed
+**mpi_get %p %d %D %d %d %d %D %W:MPI_Get(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %W) failed
+**mpi_rget %p %d %D %d %d %d %D %W %p:MPI_Rget(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %W, request=%p) failed
+**mpi_put %p %d %D %d %d %d %D %W:MPI_Put(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %W) failed
+**mpi_rput %p %d %D %d %d %d %D %W %p:MPI_Rput(origin_addr=%p, origin_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %W, request=%p) failed
+**mpi_get_accumulate %p %d %D %p %d %D %d %d %d %D %O %W:MPI_Get_accumulate(origin_addr=%p, origin_count=%d, %D, result_addr=%p, result_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %O, %W) failed
+**mpi_rget_accumulate %p %d %D %p %d %D %d %d %d %D %O %W %p:MPI_Rget_accumulate(origin_addr=%p, origin_count=%d, %D, result_addr=%p, result_count=%d, %D, target_rank=%d, target_disp=%d, target_count=%d, %D, %O, %W, request=%p) failed
+**mpi_fetch_and_op %p %p %D %d %d %O %W:MPI_Fetch_and_op(origin_addr=%p, result_addr=%p, %D, target_rank=%d, target_disp=%d, %O, %W) failed
+**mpi_compare_and_swap %p %p %p %D %d %d %W:MPI_Compare_and_swap(origin_addr=%p, compare_addr=%p, result_addr=%p, %D, target_rank=%d, target_disp=%d, %W) failed
+**mpi_win_complete %W:MPI_Win_complete(%W) failed
+**mpi_win_create %p %d %d %I %C %p:MPI_Win_create(base=%p, size=%d, disp_unit=%d, %I, %C, win=%p) failed
+**mpi_win_allocate %d %d %I %C %p %p:MPI_Win_allocate(size=%d, disp_unit=%d, %I, %C, baseptr=%p, win=%p) failed
+**mpi_win_allocate_shared %d %d %I %C %p %p:MPI_Win_allocate_shared(size=%d, disp_unit=%d, %I, %C, baseptr=%p, win=%p) failed
+**mpi_win_create_dynamic %I %C %p:MPI_Win_create_dynamic(%I, %C, win=%p) failed
+**mpi_win_shared_query %p %d %d %d %p:MPI_Win_shared_query(win=%p, rank=%d, size=%d, disp_unit=%d, baseptr=%p) failed
+**mpi_win_fence %A %W:MPI_Win_fence(%A, %W) failed
+**mpi_win_free %p:MPI_Win_free(win=%p) failed
+**mpi_win_get_group %W %p:MPI_Win_get_group(%W, group=%p) failed
+**mpi_win_lock %d %d %A %W:MPI_Win_lock(lock_type=%d, rank=%d, %A, %W) failed
+**mpi_win_lock_all %A %W:MPI_Win_lock_all(%A, %W) failed
+**mpi_win_post %G %A %W:MPI_Win_post(%G, %A, %W) failed
+**mpi_win_start %G %A %W:MPI_Win_start(%G, %A, %W) failed
+**mpi_win_test %W %p:MPI_Win_test(%W, flag=%p) failed
+**mpi_win_unlock %d %W:MPI_Win_unlock(rank=%d, %W) failed
+**mpi_win_unlock_all %W:MPI_Win_unlock_all(%W) failed
+**mpi_win_flush %d %W:MPI_Win_flush(rank=%d, %W) failed
+**mpi_win_flush_all %W:MPI_Win_flush_all(%W) failed
+**mpi_win_flush_local %d %W:MPI_Win_flush_local(rank=%d, %W) failed
+**mpi_win_flush_local_all %W:MPI_Win_flush_local_all(%W) failed
+**mpi_win_sync %W:MPI_Win_sync(%W) failed
+**mpi_win_wait %W:MPI_Win_wait(%W) failed
+**mpi_win_attach %W %p %d:MPI_Win_attach(win=%W, base=%p, size=%d) failed
+**mpi_win_detach %W %p:MPI_Win_detach(win=%W, base=%p) failed
+**mpi_alltoallw %p %p %p %p %p %p %p %p %C:MPI_Alltoallw(sbuf=%p, scnts=%p, sdispls=%p, stypes=%p, rbuf=%p, rcnts=%p, rdispls=%p, rtypes=%p, %C) failed
+**mpi_exscan %p %p %d %D %O %C:MPI_Exscan(sbuf=%p, rbuf=%p, count=%d, %D, %O, %C) failed
+**mpi_add_error_class %p:MPI_Add_error_class(errorclass=%p) failed
+**mpi_add_error_code %d %p:MPI_Add_error_code(errorclass=%d, errorcode=%p) failed
+**mpi_add_error_string %d %s:MPI_Add_error_string(code=%d, str=\"%s\") failed
+**mpi_comm_call_errhandler %C %d:MPI_Comm_call_errhandler(%C, errorcode=%d) failed
+**mpi_comm_create_keyval %p %p %p %p:MPI_Comm_create_keyval(comm_copy_attr_fn=%p, comm_delete_attr_fn=%p, comm_keyval=%p, extra_state=%p) failed
+**mpi_comm_delete_attr %C %d:MPI_Comm_delete_attr(%C, comm_keyval=%d) failed
+**mpi_comm_free_keyval %p:MPI_Comm_free_keyval(comm_keyval=%p) failed
+**mpi_comm_get_attr %C %d %p %p:MPI_Comm_get_attr(%C, comm_keyval=%d, attribute_val=%p, flag=%p) failed
+**mpi_comm_get_name %C %p %p:MPI_Comm_get_name(%C, comm_name=%p, resultlen=%p) failed
+**mpi_comm_set_attr %C %d %p:MPI_Comm_set_attr(%C, comm_keyval=%d, attribute_val=%p) failed
+**mpi_comm_set_name %C %s:MPI_Comm_set_name(%C, comm_name=%s) failed
+**mpi_grequest_complete %R:MPI_Grequest_complete(%R) failed
+**mpi_grequest_start %p %p %p %p %p:MPI_Grequest_start(query_fn=%p, free_fn=%p, cancel_fn=%p, extra_state=%p, request=%p) failed
+**mpi_init_thread %p %p %d %p:MPI_Init_thread(argc_p=%p, argv_p=%p, required=%d, provided=%p) failed
+**mpi_is_thread_main %p:MPI_Is_thread_main(flag=%p) failed
+**mpi_query_thread %p:MPI_Query_thread(provided=%p) failed
+**mpi_status_set_cancelled %p %d:MPI_Status_set_cancelled(status=%p, flag=%d) failed
+**mpi_status_set_elements %p %D %d:MPI_Status_set_elements(status=%p, %D, count=%d) failed
+**mpi_status_set_elements_x %p %D %l:MPI_Status_set_elements_x(status=%p, %D, count=%l) failed
+**mpi_type_create_keyval %p %p %p %p:MPI_Type_create_keyval(type_copy_attr_fn=%p, type_delete_attr_fn=%p, type_keyval=%p, extra_state=%p) failed
+**mpi_type_delete_attr %D %d:MPI_Type_delete_attr(%D, type_keyval=%d) failed
+**mpi_type_dup %D %p:MPI_Type_dup(%D, newtype=%p) failed
+**mpi_type_free_keyval %p:MPI_Type_free_keyval(type_keyval=%p) failed
+**mpi_type_get_attr %D %d %p %p:MPI_Type_get_attr(%D, type_keyval=%d, attribute_val=%p, flag=%p) failed
+**mpi_type_get_contents %D %d %d %d %p %p %p:MPI_Type_get_contents(%D, max_integers=%d, max_addresses=%d, max_datatypes=%d, array_of_integers=%p, array_of_addresses=%p, array_of_datatypes=%p) failed
+**mpi_type_get_envelope %D %p %p %p %p:MPI_Type_get_envelope(%D, num_integers=%p, num_addresses=%p, num_datatypes=%p, combiner=%p) failed
+**mpi_type_get_name %D %p %p:MPI_Type_get_name(%D, type_name=%p, resultlen=%p) failed
+**mpi_type_set_attr %D %d %p:MPI_Type_set_attr(%D, type_keyval=%d, attribute_val=%p) failed
+**mpi_type_set_name %D %s:MPI_Type_set_name(%D, type_name=%s) failed
+**mpi_type_match_size %d %d %p:MPI_Type_match_size(typeclass=%d, size=%d, datatype=%p) failed
+**mpi_win_call_errhandler %W %d:MPI_Win_call_errhandler(%W, errorcode=%d) failed
+**mpi_win_create_keyval %p %p %p %p:MPI_Win_create_keyval(win_copy_attr_fn=%p, win_delete_attr_fn=%p, win_keyval=%p, extra_state=%p) failed
+**mpi_win_delete_attr %W %d:MPI_Win_delete_attr(%W, win_keyval=%d) failed
+**mpi_win_free_keyval %p:MPI_Win_free_keyval(win_keyval=%p) failed
+**mpi_win_get_attr %W %d %p %p:MPI_Win_get_attr(%W, win_keyval=%d, attribute_val=%p, flag=%p) failed
+**mpi_win_get_name %W %p %p:MPI_Win_get_name(%W, win_name=%p, resultlen=%p) failed
+**mpi_win_set_attr %W %d %p:MPI_Win_set_attr(%W, win_keyval=%d, attribute_val=%p) failed
+**mpi_win_set_name %W %s:MPI_Win_set_name(%W, win_name=%s) failed
+**mpi_alloc_mem %p %I %p:MPI_Alloc_mem(size=%p, %I, baseptr=%p) failed
+**mpi_comm_create_errhandler %p %p:MPI_Comm_create_errhandler(function=%p, errhandler=%p) failed
+**mpi_comm_get_errhandler %C %p:MPI_Comm_get_errhandler(%C, errhandler=%p) failed
+**mpi_comm_set_errhandler %C %E:MPI_Comm_set_errhandler(%C, %E) failed
+**mpi_file_create_errhandler %p %p:MPI_File_create_errhandler(function=%p, errhandler=%p) failed
+**mpi_file_get_errhandler %F %p:MPI_File_get_errhandler(%F, errhandler=%p) failed
+**mpi_file_set_errhandler %F %E:MPI_File_set_errhandler(%F, %E) failed
+**mpi_finalized %p:MPI_Finalized(flag=%p) failed
+**mpi_get_address %p %p:MPI_Get_address(location=%p, address=%p) failed
+**mpi_info_create %p:MPI_Info_create(info=%p) failed
+**mpi_info_delete %I %s:MPI_Info_delete(%I, key=%s) failed
+**mpi_info_dup %I %p:MPI_Info_dup(%I, newinfo=%p) failed
+**mpi_info_free %p:MPI_Info_free(info=%p) failed
+**mpi_info_get %I %s %d %p %p:MPI_Info_get(%I, key=%s, valuelen=%d, value=%p, flag=%p) failed
+**mpi_info_get_nkeys %I %p:MPI_Info_get_nkeys(%I, nkeys=%p) failed
+**mpi_info_get_nthkey %I %d %p:MPI_Info_get_nthkey(%I, n=%d, key=%p) failed
+**mpi_info_get_valuelen %I %s %p %p:MPI_Info_get_valuelen(%I, key=%s, valuelen=%p, flag=%p) failed
+**mpi_info_set %I %s %s:MPI_Info_set(%I, key=%s, value=%s) failed
+**mpi_pack_external %s %p %d %D %p %d %p:MPI_Pack_external(datarep=%s, inbuf=%p, incount=%d, %D, outbuf=%p, outcount=%d, position=%p) failed
+**mpi_pack_external_size %s %d %D %p:MPI_Pack_external_size(datarep=%s, incount=%d, %D, size=%p) failed
+**mpi_request_get_status %R %p %p:MPI_Request_get_status(%R, flag=%p, status=%p) failed
+**mpi_type_create_darray %d %d %d %p %p %p %p %d %D %p:MPI_Type_create_darray(size=%d, rank=%d, ndims=%d, array_of_gsizes=%p, array_of_distribs=%p, array_of_dargs=%p, array_of_psizes=%p, order=%d, %D, newtype=%p) failed
+**mpi_type_create_hindexed %d %p %p %D %p:MPI_Type_create_hindexed(count=%d, array_of_blocklengths=%p, array_of_displacements=%p, %D, newtype=%p) failed
+**mpi_type_create_hindexed_block %d %d %p %D %p:MPI_Type_create_hindexed_block(count=%d, blocklength=%d, array_of_displacements=%p, %D, newtype=%p) failed
+**mpi_type_create_hvector %d %d %d %D %p:MPI_Type_create_hvector(count=%d, blocklength=%d, stride=%d, %D, newtype=%p) failed
+**mpi_type_create_indexed_block %d %d %p %D %p:MPI_Type_create_indexed_block(count=%d, blocklength=%d, array_of_displacements=%p, %D, newtype=%p) failed
+**mpi_type_create_resized %D %d %d %p:MPI_Type_create_resized(%D, lb=%d, extent=%d, newtype=%p) failed
+**mpi_type_create_struct %d %p %p %p %p:MPI_Type_create_struct(count=%d, array_of_blocklengths=%p, array_of_displacements=%p, array_of_types=%p, newtype=%p) failed
+**mpi_type_create_subarray %d %p %p %p %d %D %p:MPI_Type_create_subarray(ndims=%d, array_of_sizes=%p, array_of_subsizes=%p, array_of_starts=%p, order=%d, %D, newtype=%p) failed
+**mpi_type_get_extent %D %p %p:MPI_Type_get_extent(%D, lb=%p, extent=%p) failed
+**mpi_type_get_extent_x %D %p %p:MPI_Type_get_extent_x(%D, lb=%p, extent=%p) failed
+**mpi_type_get_true_extent %D %p %p:MPI_Type_get_true_extent(%D, lb=%p, true_extent=%p) failed
+**mpi_type_get_true_extent_x %D %p %p:MPI_Type_get_true_extent_x(%D, lb=%p, true_extent=%p) failed
+**mpi_unpack_external %s %p %d %p %p %d %D:MPI_Unpack_external(datarep=%s, inbuf=%p, insize=%d, position=%p, outbuf=%p, outcount=%d, %D) failed
+**mpi_win_create_errhandler %p %p:MPI_Win_create_errhandler(function=%p, errhandler=%p) failed
+**mpi_win_get_errhandler %W %p:MPI_Win_get_errhandler(%W, errhandler=%p) failed
+**mpi_win_set_errhandler %W %E:MPI_Win_set_errhandler(%W, %E) failed
+**mpi_register_datarep %s %p %p %p %p:MPI_Register_datarep(datarep=%s, read_conversion_fn=%p, write_conversion_fn=%p, dtype_file_extent_fn=%p, extra_state=%p) failed
+
+#
+# msmpi functions
+#
+**msmpi_req_set_apc %R %p %p:MSMPI_Request_set_apc(%R, callback_fn=%p, callback_status=%p) failed
+**msmpi_waitsome_interruptible %d %p %p %p %p:MSMPI_Waitsome_interruptible(count=%d, req_array=%p, out_count=%p, indices=%p, status_array=%p) failed
+
+#
+# Compression
+#
+**unableToLoadDLL: Unable to load a dynamically loadable library
+**failureGetProcAddress %d:Call to GetProcAddress failed (errno %d)
+**failureCompressionWorkSpace %d:Call to RtlGetCompressionWorkSpaceSize failed (errno %d)
+**compressionMinimum %d %d:The provided compression threshold of %d was too small. \
+Try again with a threshold no less than %d
+**decompressFailure %d:Decompression of a message failed (errno %d)
+**nullPayloadRequest:The initial clear to send request has an invalid sender request id.
+
+#
+# SMP Awareness
+#
+**frequency:This machine does not support high frequency performance counters.
+**measurementfailed:Measurement of collective failed.
+**nodeids:Unable to read node ids.
+**badenv %s:Invalid value for %s environment variable.
+
+
+#
+# Parsing Util
+#
+**rangedup %s %d:The specified range %s contains duplicate entries; rank %d \
+ appeared more than once
+**invalidrange %s: The specified range %s is invalid
+
+#
+# Dynamic Process related
+#
+**dynamicStartFailedEnv:The dynamic server failed to start. Invalid environment variable value given for MSMPI_ACCEPT_PORT.
+**dynamicStartFailed %d:The dynamic server failed to start with status %d.
+**dynamicNewPortFailed:The dynamic server failed to open a new port.
+**dynamicInvalidPort %s:No server is accepting connection on port %s.
+**dynamicInvalidBindingString %s:The specified binding %s is invalid.
+**dynamicBindingFromStringFailed %s %d:Failed to create RPC binding from the specified port string %s (errno %d).
+**dynamicBindingSetAuthFailed %d:Failed to set authentication on the RPC binding (errno %d).
+**dynamicTimedOut:The accept server is too busy.
+**dynamicCreateContextFailed %d:Failed to establish context with the server (errno %d).
+**dynamicInternalFailure:Internal error while trying to collect process groups information from the local communicator.
+**dynamicInitializeAsyncFailed %d:Failed to initialize Asynchronous RPC (errno %d).
+**dynamicWaitForAsyncFailed %d:Failed to initialize asynchronous RPC (errno %d).
+**dynamicCompleteAsyncFailed %d:Failed to complete asynchronous RPC (errno %d).
+**dynamicExchangeInfoFailed %d:Failed to exchange information with the Accept Server (errno %d).
+**dynamicRootConnectAcceptFailed:The root of this operation indicated that it experienced an error.
+**dynamicClientRPCFailed:An error on the client side resulted in the cancellation of the RPC call.
+
+#
+# Print environment block
+#
+**badfile %s:The requested file could not be opened to write the environment block. %s.
+**envexpand:The environment variables in MSMPI_PRINT_ENVIRONMENT_BLOCK_FILE could not be expanded.
+**getenvfailed:Unable to get the environment block for the process.
+**freeenvfailed:Unable to free the environment block for the process.
+
+#
+# Casting values to type int results in truncation
+#
+**packtruncate:The size of the packed data was larger than could be represented using an integer.
+**unpacktruncate:The size of the unpacked data was larger than could be represented using an integer.
+
+# -----------------------------------------------------------------------------
+# The following names are defined but not used (see the -careful option
+# for extracterrmsgs) (still to do: move the undefined names here)
diff --git a/src/mpi/common/errutil.cpp b/src/mpi/common/errutil.cpp
new file mode 100644
index 0000000..740aaaa
--- /dev/null
+++ b/src/mpi/common/errutil.cpp
@@ -0,0 +1,732 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "precomp.h"
+
+/* defmsg is generated automatically from the source files and contains
+ all of the error messages */
+#include "defmsg.h"
+
+#include "mpidump.h"
+#include
+#include
+
+
+/*
+ * Instance-specific error messages are stored in a ring. The elements of this
+ * ring are MPIR_Err_msg_t structures, which contain the following fields:
+ * id - this is used to check that the entry is valid; it is computed from
+ * the error code and location in the ring. The routine
+ * ErrcodeToId( errcode, &id ) is used to extract the id from an
+ * error code and
+ * ErrcodeCreateID( class, generic, msg, &id, &seq ) is used to
+ * create the id from an error class, generic index, and message
+ * string. The "seq" field is inserted into the error code as a
+ * check.
+ *
+ * prev_error - The full MPI error code of the previous error attached
+ * to this list of errors, or MPI_SUCCESS (which has value 0).
+ * This is the last error code, not the index in the ring of the last
+ * error code. That's the right choice, because we want to ensure
+ * that the value is valid if the ring overflows. In addition,
+ * we allow this to be an error CLASS (one of the predefined MPI
+ * error classes). This is particularly important for
+ * MPI_ERR_IN_STATUS, which may be returned as a valid error code.
+ * (classes are valid error codes).
+ *
+ * use_user_error_code and user_error_code - Used to handle a few cases
+ * in MPI where a user-provided routine returns an error code;
+ * this allows us to provide information about the chain of
+ * routines that were involved, while returning the user's preferred
+ * error value to the user's environment.
+ *
+ * location - A string that indicates what function and line number
+ * where the error code was set.
+ *
+ * msg - A message about the error. This may be instance-specific (e.g.,
+ * it may have been created at the time the error was detected with
+ * information about the parameters that caused the error).
+ *
+ * Note that both location and msg are defined as length MAX_xxx+1. This
+ * isn't really necessary (at least for msg), since the MPI standard
+ * requires that MAX_MPI_ERROR_STRING include the space for the trailing null,
+ * but using the extra byte makes the code a little simpler.
+ *
+ * The "id" value is used to keep a sort of "checkvalue" to ensure that the
+
+ * error code that points at this message is in fact for this particular
+ * message. This is used to handle the unlikely but possible situation where
+ * so many error messages are generated that the ring is overlapped.
+ *
+ * The message arrays are preallocated to ensure that there is space for these
+ * messages when an error occurs. One variation would be to allow these
+ * to be dynamically allocated, but it is probably better to either preallocate
+ * these or turn off all error message generation (which will eliminate these
+ * arrays).
+ *
+ * One possible alternative is to use the message ring *only* for instance
+ * messages and use the predefined messages in-place for the generic
+ * messages. This approach is used to provide uniform handling of all
+ * error messages.
+ */
+
+#define MAX_LOCATION_LEN 63
+
+/* The maximum error string in this case may be a multi-line message,
+ constructed from multiple entries in the error message ring. The
+ individual ring messages should be shorter than MPI_MAX_ERROR_STRING,
+ perhaps as small as 256. We define a separate value for the error lines.
+ */
+#define MPIR_MAX_ERROR_LINE 512
+
+/* See the description above for the fields in this structure */
+struct MPIR_Err_msg_t
+{
+ int id;
+ int prev_error;
+
+ int use_user_error_code;
+ int user_error_code;
+
+ char location[MAX_LOCATION_LEN+1];
+ char msg[MPIR_MAX_ERROR_LINE+1];
+};
+
+static MPIR_Err_msg_t ErrorRing[128];
+static volatile long error_ring_loc = 0;
+
+#define GET_RING_INDEX(code) \
+ (ERROR_GET_INDEX(code) % _countof(ErrorRing))
+
+static MPIR_Err_to_string_fn MPIR_Err_code_to_string;
+
+void
+MPIR_Err_set_dynerr_fn(
+ _In_ MPIR_Err_to_string_fn fn
+ )
+{
+ MPIU_Assert(MPIR_Err_code_to_string == nullptr);
+ MPIR_Err_code_to_string = fn;
+}
+
+
+/* ------------------------------------------------------------------------- */
+/* The following block of code manages the instance-specific error messages */
+/* ------------------------------------------------------------------------- */
+
+#if DBG
+
+static int
+ErrcodeIsValid(
+ _In_ int errcode
+ )
+{
+ int idx;
+
+ /* If the errcode is a class, then it is valid */
+ if (errcode >= 0 && errcode <= MPICH_ERR_LAST_CLASS)
+ return TRUE;
+
+ /* check for extra bits set. note: dynamic error codes are not valid here */
+ if((errcode & ~(ERROR_CLASS_MASK | ERROR_INDEX_MASK | ERROR_FATAL_FLAG)) != ERROR_COD_FLAG)
+ return FALSE;
+
+ idx = GET_RING_INDEX(errcode);
+ if (ErrorRing[idx].id != errcode)
+ return FALSE;
+
+ return TRUE;
+}
+
+#endif // DBG
+
+
+_Post_satisfies_( return != MPI_SUCCESS )
+int
+MPIR_Err_get_user_error_code(
+ _In_ int errcode
+ )
+{
+ int idx;
+
+ /* check for class only error code */
+ if(!ERROR_IS_CODE(errcode))
+ return errcode;
+
+ /* Can we get a more specific error message */
+ idx = GET_RING_INDEX(errcode);
+ if (ErrorRing[idx].id == errcode && ErrorRing[idx].use_user_error_code)
+ return ErrorRing[idx].user_error_code;
+
+ return errcode;
+}
+
+
+/*
+ * Given a message string abbreviation (e.g., one that starts "**"),
+ * return the corresponding index.
+ */
+static int __cdecl
+compare_key_map(
+ _In_ const void* pv1,
+ _In_ const void* pv2
+ )
+{
+ const char* key = (const char*)pv1;
+ const msgpair* pair = (const msgpair*)pv2;
+ return strcmp( key, pair->key );
+}
+
+static int
+FindMessageIndex(
+ _In_z_ const char *msg
+ )
+{
+ const void* p;
+ p = bsearch(
+ msg,
+ errors_map,
+ _countof(errors_map),
+ sizeof(errors_map[0]),
+ compare_key_map
+ );
+
+ MPIU_Assert(p != nullptr);
+
+ return (int)((const msgpair*)p - errors_map);
+}
+
+
+/* ------------------------------------------------------------------------ */
+/* The following routines create an MPI error code, handling optional, */
+/* instance-specific error message information. There are two key routines:*/
+/* MPIR_Err_create_code - Create the error code; this is the routine used*/
+/* by most routines */
+/* MPIR_Err_create_code_valist - Create the error code; accept a valist */
+/* instead of a variable argument list (this is */
+/* used to allow this routine to be used from */
+/* within another varargs routine) */
+/* ------------------------------------------------------------------------ */
+/* --BEGIN ERROR MACROS-- */
+
+_Post_satisfies_( return != MPI_SUCCESS )
+MPI_RESULT
+MPIR_Err_create_code(
+ _In_ int lastcode,
+ _In_ int fatal,
+ _In_ int error_class,
+ _Printf_format_string_ const char specific_msg[],
+ ...
+ )
+{
+ int rc;
+ va_list Argp;
+ va_start(Argp, specific_msg);
+ __analysis_assert(error_class != 0);
+ rc = MPIR_Err_create_code_valist(
+ lastcode,
+ fatal,
+ error_class,
+ specific_msg,
+ Argp
+ );
+
+ __analysis_assume(rc != 0);
+ va_end(Argp);
+ return rc;
+}
+
+/* --END ERROR MACROS-- */
+
+static int
+TakeDump(
+ _In_ EXCEPTION_POINTERS* exp,
+ _In_ int dumpMode
+ )
+{
+ MINIDUMP_EXCEPTION_INFORMATION exrParam;
+ exrParam.ExceptionPointers = exp;
+ exrParam.ThreadId = GetCurrentThreadId();
+ exrParam.ClientPointers = FALSE;
+
+ MINIDUMP_TYPE dumpType;
+ if( dumpMode >= MsmpiDumpFull )
+ {
+ dumpType = MiniDumpWithFullMemory;
+ }
+ else
+ {
+ dumpType = MiniDumpNormal;
+ }
+
+ wchar_t dumpPath[MAX_PATH];
+ DWORD err = MPIU_Getenv( L"MSMPI_DUMP_PATH",
+ dumpPath,
+ _countof( dumpPath ) );
+ if( err != NOERROR )
+ {
+ dumpPath[0] = '\0';
+ }
+
+ HANDLE tempDumpFile = CreateTempDumpFile(
+ GetCurrentProcess(),
+ GetCurrentProcessId(),
+ dumpType,
+ dumpPath,
+ &exrParam
+ );
+
+ if( tempDumpFile != INVALID_HANDLE_VALUE )
+ {
+ CreateFinalDumpFile(
+ tempDumpFile,
+ env_to_int( L"PMI_RANK", -1, -1 ),
+ dumpPath,
+ env_to_int( L"CCP_JOBID", 0, 0 ),
+ env_to_int( L"CCP_TASKID", 0, 0 ),
+ env_to_int( L"CCP_TASKINSTANCEID", 0, 0 )
+ );
+ CloseHandle( tempDumpFile );
+ }
+ return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+
+void CreateDumpFileIfConfigured(
+ _In_ EXCEPTION_POINTERS* exp
+ )
+{
+ enum MSMPI_DUMP_MODE dumpMode = GetDumpMode();
+ if (dumpMode != MsmpiDumpNone)
+ {
+ TakeDump(exp, dumpMode);
+ }
+}
+
+
+/*
+ * This is the real routine for generating an error code. It takes
+ * a va_list so that it can be called by any routine that accepts a
+ * variable number of arguments.
+ */
+_Post_satisfies_( return != MPI_SUCCESS )
+MPI_RESULT
+MPIR_Err_create_code_valist(
+ _In_ int lastcode,
+ _In_ int fatal,
+ _In_ int error_class,
+ _Printf_format_string_ const char specific_msg[],
+ _In_ va_list Argp
+ )
+{
+ int user_error_code = -1;
+ int specific_idx;
+ int ring_idx;
+ long ring_idx_base;
+ const char* specific_fmt;
+ char* ring_msg;
+
+ /* Check that lastcode is valid */
+#if DBG
+ MPIU_Assert(ErrcodeIsValid(lastcode));
+#endif //DBG
+ MPIU_Assert(specific_msg != nullptr);
+
+ if( IsDebuggerPresent() )
+ {
+ DebugBreak();
+ }
+
+ enum MSMPI_DUMP_MODE dumpMode = GetDumpMode();
+ if( dumpMode != MsmpiDumpNone && lastcode == MPI_SUCCESS )
+ {
+ __try
+ {
+ RaiseException( 0, 0, 0, nullptr );
+ }
+ __except( TakeDump( GetExceptionInformation(), dumpMode ) )
+ OACR_WARNING_SUPPRESS(EXCEPT_BLOCK_EMPTY,"mpicr: dump handler")
+ {
+ }
+ }
+
+ if (error_class == MPI_ERR_OTHER)
+ {
+ if (ERROR_GET_CLASS(lastcode) != MPI_SUCCESS)
+ {
+ /* If the last class is more specific (and is valid), then pass it through */
+ error_class = ERROR_GET_CLASS(lastcode);
+ }
+ }
+
+ /* Handle special case of MPI_ERR_IN_STATUS. According to the standard,
+ the code must be equal to the class. See section 3.7.5.
+ Information on the particular error is in the MPI_ERROR field
+ of the status. */
+ if (error_class == MPI_ERR_IN_STATUS)
+ return MPI_ERR_IN_STATUS;
+
+ ring_idx_base = ::InterlockedIncrement(&error_ring_loc);
+
+ /* Get the next entry in the ring */
+ ring_idx = ring_idx_base % _countof(ErrorRing);
+
+ ring_msg = ErrorRing[ring_idx].msg;
+
+ specific_idx = FindMessageIndex(specific_msg);
+ specific_fmt = errors_map[specific_idx].fmt;
+
+ //
+ // CompareString does not check for null terminating character
+ // when length of string is given explicitly
+ //
+ int len = _countof("**user") - 1;
+ if( MPIU_Strlen( errors_map[specific_idx].key ) >= static_cast<size_t>(len) &&
+ CompareStringA( LOCALE_INVARIANT,
+ 0,
+ errors_map[specific_idx].key,
+ len,
+ "**user",
+ len ) == CSTR_EQUAL )
+ {
+ /* This is a special case. The format is ..., "**userxxx %d", intval);
+ In this case we must save the user value because we store it explicitly in the ring.
+ We do this here because we cannot both access the user error code and pass the argp
+ to vsnprintf_mpi. */
+ user_error_code = va_arg(Argp,int);
+ ErrorRing[ring_idx].use_user_error_code = 1;
+ ErrorRing[ring_idx].user_error_code = user_error_code;
+
+ // errors_map is generated by a perl script but since its test code isn't compiled
+ // there is no opportunity to do specifier to type checking. Ideally, testerr.c would
+ // be included in the project build so oacr could potentially find mismatches
+ OACR_WARNING_SUPPRESS(PRINTF_FORMAT_STRING_PARAM_NEEDS_REVIEW, "format is determined at runtime");
+ MPIU_Snprintf( ring_msg, MPIR_MAX_ERROR_LINE, specific_fmt, user_error_code );
+ }
+ else
+ {
+ OACR_WARNING_SUPPRESS(PRINTF_FORMAT_STRING_PARAM_NEEDS_REVIEW, "format is determined at runtime");
+ MPIR_Err_vsnprintf_mpi( ring_msg, MPIR_MAX_ERROR_LINE, specific_fmt, Argp );
+
+ if (ERROR_IS_CODE(lastcode))
+ {
+ int last_ring_idx;
+
+ last_ring_idx = GET_RING_INDEX(lastcode);
+ if (ErrorRing[last_ring_idx].id == lastcode)
+ {
+ if (ErrorRing[last_ring_idx].use_user_error_code)
+ {
+ ErrorRing[ring_idx].use_user_error_code = 1;
+ ErrorRing[ring_idx].user_error_code = ErrorRing[last_ring_idx].user_error_code;
+ }
+ }
+ }
+ }
+
+ ring_msg[MPIR_MAX_ERROR_LINE] = '\0';
+
+ /* Set the previous code. */
+ ErrorRing[ring_idx].prev_error = lastcode;
+ ErrorRing[ring_idx].location[0] = '\0';
+
+ /* Make sure error_index doesn't get so large that it sets the dynamic bit. */
+ int error_index = (ring_idx_base << ERROR_INDEX_SHIFT) & ERROR_INDEX_MASK;
+
+ int err_code = error_class | ERROR_COD_FLAG | error_index;
+ if (fatal || ERROR_IS_FATAL(lastcode))
+ {
+ err_code |= ERROR_FATAL_FLAG;
+ }
+
+ ErrorRing[ring_idx].id = err_code;
+ __analysis_assume( err_code != MPI_SUCCESS );
+ return err_code;
+}
+
+
+/*
+ * Accessor routines for the predefined messages. These can be
+ * used by the other routines (such as MPI_Error_string) to
+ * access the messages in this file, or the messages that may be
+ * available through any message catalog facility
+ */
+C_ASSERT(_countof(class_to_index) == MPICH_ERR_LAST_CLASS + 1);
+
+_Ret_z_
+static const char*
+get_class_msg(
+ _In_ int error_class
+ )
+{
+ if (error_class >= 0 && error_class < _countof(class_to_index))
+ {
+ return errors_map[class_to_index[error_class]].fmt;
+ }
+ else
+ {
+ return "Unknown error class";
+ }
+}
+
+/* Given an error code, print the stack of messages corresponding to this
+ error code. */
+static void
+MPIR_Err_print_stack_string(
+ _In_ int errcode,
+ _Out_writes_z_(maxlen) char *str,
+ _In_ size_t maxlen
+ )
+{
+ size_t len;
+ const char *str_orig = str;
+ size_t max_location_len = 0;
+ int tmp_errcode = errcode;
+ int error_class;
+
+ /* make sure is not a dynamic error code or a simple error class */
+ MPIU_Assert(!ERROR_IS_DYN(errcode));
+ MPIU_Assert(ERROR_GET_CLASS(errcode) != errcode);
+ MPIU_Assert(maxlen > 1);
+ *str = '\0';
+
+
+ /* Find the longest location string in the stack */
+ while (ERROR_IS_CODE(tmp_errcode))
+ {
+ int ring_idx = GET_RING_INDEX(tmp_errcode);
+
+ if (ErrorRing[ring_idx].id != tmp_errcode)
+ break;
+
+ len = MPIU_Strlen( ErrorRing[ring_idx].location,
+ _countof(ErrorRing[ring_idx].location) );
+ max_location_len = max(max_location_len, len);
+ tmp_errcode = ErrorRing[ring_idx].prev_error;
+ }
+
+ max_location_len += 2; /* add space for the ": " */
+
+ /* print the error stack */
+ while (ERROR_IS_CODE(errcode))
+ {
+ size_t nchrs;
+
+ int ring_idx = GET_RING_INDEX(errcode);
+
+ if (ErrorRing[ring_idx].id != errcode)
+ break;
+
+ len = MPIU_Snprintf(str, maxlen, "%s", ErrorRing[ring_idx].location);
+ maxlen -= len;
+ str += len;
+
+ nchrs = max_location_len -
+ MPIU_Strlen( ErrorRing[ring_idx].location,
+ _countof(ErrorRing[ring_idx].location) ) - 2;
+ while (nchrs > 0 && maxlen > 0)
+ {
+ *str++ = '.';
+ nchrs--;
+ maxlen--;
+ }
+
+ len = MPIU_Snprintf(str, maxlen, "%s\n", ErrorRing[ring_idx].msg);
+ maxlen -= len;
+ str += len;
+
+ errcode = ErrorRing[ring_idx].prev_error;
+ }
+
+ /* FIXME: This is wrong. The only way that you can get here without
+ errcode being MPI_SUCCESS is if there is an error in the
+ processing of the error codes. Dropping through into the next
+ level of code (particularly when that code doesn't check for
+ valid error codes!) is erroneous */
+ if (errcode == MPI_SUCCESS)
+ {
+ goto fn_exit;
+ }
+
+ error_class = ERROR_GET_CLASS(errcode);
+
+ if (error_class <= MPICH_ERR_LAST_CLASS)
+ {
+ len = MPIU_Snprintf(str, maxlen, "(unknown)(): %s\n", get_class_msg(error_class));
+ maxlen -= len;
+ str += len;
+ }
+ else
+ {
+ len = MPIU_Snprintf(str, maxlen,
+ "Error code contains an invalid class (%d)\n",
+ error_class);
+ maxlen -= len;
+ str += len;
+ }
+
+fn_exit:
+ if (str_orig != str)
+ {
+ str--;
+ *str = '\0'; /* erase the last \n */
+ }
+}
+
+
+void
+MPIR_Err_get_string(
+ _In_ int errorcode,
+ _Out_writes_z_(length) char * msg,
+ _In_ size_t length
+ )
+{
+ size_t len;
+ size_t num_remaining = length;
+
+ MPIU_Assert(num_remaining > 0);
+
+ /* Convert the code to a string. The cases are:
+ simple class. Find the corresponding string.
+
+ if (user code)
+ {
+ go to code that extracts user error messages
+ }
+ else
+ {
+ is specific message code set and available? if so, use it
+ else use generic code (lookup index in table of messages)
+ }
+ */
+ if (ERROR_IS_DYN(errorcode))
+ {
+ /* This is a dynamically created error code (e.g., with MPI_Err_add_class) */
+
+ /* If a dynamic error code was created, the function to convert
+ them into strings has been set. Check to see that it was; this
+ is a safeguard against a bogus error code */
+ const char* s = nullptr;
+ if (MPIR_Err_code_to_string)
+ {
+ /* FIXME: not internationalized */
+ s = MPIR_Err_code_to_string(errorcode);
+ if(s != nullptr)
+ {
+ MPIU_Strncpy(msg, s, num_remaining);
+ }
+ }
+
+ if(s == nullptr)
+ {
+ len = MPIU_Snprintf(msg, num_remaining, "Undefined dynamic error code (%d)", errorcode);
+ msg[num_remaining-1] = '\0';
+ }
+ }
+ else if (ERROR_GET_CLASS(errorcode) == errorcode)
+ {
+ MPIU_Strncpy(msg, get_class_msg( errorcode ), num_remaining);
+ }
+ else
+ {
+ /* print the class message first */
+ MPIU_Strncpy(msg, get_class_msg(ERROR_GET_CLASS(errorcode)), num_remaining);
+
+ msg[num_remaining - 1] = '\0';
+ len = MPIU_Strlen( msg, num_remaining );
+ msg += len;
+ num_remaining -= len;
+
+ /* then print the stack or the last specific error message */
+ MPIU_Strncpy(msg, ", error stack:\n", num_remaining);
+ len = MPIU_Strlen( msg, num_remaining );
+ msg += len;
+ num_remaining -= len;
+ MPIR_Err_print_stack_string(errorcode, msg, num_remaining);
+ }
+}
+
+_Ret_z_
+const char*
+get_error_string(
+ _In_ int error
+ )
+{
+ wchar_t* wmsg;
+ static char msg[1024];
+
+ int n;
+ OACR_REVIEWED_CALL(
+ mpicr,
+ n = FormatMessageW(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS |
+ FORMAT_MESSAGE_MAX_WIDTH_MASK, // dwFlags
+ nullptr, // lpSource
+ error, // dwMessageId,
+ 0, // dwLanguageId
+ reinterpret_cast<LPWSTR>(&wmsg),// lpBuffer
+ 0, // nSize
+ nullptr )); // Arguments
+
+ if( n != 0 )
+ {
+ n = WideCharToMultiByte(
+ CP_UTF8,
+ 0,
+ wmsg,
+ -1,
+ msg,
+ sizeof(msg),
+ nullptr,
+ nullptr
+ );
+ LocalFree( wmsg );
+ }
+ if( n == 0 )
+ {
+ msg[0] = '\0';
+ }
+
+ return msg;
+}
+
+
+//
+// Summary:
+// Traces the MPI Error string and error class for the specified
+// mpi error code.
+//
+ULONG MpiTraceError(
+ REGHANDLE RegHandle,
+ PCEVENT_DESCRIPTOR Descriptor,
+ int ErrorCode
+ )
+{
+ const ULONG EventDataCount = 2;
+ EVENT_DATA_DESCRIPTOR EventData[ EventDataCount ];
+ char message[ MPI_MAX_ERROR_STRING ];
+ int error_class;
+
+ //make sure the string starts null terminated.
+ message[0] = 0;
+
+ //
+ // Get the error class
+ //
+ error_class = ERROR_GET_CLASS(ErrorCode);
+
+ MPIR_Err_get_string( ErrorCode, message, MPI_MAX_ERROR_STRING);
+
+ EventDataDescCreate(&EventData[0], &error_class, sizeof(error_class) );
+ EventDataDescCreate(&EventData[1], message, (ULONG)strlen(message) + sizeof('\0') );
+
+ return EventWrite(RegHandle, Descriptor, EventDataCount, EventData);
+}
diff --git a/src/mpi/common/ex.cpp b/src/mpi/common/ex.cpp
new file mode 100644
index 0000000..34a0bb5
--- /dev/null
+++ b/src/mpi/common/ex.cpp
@@ -0,0 +1,307 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * Copyright (C) 1997 University of Chicago.
+ * See COPYRIGHT notice in top-level directory.
+ */
+
+#include "precomp.h"
+#include "ex.h"
+#include
+
+_Success_(return==NO_ERROR)
+static
+int
+WINAPI
+ExpKeyZeroCompletionProcessor(
+ _In_opt_ DWORD BytesTransfered,
+ _In_ PVOID pOverlapped
+ );
+
+
+#ifdef EXSINGLETONE
+//
+// The only completion port, supporting all events
+//
+static HANDLE s_port;
+#endif
+
+//
+// The registered processors. This module supports up to 4 processors where
+// processor zero is pre-registered for overlapped operations completion.
+// the completion processor is registered by the completion key.
+//
+static ExCompletionProcessor s_processors[EX_KEY_MAX] = {
+
+ ExpKeyZeroCompletionProcessor,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr
+};
+
+
+static inline BOOL
+IsValidSet(
+ _In_ ExSetHandle_t Set
+ )
+{
+ return (Set != nullptr && Set != INVALID_HANDLE_VALUE);
+}
+
+_Success_(return != nullptr)
+_Ret_valid_
+ExSetHandle_t
+ExCreateSet(
+ void
+ )
+{
+ ExSetHandle_t Set;
+ Set = CreateIoCompletionPort(
+ INVALID_HANDLE_VALUE, // FileHandle
+ nullptr, // ExistingCompletionPort
+ 0, // CompletionKey
+ MAXDWORD // NumberOfConcurrentThreads
+ );
+
+ return Set;
+}
+
+
+void
+ExCloseSet(
+ _In_ _Post_invalid_ ExSetHandle_t Set
+ )
+{
+ MPIU_Assert(IsValidSet(Set));
+ CloseHandle(Set);
+}
+
+
+void
+ExRegisterCompletionProcessor(
+ _In_ EX_PROCESSOR_KEYS Key,
+ _In_ ExCompletionProcessor pfnCompletionProcessor
+ )
+{
+ MPIU_Assert(Key > 0);
+ MPIU_Assert(Key < RTL_NUMBER_OF(s_processors));
+ MPIU_Assert(s_processors[Key] == nullptr);
+ s_processors[Key] = pfnCompletionProcessor;
+}
+
+
+void
+ExUnregisterCompletionProcessor(
+ _In_ EX_PROCESSOR_KEYS Key
+ )
+{
+ MPIU_Assert(Key > 0);
+ MPIU_Assert(Key < RTL_NUMBER_OF(s_processors));
+ MPIU_Assert(s_processors[Key] != nullptr);
+ s_processors[Key] = nullptr;
+}
+
+
+void
+ExPostCompletion(
+ _In_ ExSetHandle_t Set,
+ _In_ EX_PROCESSOR_KEYS Key,
+ _Inout_opt_ PVOID pOverlapped,
+ _In_ DWORD BytesTransfered
+ )
+{
+ MPIU_Assert(IsValidSet(Set));
+
+ for(;;)
+ {
+ if(PostQueuedCompletionStatus(Set, BytesTransfered, Key, (OVERLAPPED*)pOverlapped))
+ return;
+
+ MPIU_Assert(GetLastError() == ERROR_NO_SYSTEM_RESOURCES);
+ Sleep(10);
+ }
+}
+
+
+ULONG
+ExGetPortValue(
+ _In_ ExSetHandle_t Set
+ )
+{
+ MPIU_Assert(IsValidSet(Set));
+ return HandleToUlong(Set);
+}
+
+_Success_(return==MPI_SUCCESS)
+int
+ExInitialize(
+ void
+ )
+{
+#ifdef EXSINGLETONE
+ MPIU_Assert(s_port == nullptr);
+ s_port = CreateIoCompletionPort(
+ INVALID_HANDLE_VALUE, // FileHandle
+ nullptr, // ExistingCompletionPort
+ 0, // CompletionKey
+ 0 // NumberOfConcurrentThreads
+ );
+
+ if(s_port != nullptr)
+ return MPI_SUCCESS;
+
+ return MPI_ERR_INTERN;
+#endif
+
+ return MPI_SUCCESS;
+}
+
+
+void
+ExFinalize(
+ void
+ )
+{
+#ifdef EXSINGLETONE
+ MPIU_Assert(s_port != nullptr);
+ CloseHandle(s_port);
+ s_port = nullptr;
+#endif
+}
+
+_Success_(return==MPI_SUCCESS || return==MPI_ERR_PENDING)
+int
+ExProcessCompletions(
+ _In_ ExSetHandle_t set,
+ _In_ DWORD timeout,
+ _In_opt_ bool interruptible
+ )
+{
+ MPIU_Assert(IsValidSet(set));
+
+ BOOL fSucc;
+ DWORD bytesTransfered;
+ ULONG_PTR key;
+ OVERLAPPED* pOverlapped;
+ for (;;)
+ {
+ fSucc = GetQueuedCompletionStatus(
+ set,
+ &bytesTransfered,
+ &key,
+ &pOverlapped,
+ timeout
+ );
+
+ if(!fSucc && pOverlapped == nullptr)
+ {
+ //
+ // Return success on timeout per caller request. The Executive progress
+ // engine will not wait for the async processing to complete
+ //
+ DWORD gle = GetLastError();
+ if (gle == WAIT_TIMEOUT)
+ {
+ if( timeout == 0 )
+ {
+ return MPI_SUCCESS;
+ }
+ return HRESULT_FROM_WIN32(WAIT_TIMEOUT);
+ }
+
+ //
+ // Io Completion port internal error, try again
+ //
+ continue;
+ }
+
+ if( key == EX_KEY_WAIT_INTERRUPT )
+ {
+ if( interruptible == false )
+ {
+ continue;
+ }
+ return MPI_ERR_PENDING;
+ }
+
+ MPIU_Assert(key < RTL_NUMBER_OF(s_processors));
+ MPIU_Assert(s_processors[key] != nullptr);
+
+ //
+ // Call the completion processor and return the result.
+ //
+ return s_processors[key](bytesTransfered, pOverlapped);
+ }
+}
+
+
+//----------------------------------------------------------------------------
+//
+// Preregistered completion processor for Key-Zero
+//
+
+_Success_(return==NO_ERROR)
+static
+int
+WINAPI
+ExpKeyZeroCompletionProcessor(
+ _In_opt_ DWORD /*BytesTransfered*/,
+ _In_ PVOID pOverlapped
+ )
+{
+ EXOVERLAPPED* pov = CONTAINING_RECORD(pOverlapped, EXOVERLAPPED, ov);
+ return ExCompleteOverlapped(pov);
+}
+
+
+void
+ExPostOverlapped(
+ _In_ ExSetHandle_t Set,
+ _Inout_ EXOVERLAPPED* pOverlapped
+ )
+{
+ MPIU_Assert(IsValidSet(Set));
+
+ ExPostCompletion(
+ Set,
+ EX_KEY_RESERVED, // Key,
+ &pOverlapped->ov,
+ 0 // BytesTransfered
+ );
+}
+
+
+void
+ExAttachHandle(
+ _In_ ExSetHandle_t Set,
+ _In_ HANDLE Handle,
+ _In_opt_ EX_PROCESSOR_KEYS Key
+ )
+{
+ MPIU_Assert(IsValidSet(Set));
+ MPIU_Assert(s_processors[Key] != nullptr);
+
+ for(;;)
+ {
+ HANDLE hPort;
+ hPort = CreateIoCompletionPort(
+ Handle, // FileHandle
+ Set, // ExistingCompletionPort
+ Key, // CompletionKey
+ 0 // NumberOfConcurrentThreads
+ );
+
+ if(hPort != nullptr)
+ {
+ MPIU_Assert(hPort == Set);
+ return;
+ }
+
+ MPIU_Assert(GetLastError() == ERROR_NO_SYSTEM_RESOURCES);
+ Sleep(10);
+ }
+}
diff --git a/src/mpi/common/ex.h b/src/mpi/common/ex.h
new file mode 100644
index 0000000..197ff9a
--- /dev/null
+++ b/src/mpi/common/ex.h
@@ -0,0 +1,461 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#ifndef EX_H
+#define EX_H
+
+
+//----------------------------------------------------------------------------
+//
+// The Executive, a generic progress engine
+//
+// Overview:
+// The generic progress engine, the Executive, implements a simple asynchronous
+// processing model. In this model records indicating events get queued up for
+// processing. The Executive invokes the appropriate registered handler for the
+// event to take further action if required. The executive exits when a sequence
+// of events, representing a complete logical function is complete.
+//
+// This model is a producers-consumers model with a single events queue. The
+// events are being processed in the order they have been queued.
+// *** temporary extended to support multiple queues until the clients can ***
+// *** use a single event queue. (e.g., PMI client implementation) ***
+//
+// The function ExProcessCompletions is the heart of the consumers model. It
+// dequeues the events records and invokes the appropriate completion processor.
+// Multiple threads can call this function for processing. The Executive will
+// run as many concurrent threads as there are processors in the system. When
+// concurrent threads are running, the completion processors should be able to
+// handle the concurrency and out-of-order execution.
+//
+// The Executive pre-registers a completion processor for Key-Zero with a simple
+// handler signature. see description below.
+//
+//----------------------------------------------------------------------------
+
+
+//
+// ExSetHandle_t *** temp extension ***
+//
+// Represents the set object
+//
+typedef HANDLE ExSetHandle_t;
+#define EX_INVALID_SET nullptr
+
+//to get NTSTATUS
+#include
+
+
+//
+// ExCreateSet *** temp extension ***
+//
+// Create the set object.
+// An EX_INVALID_SET value is returned for an out of memory condition.
+//
+_Success_(return != nullptr)
+_Ret_valid_
+ExSetHandle_t
+ExCreateSet(
+ void
+ );
+
+//
+// ExCloseSet *** temp extension ***
+//
+// Close the set object.
+//
+void
+ExCloseSet(
+ _In_ _Post_invalid_ ExSetHandle_t Set
+ );
+
+//
+// ExCompletionProcessor, function prototype
+//
+// The completion processor function is called by ExProcessCompletions function
+// to process a completion event. The completion processor indicates whether the
+// sequence of asynchronous events is complete or whether further processing is
+// required.
+//
+// Parameters:
+// BytesTransferred
+// A DWORD value posted to the completion port. Usually the number of bytes
+// transferred in this operation
+//
+// pOverlapped
+// A pointer value posted to the completion port. Usually a pointer to the
+// OVERLAPPED structure
+//
+// Return Value:
+// MPI error value indicating the result of the "logical" async function.
+// This value is meaningful only when the completion processor returns TRUE.
+//
+typedef
+_Success_(return==MPI_SUCCESS)
+int
+(WINAPI * ExCompletionProcessor)(
+ DWORD BytesTransferred,
+ PVOID pOverlapped
+ );
+
+
+//
+// Key values used when calling ExRegisterCompletionProcessor.
+//
+enum EX_PROCESSOR_KEYS
+{
+ EX_KEY_WAIT_INTERRUPT = -1,
+ EX_KEY_RESERVED = 0,
+ EX_KEY_SHM_NOTIFY_CONNECTION,
+ EX_KEY_SHM_NOTIFY_MESSAGE,
+ EX_KEY_PROGRESS_WAKEUP,
+ EX_KEY_ND,
+ EX_KEY_CONNECT_REQUEST,
+ EX_KEY_CONNECT_COMPLETE,
+ EX_KEY_DEFER_CONNECT,
+ EX_KEY_DEFER_WRITE,
+ EX_KEY_MAX
+};
+
+
+//
+// ExRegisterCompletionProcessor
+//
+// Resister a completion processor for a specific Key.
+// N.B. Current implementation supports keys 0 - 3, where key 0 is reserved.
+//
+void
+ExRegisterCompletionProcessor(
+ _In_ EX_PROCESSOR_KEYS Key,
+ _In_ ExCompletionProcessor pfnCompletionProcessor
+ );
+
+
+//
+// ExUnregisterCompletionProcessor
+//
+// Remove a registered completion processor
+//
+void
+ExUnregisterCompletionProcessor(
+ _In_ EX_PROCESSOR_KEYS Key
+ );
+
+
+//
+// ExPostCompletion
+//
+// Post an event completion to the completion queue. The appropriate
+// completion processor will be invoked by ExProcessCompletions thread
+// with the passed in parameters.
+//
+void
+ExPostCompletion(
+ _In_ ExSetHandle_t Set,
+ _In_ EX_PROCESSOR_KEYS Key,
+ _Inout_opt_ PVOID pOverlapped,
+ _In_ DWORD BytesTransferred
+ );
+
+
+//
+// ExGetPortValue
+//
+// Returns the value of the completion queue handle
+//
+ULONG
+ExGetPortValue(
+ _In_ ExSetHandle_t Set
+ );
+
+
+//
+// ExProcessCompletions
+//
+// Process all completion event types by invoking the appropriate completion
+// processor function. This routine continues to process all events until an
+// asynchronous sequence is complete (function with several async stages).
+// If the caller indicated "no blocking" this routine will exit when no more
+// events to process are available, regardless of the completion processor
+// indication to continue.
+//
+// Parameters:
+// timeout - Milliseconds to wait for an event sequence to complete
+// interruptible - flag indicating whether the wait can be interrupted
+//
+// Return Value:
+// The result of the asynchronous function last to complete.
+// Returns MPI_ERR_PENDING if the wait was interrupted.
+// Returns MPI_SUCCESS if the wait timed out.
+//
+_Success_(return==MPI_SUCCESS || return==MPI_ERR_PENDING)
+int
+ExProcessCompletions(
+ _In_ ExSetHandle_t set,
+ _In_ DWORD timeout,
+ _In_opt_ bool interruptible = false
+ );
+
+
+//
+// ExInitialize
+//
+// Initialize the completion queue. This function can only be called once
+// before Finalize.
+//
+_Success_(return==MPI_SUCCESS)
+int
+ExInitialize(
+ void
+ );
+
+
+//
+// ExFinalize
+//
+// Close the completion queue progress engine. This function can only be called
+// once.
+//
+void
+ExFinalize(
+ void
+ );
+
+
+//----------------------------------------------------------------------------
+//
+// The Executive Key-Zero completion processor
+//
+// Overview:
+// The Key-Zero completion processor enables the users of the Executive to
+// associate a different completion routine with each operation rather than
+// use a single completion processor per handle. The Key-Zero processor works
+// with user supplied OVERLAPPED structure. It cannot work with system generated
+// completion events (e.g., Job Object events), or other external events.
+//
+// The Key-Zero processor uses the EXOVERLAPPED data structure which embeds the
+// user success and failure completion routines. When the Key-Zero completion
+// processor is invoked it calls the user success or failure routine base on
+// the result of the async operation.
+//
+//----------------------------------------------------------------------------
+
+//
+// ExCompletionRoutine function prototype
+//
+// The ExCompletionRoutine callback routine is invoked by the built-in Key-Zero
+// completion processor. The information required for processing the event is
+// packed with the EXOVERLAPPED structure and can be accessed with the Ex utility
+// functions. The callback routine returns the logical error value.
+//
+// Parameters:
+// pOverlapped
+// A pointer to an EXOVERLAPPED structure associated with the completed
+// operation. This pointer is used to extract the caller context with the
+// CONTAINING_RECORD macro.
+//
+// Return Value:
+// MPI error value indicating the result of the "logical" async function.
+//
+struct EXOVERLAPPED;
+
+typedef
+_Success_(return==MPI_SUCCESS)
+int
+(WINAPI * ExCompletionRoutine)(
+ _Inout_ struct EXOVERLAPPED* pOverlapped
+ );
+
+
+//
+// struct EXOVERLAPPED
+//
+// The data structure used for Key-Zero completions processing. The pfnSuccess
+// and pfnFailure are set by the caller before calling an async operation.
+// The pfnSuccess is called if the async operation was successful.
+// The pfnFailure is called if the async operation was unsuccessful.
+//
+struct EXOVERLAPPED
+{
+
+ OVERLAPPED ov;
+ ExCompletionRoutine pfnSuccess;
+ ExCompletionRoutine pfnFailure;
+};
+
+
+//
+// ExInitOverlapped
+//
+// Initialize the success & failure callback function fields
+// Reset the hEvent field of the OVERLAPPED, make it ready for use with the OS
+// overlapped API's.
+//
+static
+inline
+void
+ExInitOverlapped(
+ _Inout_ EXOVERLAPPED* pOverlapped,
+ _In_ ExCompletionRoutine pfnSuccess,
+ _In_ ExCompletionRoutine pfnFailure
+ )
+{
+ pOverlapped->ov.hEvent = nullptr;
+ pOverlapped->pfnSuccess = pfnSuccess;
+ pOverlapped->pfnFailure = pfnFailure;
+}
+
+
+//
+// ExPostOverlapped
+//
+// Post an EXOVERLAPPED completion to the completion queue to be invoked by
+// ExProcessCompletions.
+//
+void
+ExPostOverlapped(
+ _In_ ExSetHandle_t Set,
+ _Inout_ EXOVERLAPPED* pOverlapped
+ );
+
+
+//
+// ExPostOverlappedResult
+//
+// Post an EXOVERLAPPED completion to the completion queue to be invoked by
+// ExProcessCompletions. Set the status and bytes transferred count.
+//
+static
+inline
+void
+ExPostOverlappedResult(
+ _In_ ExSetHandle_t Set,
+ _Inout_ EXOVERLAPPED* pOverlapped,
+ _In_ HRESULT Status,
+ _In_ DWORD BytesTransferred
+ )
+{
+ pOverlapped->ov.Internal = Status;
+ pOverlapped->ov.InternalHigh = BytesTransferred;
+ ExPostOverlapped(Set, pOverlapped);
+}
+
+
+//
+// ExAttachHandle
+//
+// Associate an OS handle with the Executive completion queue. All asynchronous
+// operations using the attached handle are processed with the completion
+// processor associated with the provided Key. If no key value is specified,
+// the Key-Zero completion processor is used. Key-Zero completion processor
+// requires the use of EXOVERLAPPED data structure when calling an asynchronous
+// operation with that Handle.
+//
+void
+ExAttachHandle(
+ _In_ ExSetHandle_t Set,
+ _In_ HANDLE Handle,
+ _In_opt_ EX_PROCESSOR_KEYS Key = EX_KEY_RESERVED
+ );
+
+
+//
+// ExGetBytesTransferred
+//
+// Get the number of bytes transferred from the overlapped structure
+//
+static
+inline
+DWORD
+ExGetBytesTransferred(
+ _In_ const EXOVERLAPPED* pOverlapped
+ )
+{
+ return (DWORD)pOverlapped->ov.InternalHigh;
+}
+
+
+//
+// ExGetStatus
+//
+// Get the status return value from the overlapped structure
+//
+static
+inline
+NTSTATUS
+ExGetStatus(
+ _In_ const EXOVERLAPPED* pOverlapped
+ )
+{
+ return (NTSTATUS)pOverlapped->ov.Internal;
+}
+
+
+//
+// ExCallSuccess
+//
+// Set the completion status and bytes transferred and execute the EXOVERLAPPED
+// Success completion routine
+//
+static
+inline
+int
+ExCallSuccess(
+ _Inout_ EXOVERLAPPED* pOverlapped,
+ _In_ HRESULT Status,
+ _In_ DWORD BytesTransferred
+ )
+{
+ pOverlapped->ov.Internal = Status;
+ pOverlapped->ov.InternalHigh = BytesTransferred;
+ return pOverlapped->pfnSuccess(pOverlapped);
+}
+
+
+//
+// ExCallFailure
+//
+// Set the completion status and bytes transferred and execute the EXOVERLAPPED
+// Failure completion routine
+//
+static
+inline
+int
+ExCallFailure(
+ _Inout_ EXOVERLAPPED* pOverlapped,
+ _In_ HRESULT Status,
+ _In_ DWORD BytesTransferred
+ )
+{
+ pOverlapped->ov.Internal = Status;
+ pOverlapped->ov.InternalHigh = BytesTransferred;
+ return pOverlapped->pfnFailure(pOverlapped);
+}
+
+
+//
+// ExCompleteOverlapped
+//
+// Execute the EXOVERLAPPED success or failure completion routine based
+// on the overlapped status value.
+//
+static
+inline
+int
+ExCompleteOverlapped(
+ _Inout_ EXOVERLAPPED* pOverlapped
+ )
+{
+ if(SUCCEEDED(ExGetStatus(pOverlapped)))
+ return pOverlapped->pfnSuccess(pOverlapped);
+
+ return pOverlapped->pfnFailure(pOverlapped);
+}
+
+#endif /* EX_H */
diff --git a/src/mpi/common/extracterrmsgs b/src/mpi/common/extracterrmsgs
new file mode 100644
index 0000000..8991e16
--- /dev/null
+++ b/src/mpi/common/extracterrmsgs
@@ -0,0 +1,799 @@
+#! /usr/bin/perl
+# (Tested with -w; 10/5/04)
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#
+# Find the parse.sub routine.
+my $maintdir = "maint";
+my $rootdir = ".";
+if ( ! -s "maint/parse.sub" ) {
+ my $program = $0;
+ $program =~ s/extracterrmsgs//;
+ if (-s "$program/parse.sub") {
+ $maintdir = $program;
+ $rootdir = $program;
+ $rootdir =~ s/\/maint\///g;
+ $rootdir =~ s/\\maint\\//g;
+ print "Rootdir = $rootdir\n" if $debug;
+ }
+}
+require "$maintdir/parse.sub";
+
+$debug = 0;
+$careful = 0; # Set careful to 1 to flag unused messages
+$carefulFilename = "";
+$showfiles = 0;
+$quiet = 0;
+$build_test_pgm = 1;
+
+# Strict is used to control checking of error message strings.
+$gStrict = 0;
+if (defined($ENV{"DEBUG_STRICT"})) { $gStrict = 1; }
+
+# Check for special args
+@files = ();
+%skipFiles = ();
+$outfile = "";
+$testfile = "errtest.c";
+$outpath = ".";
+$srcroot = $rootdir;
+
+foreach $arg (@ARGV) {
+ if ($arg =~ /^-showfiles/) { $showfiles = 1; }
+ elsif( $arg =~ /-debug/) { $debug = 1; }
+ elsif( $arg =~ /-quiet/) { $quiet = 1; }
+ elsif( $arg =~ /-notest/) { $build_test_pgm = 0; }
+ elsif( $arg =~ /-outfile=(.*)/) { $outfile = $1; }
+ elsif( $arg =~ /-outpath=(.*)/) { $outpath = $1; }
+ elsif( $arg =~ /-testfile=(.*)/) { $testfile = $1; }
+ elsif( $arg =~ /-srcroot=(.*)/) { $srcroot = $1; }
+ elsif( $arg =~ /-careful=(.*)/) {
+ $careful = 1;
+ $carefulFilename = $1;
+ }
+ elsif( $arg =~ /-careful/) { $careful = 1; }
+ elsif( $arg =~ /-strict/) { $gStrict = 1; }
+ elsif( $arg =~ /-skip=(.*)/) { $skipFiles{$1} = 1; }
+ else {
+ print "Adding $arg to files\n" if $debug;
+ if (-d $arg) {
+ # Add all .c files from directory $arg to the list of files
+ # to process (this lets us shorten the arg list)
+ @files = (@files, &ExpandDir( $arg ));
+ }
+ else {
+ $files[$#files+1] = $arg;
+ }
+ }
+}
+# End of argument processing
+
+
+# Setup the basic file for errnames - Now determined in ExpandDirs
+#@errnameFiles = ( "$srcroot/errnames.txt" );
+
+if ($outfile ne "") {
+ print STDOUT "Creating out file $outpath\\$outfile\n";
+ $OUTFD = "MyOutFile";
+ open( $OUTFD, ">$outpath\\$outfile" ) or die "Could not open $outpath\\$outfile\n";
+}
+else {
+ $OUTFD = STDOUT;
+}
+# Setup before processing the files
+if ($build_test_pgm) {
+ print STDOUT "Creating test file $outpath\\$testfile\n";
+ open( TESTFD, ">$outpath\\$testfile" ) or die "Cannot create test program $outpath\\$testfile\n";
+ print TESTFD "/* -*- Mode: C++; c-basic-offset:4 ; -*- */\
+/* \
+ * (C) 2004 by Argonne National Laboratory.\
+ * See COPYRIGHT in top-level directory.\
+ *\
+ * This file is automatically generated by maint/extracterrmsgs\
+ * DO NOT EDIT\
+ */\n";
+ print TESTFD "
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <windows.h>
+#include \"mpi.h\"
+#include \"mpierror.h\"
+#include \"errcodes.h\"
+
+#pragma warning(disable:4100) // unreferenced formal parameter
+
+void MPID_Type_get_envelope(MPI_Datatype datatype, int* num_integers, int* num_addresses, int* num_datatypes, int* combiner)
+{
+ *combiner = MPI_COMBINER_NAMED;
+}
+
+typedef struct MPID_Comm MPID_Comm;
+
+_Analysis_noreturn_
+int
+MPID_Abort(
+ _Inout_opt_ MPID_Comm* comm,
+ _In_ BOOL intern,
+ _In_ int exit_code,
+ _In_z_ const char* error_msg
+ )
+{
+ printf(\"MPID_Abort called. exit code (%d); msg '%s'\", exit_code, error_msg);
+ exit(exit_code);
+}
+
+_Success_(return>=0)
+int
+MPIU_Internal_error_printf(
+ _In_z_ _Printf_format_string_params_(...) const char *str,
+ ...
+ )
+{
+ int n;
+ va_list list;
+
+ va_start(list, str);
+ n = vfprintf(stderr, str, list);
+ va_end(list);
+
+ fflush(stderr);
+
+ return n;
+}
+
+void ChkMsg( int err, int msgclass, const char msg[] )
+{
+ char errmsg[MPI_MAX_ERROR_STRING];
+
+ MPIR_Err_get_string( err, errmsg, MPI_MAX_ERROR_STRING );
+
+ printf( \"[0x%08x] [0x%08x] %2d %s\\n%s\\n\", err, MPIR_Err_get_user_error_code(err), msgclass, msg, errmsg );
+}
+\n\n";
+
+ print TESTFD "int __cdecl main(int argc, char **argv)\n";
+ print TESTFD "{\n int err;\n\n";
+ print TESTFD " printf(\"mpi_errno user_errno class error id string\\n\");\n";
+}
+
+# Process the definitions
+foreach $file (@files) {
+ print "$file\n" if $showfiles;
+ &ProcessFile( $file );
+}
+
+#
+# Create the hash %longnames that maps the short names to the long names,
+# $longnames{shortname} => longname, by reading the errnames.txt files
+foreach my $sourcefile (@errnameFiles) {
+ #print STDERR "processing $sourcefile for error names\n";
+ &ReadErrnamesFile( $sourcefile );
+}
+
+# Create the output files from the input that we've read
+&CreateErrmsgsHeader( $OUTFD );
+&CreateErrMsgMapping( $OUTFD );
+
+if ($build_test_pgm) {
+ print TESTFD " printf(\"---------- end ----------\\n\");\n";
+ print TESTFD "\n return 0;\n}\n";
+ close TESTFD;
+}
+
+#
+# Generate a list of unused keys
+if ($careful) {
+ my $OUTFD = STDERR;
+ if ($carefulFilename ne "") {
+ $OUTFD = "ERRFD";
+ open $OUTFD, ">$carefulFilename" or die "Cannot open $carefulFilename";
+ }
+ foreach $shortname (keys(%longnames)) {
+ if (!defined($longnamesUsed{$shortname}) ||
+ $longnamesUsed{$shortname} < 1) {
+ $loc = $longnamesDefined{$shortname};
+ print $OUTFD "Name $shortname is defined in $loc but never used\n";
+ }
+ }
+ if ($carefulFilename ne "") {
+ close $OUTFD;
+ }
+}
+
+#-----------------------------------------------------------------------------
+# ROUTINES
+# ----------------------------------------------------------------------------
+# From the data collected above, generate the file containing the error message
+# text.
+# This is a temporary routine; the exact output form will be defined later
+sub CreateErrmsgsHeader {
+ $FD = $_[0];
+ print $FD "/* -*- Mode: C; c-basic-offset:4 ; -*- */\
+/* \
+ * (C) 2001 by Argonne National Laboratory.\
+ * See COPYRIGHT in top-level directory.\
+ *\
+ * This file automatically created by extracterrmsgs\
+ * DO NOT EDIT\
+ */\n";
+ print $FD "typedef struct msgpair {\
+ const char* key;
+ const char* fmt;
+} msgpair;\n\n"
+}
+#
+# We also need a way to create the records
+# We then hash these on the first occurance (or precompute the hashes?)
+#
+# The error messages are output in the following form:
+# typedef struct {const char short[], const long[]} namemap;
+# Generic messages
+# static const char[] short1 = "";
+# static const char[] long1 = "";
+# ...
+# static const namemap[] = { {short1, long1}, {...} }
+#
+sub CreateErrMsgMapping {
+ my $OUTFD = $_[0];
+
+ # Create a mapping of MPI error classes to the specific error
+ # message by index into generic_err_msgs. This reads the file
+ # baseerrnames, looks up the generic message, and maps the MPI error
+ # class to the corresponding index.
+ # We must do this here because we must ensure that all MPI error
+ # classes have been added to the generic messages
+ @class_msgs = ();
+ open (FD, "<$srcroot/baseerrnames.txt" ) ||
+ die "Could not open $srcroot/baseerrnames.txt\n";
+    while (<FD>) {
+ s/#.*$//;
+ my ($mpiname,$num,$shortmsg) = split(/\s\s*/);
+ if (!defined($shortmsg)) {
+ # Incase there is no short message entry (!)
+ $shortmsg = "";
+ }
+ if ($shortmsg ne "")
+ {
+ if ($shortmsg =~ /\%/)
+ {
+ print STDERR "$srcroot/baseerrnames.txt(1) : error : message $shortmsg in baseerrnames.txt contains format control\n";
+ }
+
+ $specific_msgs{$shortmsg}++;
+ $specific_loc{$shortmsg} = ":baseerrnames.txt(1)";
+
+ $class_msgs[$num] = "$shortmsg";
+ }
+ }
+ close (FD);
+
+ $num = 0;
+ # Now output the instance specific messages
+ foreach $key (sort keys %specific_msgs)
+ {
+ $longvalue = "\"\0\"";
+
+ if (!defined($longnames{$key}))
+ {
+ print STDERR "$specific_loc{$key} : error : shortname $key for specific messages has no expansion\n";
+ next;
+ }
+ else {
+ # Keep track of which messages we have seen
+ $longnamesUsed{$key} += 1;
+ }
+
+ # Escape any naked quotes
+	$longvalue =~ s/(?<!\\)\"/\\\"/g;
+ $linecount++;
+ # Skip Comments
+ if (/^\s*\#/) { next; }
+ # Read entire error message (allow \ at end of line to continue)
+ if (/^\s*(\*\*[^:]*):(.*)$/) {
+ my $name = $1;
+ my $repl = $2;
+ $repl =~ s/\r*\n*$//g;
+ while ($repl =~ /\\\s*$/) {
+ # If there is a \\ at the end, read another.
+ # Remove the \ at the end (an alternative is to turn
+ # it into a \n (newline), but we may want to avoid
+ # multiline messages
+ $repl =~ s/\\\s*$//;
+		my $inline = <FD>;
+ $linecount++;
+ $inline =~ s/^\s*//; # remove leading spaces
+ $repl .= $inline;
+ $repl =~ s/[\r\n]*$//g; # remove newlines
+ }
+
+ # Check that the name and the replacement text at least
+ # partially match as to format specifiers
+ # (They should have exactly the same pattern, i.e.,
+ # if the name has %d %x in is, the replacement should
+ # have %d %x, in that order)
+ my $namehasformat = ($name =~ /%/);
+ my $replhasformat = ($repl =~ /%/);
+ if ($namehasformat != $replhasformat) {
+ print STDERR "$sourcefile($linecount) : error : format control usage in $name and $repl do not agree\n";
+ }
+# if (!defined($longnames{"\"$name\""}))
+# {
+# $longnames{"\"$name\""} = $repl;
+# $longnamesDefined{"\"$name\""} = "$sourcefile:$linecount";
+# }
+ # Check that the replacement text doesn't include a unquoted
+ # double quote
+ if ($repl =~ /(.)\"/) {
+ my $prechar = $1;
+ if ($1 ne "\\") {
+ print STDERR "$sourcefile($linecount) : error : replacement text for $name contains an unescaped double quote: $repl\n";
+ }
+ }
+ if (!defined($longnames{$name}))
+ {
+ $longnames{$name} = $repl;
+ $longnamesDefined{$name} = "$sourcefile:$linecount";
+ }
+ else
+ {
+ print STDERR "$sourcefile($linecount) : warning : attempt to redefine $name. Duplicate ignored.\n";
+ }
+ }
+ }
+ close( FD );
+}
+
+# ==========================================================================
+# Call this for each file
+# This reads a C source or header file and adds does the following:
+# adds any generic message short names encountered to the hash generic_msgs.
+# adds any specific message short names encounter to the hash specific_msgs.
+# adds the filename to the hash generic_loc{msg} as the value (: separated)
+# and the same for hash specific_loc{msg}.
+# The last two are used to provide better error reporting.
+#
+$filename = ""; # Make global so that other routines can echo filename
+sub ProcessFile
+{
+ # Leave filename global for AddTest
+ $filename = $_[0];
+ my $linecount = 0;
+ open (FD, "<$filename" ) or die "Could not open $filename\n";
+
+    while (<FD>) {
+ $linecount++;
+
+ # Skip code that is marked as ignore (e.g., for
+ # macros that are used to simplify the use of MPIR_Err_create_code
+ # (such macros must also be recognized and processed)
+ if (/\/\*\s+--BEGIN ERROR MACROS--\s+\*\//) {
+	    while (<FD>) {
+ $linecount++;
+ if (/\/\*\s+--END ERROR MACROS--\s+\*\//) { last; }
+ }
+ $remainder = "";
+ next;
+ }
+
+ # Next, remove any comments
+ $_ = StripComments( FD, $_ );
+
+ # Skip the definition of the function
+ if (/MPI_RESULT\s+MPI[OUR]_Err_create_code/) { $remainder = ""; next; }
+
+ # Match the known routines and macros.
+ # Then check that the arguments match if there is a
+ # specific string (number of args matches the number present)
+ # (MPIU_ERR_FATAL_GET[0-4]?(cond,code,class,gmsg[,smsg,args])
+ # Value is a quadruplet of:
+ # 1. the count of args where the generic msg begins (starting from 0)
+ # 2. location of __LINE__ (-1 for none)
+ # 3. specific msg arg required (0 for no, > 0 for yes)
+ # 4. only indirect message allowed
+ #
+ %KnownErrRoutines = ( 'MPIR_Err_create_code' => '3:-1:1:1',
+ 'MPIR_ERRTEST_VALID_HANDLE' => '4:-1:0:1',
+ 'MPIU_ERR_FATAL_GET' => '2:-1:0:1',
+ 'MPIU_ERR_GET' => '1:-1:0:1',
+ 'MPIU_ERR_CLASS_GET' => '2:-1:0:1',
+ 'MPIU_ERR_CREATE' => '1:-1:0:1',
+
+ 'MPIU_E_ERR' => '0:-1:0:1',
+ );
+
+ while (/(MPI[OUR]_E[A-Za-z0-9_]+)\s*(\(.*)$/) {
+ my $routineName = $1;
+ my $arglist = $2;
+ if (!defined($KnownErrRoutines{$routineName})) {
+ if($routineName =~ /[1-9]$/) {
+		$routineNameN = substr($routineName, 0, length($routineName) - 1);
+ if (!defined($KnownErrRoutines{$routineNameN})) {
+ print "Skipping $routineName\n" if $debug;
+ last;
+ }
+ print "Found $routineName, using $routineNameN definition\n" if $debug;
+ $routineName = $routineNameN;
+ }
+ else {
+ print "Skipping $routineName\n" if $debug;
+ last;
+ }
+ }
+ else {
+ print "Found $routineName\n" if $debug;
+ }
+
+ my ($genericArgLoc,$hasLine,$hasSpecific,$onlyIndirect) =
+ split(/:/,$KnownErrRoutines{$routineName});
+
+ ($leader, $remainder, @args ) = &GetSubArgs( FD, $arglist );
+ # Discard leader
+ if ($debug) {
+ print "Line begins with $leader\n"; # Use $leader to keep -w happy
+ foreach $arg (@args) {
+ print "|$arg|\n";
+ }
+ }
+ # Process the signature
+
+ # if signature does not match new function prototype, then skip it
+ if ($#args < $genericArgLoc) {
+ if (!defined($bad_syntax_in_file{$filename})) {
+ $bad_syntax_in_file{$filename} = 1;
+ print STDERR "$filename($linecount) : error : $routineName call with too few arguments\n";
+ }
+ next;
+ }
+ if ($hasLine >= 0 &&
+ ($args[$hasLine] ne "__LINE__" && $args[$hasLine] ne "line")) {
+ if (!defined($bad_syntax_in_file{$filename})) {
+ $bad_syntax_in_file{$filename} = 1;
+ my $tmpi = $hasLine + 1;
+ print STDERR "$filename($linecount) : error : Expected __LINE__ or line as ${tmpi}th argument of $routineName\n";
+ }
+ next;
+ }
+
+ #my $last_errcode = $args[0];
+ #my $fatal_flag = $args[1];
+ #my $fcname = $args[2];
+ #my $linenum = $args[3];
+ #my $errclass = $args[4];
+ my $specific_msg = $args[$genericArgLoc];
+
+ if ($specific_msg =~ /(\".*\")/)
+ {
+ $specific_msg = $1;
+ }
+
+ # Check the generic and specific message arguments
+ if ($specific_msg =~ /\s"/)
+ {
+ print STDERR "$filename($linecount) : warning : trailing blank in error key '$specific_msg'\n";
+ }
+
+ if ($onlyIndirect && !($specific_msg =~ /^\"\*\*.+\"$/)) {
+
+ print STDERR "$filename($linecount) : error : error key '$specific_msg' has incorrect format\n";
+ next;
+ }
+
+ if ($specific_msg =~ /%/) {
+ # Specific message includes format values. Check
+ # for number and for valid strings if %s
+ my $nargs = 0;
+ my $tmpmsg = $specific_msg;
+ my @stringLocs = ();
+ while ($tmpmsg =~ /[^%]*%(.)(.*)/) {
+ $tmpmsg = $2;
+ my $followchar = $1;
+ if ($followchar eq "s") {
+ $stringLocs[$#stringLocs+1] = $nargs;
+ }
+ if ($followchar ne "%") {
+ $nargs ++;
+ }
+ if (! ($followchar =~ /[%xsditpDCRWOEIGFAgl]/) ) {
+ print STDERR "$filename($linecount) : error : Unrecognized format specifier in error key $specific_msg\n";
+ }
+ }
+ if ($nargs != $#args - $genericArgLoc) {
+ my $actargs = $#args - $genericArgLoc;
+ print STDERR "$filename($linecount) : error : wrong number of arguments for instance error key $specific_msg; expected $nargs but found $actargs\n";
+ }
+ elsif ($#stringLocs >= 0 && $gStrict) {
+ # Check for reasonable strings if strict checking requested
+ for (my $i=0; $i<=$#stringLocs; $i++) {
+ my $index = $stringLocs[$i];
+ my $string = $args[$genericArgLoc+1+$index];
+ if ($string =~ /\"/) {
+ # Allow a few special cases:
+ # Always: all uppercase and _, single word
+ my $stringOk = 0;
+ if ($string =~ /^\"[A-Z_]*\"$/) {
+ $stringOk = 1;
+ }
+ elsif ($string =~ /^\"\w*\"$/) {
+ if (1) { $stringOk = 1; }
+ }
+ if (!$stringOk) {
+ print STDERR "$filename($linecount) : error : explicit string as argument to error key $specific_msg; explicit string is $string\n";
+ }
+ }
+ }
+ }
+ }
+
+ if ($build_test_pgm) {
+ &AddTestCall( $genericArgLoc, @args )
+ }
+
+ if ($specific_msg =~ /^\"(\*\*.*)\"/)
+ {
+ $specific_msg = $1;
+ $specific_msgs{$specific_msg}++;
+ $specific_loc{$specific_msg} .= ":$filename($linecount)";
+ }
+ }
+ continue
+ {
+ $_ = $remainder;
+ }
+ }
+ close FD;
+}
+
+# Get all of the .c files from the named directory, including any subdirs
+# Also, add any errnames.txt files to the errnamesFiles arrays
+sub ExpandDir {
+ my $dir = $_[0];
+ my @otherdirs = ();
+ my @files = ();
+ opendir DIR, "$dir";
+ while ($filename = readdir DIR) {
+ if ($filename =~ /^\./ || $filename eq "CVS" || $filename eq $testfile) {
+ next;
+ }
+ elsif (-d "$dir/$filename") {
+ if( ($filename ne "objd") && ($filename ne "obj") ) {
+ $otherdirs[$#otherdirs+1] = "$dir/$filename";
+ }
+ }
+ elsif ($filename =~ /(.*\.[chi][xp]*)$/) {
+ # Test for both Unix- and Windows-style directory separators
+ if (!defined($skipFiles{"$dir/$filename"}) &&
+ !defined($skipFiles{"$dir\\$filename"})) {
+ $files[$#files + 1] = "$dir/$filename";
+ }
+ }
+ elsif ($filename eq "errnames.txt") {
+ $errnameFiles[$#errnameFiles+1] = "$dir/$filename";
+ }
+ }
+ closedir DIR;
+ # (almost) tail recurse on otherdirs (we've closed the directory handle,
+ # so we don't need to worry about it anymore)
+ foreach $dir (@otherdirs) {
+ @files = (@files, &ExpandDir( $dir ) );
+ }
+ return @files;
+}
+#
+# Other todos:
+# It would be good to keep track of any .N MPI_ERR_xxx names in the structured
+# comment and match these against any MPI_ERR_yyy used in the code, emitting a
+# warning message for MPI_ERR_yyy values used in the code but not mentioned
+# in the header. This could even apply to routines that are not at the MPI
+# layer, forcing all routines to document all MPI error classes that they might
+# return (this is like requiring routines to document the exceptions that
+# they may throw).
+
diff --git a/src/mpi/common/hwinfowin6.cpp b/src/mpi/common/hwinfowin6.cpp
new file mode 100644
index 0000000..e421591
--- /dev/null
+++ b/src/mpi/common/hwinfowin6.cpp
@@ -0,0 +1,189 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "util.h"
+
+typedef SYSTEM_LOGICAL_PROCESSOR_INFORMATION SLPI;
+
+
+inline const SLPI* HwInfoGetEnd( const SLPI* pInfo, DWORD cb )
+{
+ return (const SLPI*)((ULONG_PTR)pInfo + (ULONG_PTR)cb);
+}
+
+
+//
+// Summary:
+// Get the full mask of all cores
+//
+UINT64
+HwInfoGetGroupMask(
+ _In_ const SLPI* pSlpi,
+ _In_ DWORD cb
+ )
+{
+ UINT64 mask = 0;
+ for( const SLPI* pCurrent = pSlpi;
+ pCurrent < HwInfoGetEnd(pSlpi, cb);
+ pCurrent++
+ )
+ {
+ if( pCurrent->Relationship == RelationProcessorCore )
+ {
+            mask |= static_cast<UINT64>(pCurrent->ProcessorMask);
+ }
+ }
+ Assert(0 != mask);
+ return mask;
+}
+
+
+//
+// Summary:
+// Get the count of logical cores per physical core
+//
+UINT8
+HwInfoGetPcoreWidth(
+ _In_ const SLPI* pSlpi,
+ _In_ DWORD cb
+ )
+{
+ UINT8 c = 1;
+ for( const SLPI* pCurrent = pSlpi;
+ pCurrent < HwInfoGetEnd(pSlpi, cb);
+ pCurrent++
+ )
+ {
+ if( pCurrent->Relationship == RelationProcessorCore )
+ {
+ //
+ // MSDN documents that if Flags == 1, HT is enabled.
+ // so we need to count the bits used.
+ //
+ if( 1 == pCurrent->ProcessorCore.Flags )
+ {
+ c = CountBits( pCurrent->ProcessorMask );
+ }
+ break;
+ }
+ }
+ return c;
+}
+
+
+//
+// Summary:
+// Get the count of logical cores per numa node
+//
+UINT8
+HwInfoGetNumaWidth(
+ _In_ const SLPI* pSlpi,
+ _In_ DWORD cb
+ )
+{
+ UINT8 c = 0;
+ for( const SLPI* pCurrent = pSlpi;
+ pCurrent < HwInfoGetEnd(pSlpi, cb);
+ pCurrent++
+ )
+ {
+ if( pCurrent->Relationship == RelationNumaNode )
+ {
+ c = CountBits( pCurrent->ProcessorMask );
+ break;
+ }
+ }
+ Assert(0 != c);
+ return c;
+}
+
+
+//
+// Summary:
+// Allocate the SLPI information for this machine.
+//
+// Parameters:
+//  ppSlpi          - pointer to receive the allocated SLPI array
+//  pcbSlpi         - pointer to receive the total buffer size.
+//
+HRESULT
+HwInfoGetSlpi(
+ _Outptr_result_buffer_(*pcbSlpi) SLPI** ppSlpi,
+ _Out_ UINT32* pcbSlpi
+ )
+{
+ SLPI* pSlpi;
+ DWORD ntError;
+ DWORD cb = 0;
+
+ BOOL bResult = ::GetLogicalProcessorInformation( nullptr, &cb );
+ if( FALSE == bResult )
+ {
+ ntError = GetLastError();
+ if( ntError != ERROR_INSUFFICIENT_BUFFER )
+ {
+ return HRESULT_FROM_WIN32(ntError);
+ }
+ }
+
+    pSlpi = static_cast<SLPI*>(malloc(cb));
+ if( nullptr == pSlpi )
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ bResult = ::GetLogicalProcessorInformation( pSlpi, &cb );
+ if( FALSE == bResult )
+ {
+ free(pSlpi);
+ ntError = GetLastError();
+ return HRESULT_FROM_WIN32(ntError);
+ }
+
+ *ppSlpi = pSlpi;
+ *pcbSlpi = cb;
+
+ return S_OK;
+}
+
+
+//
+// Summary:
+//  Initialize the HWINFO from the current machine using pre-Win7 (Vista) APIs
+//
+// Parameters:
+// pInfo - HWINFO to initialize
+//
+//
+HRESULT
+HwInfoInitializeWin6(
+ _Out_ HWINFO* pInfo
+ )
+{
+ SLPI* pSlpi;
+ UINT32 cbSlpi;
+ HRESULT hr;
+
+ hr = HwInfoGetSlpi(&pSlpi, &cbSlpi );
+ if( FAILED( hr ) )
+ {
+ return hr;
+ }
+
+ pInfo->Mask = HwInfoGetGroupMask(pSlpi,cbSlpi);
+ pInfo->ActiveMask = pInfo->Mask;
+
+ pInfo->Group = 0;
+ pInfo->GroupWidth = CountBits(pInfo->Mask);
+ pInfo->NumaWidth = HwInfoGetNumaWidth(pSlpi,cbSlpi);
+ pInfo->PcoreWidth = HwInfoGetPcoreWidth(pSlpi,cbSlpi);
+
+ Assert(pInfo->GroupWidth >= pInfo->NumaWidth);
+ Assert(pInfo->NumaWidth >= pInfo->PcoreWidth);
+ Assert(pInfo->PcoreWidth > 0 );
+
+ free(pSlpi);
+ return S_OK;
+}
diff --git a/src/mpi/common/hwinfowin7.cpp b/src/mpi/common/hwinfowin7.cpp
new file mode 100644
index 0000000..30e32e1
--- /dev/null
+++ b/src/mpi/common/hwinfowin7.cpp
@@ -0,0 +1,175 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "util.h"
+#include "kernel32util.h"
+#include
+#include
+//
+// Summary:
+//  Utility function to get the specified processor relations using the existing buffer if possible.
+//
+// Parameters:
+// relation - the specific relation that we want.
+// ppSlpi - pointer to an existing buffer on input (optional)
+// pointer to new buffer if current buffer is null or too small
+// pcbSlpi - on input, points to the current size of the buffer
+// on output, contains the new alloc size or the size used if the
+// existing buffer is large enough.
+//
+static
+HRESULT
+HwInfoGetSlpiEx(
+ _In_ LOGICAL_PROCESSOR_RELATIONSHIP relation,
+ _Inout_ _Outptr_result_bytebuffer_to_(*pcbSlpi,*pcbSlpi) SLPIEX** ppSlpi,
+ _Inout_ UINT32* pcbSlpi
+ )
+{
+ SLPIEX* pSlpi = *ppSlpi;
+ DWORD cb = *pcbSlpi;
+ DWORD ntError;
+ BOOL bResult;
+ for(;;)
+ {
+ Assert( nullptr != Kernel32::Methods.GetLogicalProcessorInformationEx );
+
+ bResult = Kernel32::Methods.GetLogicalProcessorInformationEx( relation, pSlpi, &cb );
+ if( FALSE != bResult )
+ {
+ break;
+ }
+ ntError = GetLastError();
+ if( nullptr != pSlpi )
+ {
+ //
+ // ensure that we null out the input buffer if
+ // and free the input buffer on error.
+ // this prevents the caller from having to
+ // free it on failure.
+ //
+ *ppSlpi = nullptr;
+ free(pSlpi);
+ }
+ if( ntError != ERROR_INSUFFICIENT_BUFFER )
+ {
+ return HRESULT_FROM_WIN32(ntError);
+ }
+
+        pSlpi = static_cast<SLPIEX*>( malloc( cb ) );
+ if( nullptr == pSlpi )
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ }
+
+ *ppSlpi = pSlpi;
+ *pcbSlpi = cb;
+
+ return S_OK;
+}
+
+
+//
+// Summary:
+// Initialize the array of pInfos from the current machine using Win7 apis
+//
+// Parameters:
+// pnInfos - On input, the count of elements in pInfos.
+// On output, the count of elements used in pInfos or size required if too small
+// pInfos - Array of HWINFO to populate
+//
+HRESULT
+HwInfoInitializeWin7(
+ _Inout_ UINT32* pnInfos,
+ _Out_writes_to_opt_(*pnInfos,*pnInfos) HWINFO pInfos[]
+ )
+{
+ SLPIEX* pSlpiEx = nullptr;
+ UINT32 cbSlpiEx = 0;
+ UINT32 cbSlpiExAlloc = 0;
+ UINT8 numaWidth;
+ UINT8 pcoreWidth = 1;
+ HRESULT hr;
+
+ hr = HwInfoGetSlpiEx(RelationNumaNode,&pSlpiEx, &cbSlpiExAlloc );
+ if( FAILED( hr ) )
+ {
+ return hr;
+ }
+
+ __analysis_assume(pSlpiEx != nullptr);
+
+ numaWidth = CountBits( pSlpiEx->NumaNode.GroupMask.Mask );
+
+ cbSlpiEx = cbSlpiExAlloc;
+ hr = HwInfoGetSlpiEx( RelationProcessorCore, &pSlpiEx, &cbSlpiEx );
+ if( FAILED( hr ) )
+ {
+ return hr;
+ }
+
+ //
+ // If HT is enabled, then calculate the pcoreWidth to include the
+ // logical cores it contains.
+ //
+ __analysis_assume(pSlpiEx != nullptr);
+ if( 0 != (pSlpiEx->Processor.Flags & LTP_PC_SMT) )
+ {
+ pcoreWidth = CountBits( pSlpiEx->Processor.GroupMask[0].Mask );
+ }
+
+ //
+ // if the second call to HwInfoGetSlpiEx reallocated the buffer
+ // we calculate the new alloc max based on previous and current size
+ //
+ cbSlpiExAlloc = max(cbSlpiExAlloc,cbSlpiEx);
+ cbSlpiEx = cbSlpiExAlloc;
+ hr = HwInfoGetSlpiEx(RelationGroup,&pSlpiEx, &cbSlpiEx );
+ if( FAILED( hr ) )
+ {
+ return hr;
+ }
+
+ //
+ // To make the data structures pack nicely, we have limited ourselves to
+ // UINT8 worth of processor groups. There is no real world case where
+ // the number of processor groups will exceed this limit.
+ //
+ __analysis_assume(pSlpiEx != nullptr);
+ UINT8 nGroups = static_cast( pSlpiEx->Group.ActiveGroupCount );
+
+ //
+ // This should be a very rare case where group count > 1
+ // so I don't feel bad not pre-emptively checking the group count
+ // before calculating the PCores and Numa widths.
+ //
+ if( nGroups > *pnInfos )
+ {
+ *pnInfos = nGroups;
+ free(pSlpiEx);
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+
+ for( UINT8 i = 0; i < nGroups; i++ )
+ {
+ pInfos[i].Mask = pSlpiEx->Group.GroupInfo[i].ActiveProcessorMask;
+ pInfos[i].ActiveMask = pInfos[i].Mask;
+
+ pInfos[i].Group = i;
+ pInfos[i].GroupWidth = pSlpiEx->Group.GroupInfo[ i ].ActiveProcessorCount;
+ pInfos[i].NumaWidth = numaWidth;
+ pInfos[i].PcoreWidth = pcoreWidth;
+
+ Assert(pInfos[i].GroupWidth >= pInfos[i].NumaWidth);
+ Assert(pInfos[i].NumaWidth >= pInfos[i].PcoreWidth);
+ Assert(pInfos[i].PcoreWidth > 0 );
+ }
+ free(pSlpiEx);
+ *pnInfos = nGroups;
+ return S_OK;
+}
+
diff --git a/src/mpi/common/hwlayout.cpp b/src/mpi/common/hwlayout.cpp
new file mode 100644
index 0000000..805c5ef
--- /dev/null
+++ b/src/mpi/common/hwlayout.cpp
@@ -0,0 +1,401 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "stdio.h"
+
+
+//
+// Summary:
+// Utility function to get a simple string for error message formatting.
+//
+inline const char* GetNodeTypeString(HWNODE_TYPE targetType)
+{
+    //
+    // Map each node type to the single-letter mnemonic used when
+    // formatting layout error messages.
+    //
+    const char* pszName;
+    switch( targetType )
+    {
+    case HWNODE_TYPE_MACHINE:
+        pszName = "M";
+        break;
+    case HWNODE_TYPE_GROUP:
+        pszName = "G";
+        break;
+    case HWNODE_TYPE_NUMA:
+        pszName = "N";
+        break;
+    case HWNODE_TYPE_PCORE:
+        pszName = "P";
+        break;
+    case HWNODE_TYPE_LCORE:
+        pszName = "L";
+        break;
+    default:
+        //
+        // Unknown node type - should never happen.
+        //
+        Assert(0);
+        pszName = "Invalid";
+        break;
+    }
+    return pszName;
+}
+
+
+//
+// FW delclare some local functions
+//
+static HRESULT
+HwLayoutProcessEnum(
+ _In_ const HWLAYOUT* pLayout,
+ _In_ const HWVIEW* pView,
+ _In_ PVOID pData,
+ _In_ FN_HwLayoutProcessCallback* pfn,
+ _In_ HWLAYOUT_STATE* pState,
+ _In_ UINT32 enumIndex
+ );
+
+
+//
+// Summary:
+// Process the specified layout using the specified tree
+//
+// Parameters:
+// pLayout - Pointer to layout description
+// pView - Pointer to the view
+// minProc - Minimum number of processes to create
+// maxProc - Maximum number of processes to create
+// pData - Opaque data pointer to hand to callback
+// pfn - Callback function to invoke for each iteration
+//
+HRESULT
+HwLayoutProcess(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWVIEW* pView,
+    _In_ UINT32 minProc,
+    _In_ UINT32 maxProc,
+    _In_ PVOID pData,
+    _In_ FN_HwLayoutProcessCallback* pfn
+    )
+{
+    HWLAYOUT_STATE state;
+
+    //
+    // Reset the enumeration state before the first pass.
+    //
+    state.MaxProc = maxProc;
+    state.MinProc = minProc;
+    state.ProcCount = 0;
+
+    for( UINT32 i = 0; i < pLayout->EnumCount; i++ )
+    {
+        state.Enums[i].Start = 0;
+        state.Enums[i].Size = 0;
+        state.Enums[i].Current = 0;
+    }
+
+    //
+    // Re-run the full enumeration chain until the minimum process count
+    // has been produced, or the callback requests a stop (S_FALSE).
+    //
+    for( ;; )
+    {
+        HRESULT hr = HwLayoutProcessEnum( pLayout, pView, pData, pfn, &state, 0 );
+        if( FAILED( hr ) )
+        {
+            return hr;
+        }
+        if( S_FALSE == hr )
+        {
+            break;
+        }
+        if( state.ProcCount >= state.MinProc )
+        {
+            break;
+        }
+    }
+
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Process the target of each iteration.
+//
+// Parameters:
+// pLayout - Pointer to the current layout
+// pView - Pointer to the view where the location is set
+// pData - Pointer to the opaque data to pass to the callback
+// pfn - Callback function invoked for each iteration target
+// pState - Pointer to the current state of the iterations.
+//
+inline HRESULT
+HwLayoutProcessTarget(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWVIEW* pView,
+    _In_ PVOID pData,
+    _In_ FN_HwLayoutProcessCallback* pfn,
+    _In_ HWLAYOUT_STATE* pState
+    )
+{
+    HRESULT hr;
+
+    //
+    // The innermost enumerator's current position selects the node the
+    // callback is invoked on.
+    //
+    UINT32 location = pState->Enums[pLayout->EnumCount-1].Current;
+    UINT32 count = pLayout->TargetCount;
+
+    //
+    // MaxProc == 0 means "no upper bound"; otherwise clamp the item count
+    // for this invocation to the remaining process budget.
+    //
+    if( 0 != pState->MaxProc )
+    {
+        count = min(count,pState->MaxProc - pState->ProcCount);
+    }
+
+    if( pView == nullptr)
+    {
+        return S_FALSE;
+    }
+
+    //
+    // Walk up toward the root until we reach a node of the requested
+    // target type (deeper levels have larger type values).
+    //
+    while( pView->Nodes[location].Type > pLayout->TargetType )
+    {
+        location = pView->Nodes[location].Parent;
+    }
+
+    Assert( 0 != location );
+
+    hr = pfn( pLayout, pState, pView, location, pData, &count );
+    if( FAILED( hr ) )
+    {
+        return hr;
+    }
+
+    //
+    // The callback reports (via count) how many processes it actually
+    // placed; account for them before propagating any stop request.
+    //
+    pState->ProcCount += count;
+
+    if( S_FALSE == hr )
+    {
+        return hr;
+    }
+
+    //
+    // Stop the enumeration once the maximum process count is reached.
+    //
+    if( 0 != pState->MaxProc && pState->ProcCount >= pState->MaxProc )
+    {
+        return S_FALSE;
+    }
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Lookup the count of elements for a give type within the scope of the current layout
+//
+// Parameters:
+// pLayout - The layout to use to scope the returned size
+// pState - The state of the layout enumeration
+// enumIndex - The current layout enumeration being processed
+// pView - The view to use to resolve the size
+// type - The type of node we need a count for
+//
+static UINT32
+HwLayoutResolveWellknownExpression(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWLAYOUT_STATE* pState,
+    _In_ UINT32 enumIndex,
+    _In_ const HWVIEW* pView,
+    _In_ HWNODE_TYPE type
+    )
+{
+    Assert( type >= HWNODE_TYPE_MACHINE );
+    Assert( pLayout->Enums[enumIndex].Type >= type );
+
+    //
+    // First find the Enumerator in the layout that provides the
+    // scope we need resolve the wellknown value.  Enumerators run from
+    // root (smallest type value) to leaf, so walk back toward the root
+    // until the enumerator's type is at or above the requested type.
+    //
+    while( enumIndex > 0 && pLayout->Enums[enumIndex].Type > type )
+    {
+        enumIndex--;
+    }
+
+    //
+    // If expression matches an enumerator value, just return the size
+    // of that exact match.
+    //
+    if( pLayout->Enums[enumIndex].Type == type )
+    {
+        return pState->Enums[enumIndex].Size;
+    }
+
+
+    //
+    // Else, we must be closer to the root of the tree with the
+    // currently selected enumerator, so continue to walk the span
+    // of the tree until we reach the correct depth, and return the width
+    // of that range.
+    //
+    UINT32 start = pState->Enums[enumIndex].Current;
+    UINT32 end = start;
+
+    do
+    {
+        //
+        // Descend one level: widen [start,end] to cover all children of
+        // the current span.
+        //
+        start = pView->Nodes[start].FirstChild;
+        end = pView->Nodes[end].LastChild;
+
+    }while( pView->Nodes[start].Type < type );
+
+    return end - start + 1;
+}
+
+
+//
+// Summary:
+// Resolve the HWENUM_EXPR value to the resulting UINT32
+//
+// Parameters:
+// pLayout - The layout to use to scope the returned size
+// pState - The state of the layout enumeration
+// enumIndex - The current layout enumeration being processed
+// pView - The view to use to resolve the size
+// pExpression - The expression to resolve
+//
+inline UINT32
+HwLayoutResolveExpression(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWLAYOUT_STATE* pState,
+    _In_ UINT32 enumIndex,
+    _In_ const HWVIEW* pView,
+    _In_ const HWENUM_EXPR* pExpression
+    )
+{
+    //
+    // No flags set means the expression is a literal constant.
+    //
+    if( 0 == pExpression->Flags )
+    {
+        return pExpression->Left;
+    }
+    else
+    {
+        UINT32 left = pExpression->Left;
+        UINT32 right = pExpression->Right;
+
+        //
+        // A "wellknown" operand encodes an HWNODE_TYPE whose element
+        // count must be resolved within the current enumeration scope.
+        //
+        if( 0 != ( pExpression->Flags & HWENUM_EXPR_WELLKNOWN_LEFT ) )
+        {
+            left = HwLayoutResolveWellknownExpression(
+                pLayout,
+                pState,
+                enumIndex,
+                pView,
+                static_cast<HWNODE_TYPE>(left)
+                );
+        }
+        if( 0 == ( pExpression->Flags & HWENUM_EXPR_DIVIDE_BY_RIGHT ) )
+        {
+            return left;
+        }
+
+        if( 0 != ( pExpression->Flags & HWENUM_EXPR_WELLKNOWN_RIGHT ) )
+        {
+            right = HwLayoutResolveWellknownExpression(
+                pLayout,
+                pState,
+                enumIndex,
+                pView,
+                static_cast<HWNODE_TYPE>(right)
+                );
+        }
+
+        //
+        // Integer division; result may be 0 when left < right.
+        //
+        Assert(0 != right);
+
+        return left / right;
+    }
+}
+
+
+//
+// Summary:
+//  Resolve the expression, rounding a zero result up to 1 so that the
+//  caller always places at least a single item.
+//
+inline UINT32 HwLayoutResolveExpressionAndRoundUp(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWLAYOUT_STATE* pState,
+    _In_ UINT32 enumIndex,
+    _In_ const HWVIEW* pView,
+    _In_ const HWENUM_EXPR* pExpression
+    )
+{
+    UINT32 value = HwLayoutResolveExpression(pLayout,pState,enumIndex,pView,pExpression);
+    return ( 0 == value ) ? 1 : value;
+}
+
+
+//
+// Summary:
+// Run the chain of enumerations recursively
+//
+// Parameters:
+// pLayout - Pointer to the current layout
+// pView - Pointer to the view where the location is set
+// pData - Pointer to the opaque data to pass to the callback
+// pfn - Callback function invoked for each iteration target
+// pState - Pointer to the current state of the iterations.
+// enumIndex - The index of the current enumation level.
+//
+static HRESULT
+HwLayoutProcessEnum(
+    _In_ const HWLAYOUT* pLayout,
+    _In_ const HWVIEW* pView,
+    _In_ PVOID pData,
+    _In_ FN_HwLayoutProcessCallback* pfn,
+    _In_ HWLAYOUT_STATE* pState,
+    _In_ UINT32 enumIndex
+    )
+{
+    HRESULT hr;
+
+    //
+    // Recursion bottoms out once every enumerator has selected a
+    // position; the innermost level invokes the callback on the target.
+    //
+    if( enumIndex == pLayout->EnumCount )
+    {
+        return HwLayoutProcessTarget( pLayout, pView, pData, pfn, pState );
+    }
+
+    HWENUM_STATE* pEnumState = &pState->Enums[ enumIndex ];
+    const HWENUM* pEnum = &pLayout->Enums[ enumIndex ];
+
+    //
+    // Work out the start and count of elements in the list
+    //
+    if( 0 != enumIndex )
+    {
+        //
+        // Nested enumerator: scope it to the subtree under the parent
+        // enumerator's current node by widening [Start,end] one tree
+        // level at a time until this enumerator's node type is reached.
+        //
+        UINT32 end = pState->Enums[enumIndex-1].Current;
+        pEnumState->Start = end;
+
+        Assert(pView != nullptr);
+
+        while( pView->Nodes[end].Type != pEnum->Type )
+        {
+            Assert( pView->Nodes[pEnumState->Start].Type == pView->Nodes[end].Type );
+
+            pEnumState->Start = pView->Nodes[pEnumState->Start].FirstChild;
+            end = pView->Nodes[end].LastChild;
+        }
+        pEnumState->Size = (end - pEnumState->Start) + 1;
+    }
+    else
+    {
+        //
+        // Outermost enumerator: spans all nodes of its type in the view.
+        //
+        pEnumState->Start = pView->Strides[pEnum->Type];
+        pEnumState->Size = pView->Counts[pEnum->Type];
+    }
+
+    //
+    // There are cases where you can scope the values of an expression and get
+    // a resulting 0 from the divide operation. This can happen when the Left side of the expression
+    // results in a value < the right. For example, the pattern "MNL{1,0,N/2}" on boxes where there is only 1 numa node.
+    // We round these value up to 1 to ensure that we place at least a single item..
+    //
+    UINT32 offset = HwLayoutResolveExpression( pLayout, pState, enumIndex, pView, &pEnum->Offset );
+    UINT32 count = HwLayoutResolveExpressionAndRoundUp( pLayout, pState, enumIndex, pView, &pEnum->Count );
+    UINT32 stride = HwLayoutResolveExpressionAndRoundUp( pLayout, pState, enumIndex, pView, &pEnum->Stride );
+    UINT32 repeatCount = HwLayoutResolveExpressionAndRoundUp( pLayout, pState, enumIndex, pView, &pEnum->RepeatCount );
+    UINT32 repeatOffset = HwLayoutResolveExpression( pLayout, pState, enumIndex, pView, &pEnum->RepeatOffset );
+
+    Assert(repeatCount>0);
+    Assert(count>0);
+    Assert(stride>0);
+
+    //
+    // Positions are kept modulo Size so iteration wraps around within
+    // this enumerator's span.
+    //
+    pEnumState->Current = pEnumState->Start + ( offset % pEnumState->Size );
+
+    do
+    {
+        for( UINT32 i = 0; i < count; i++ )
+        {
+            hr = HwLayoutProcessEnum( pLayout, pView, pData, pfn, pState, enumIndex + 1 );
+            if( FAILED( hr ) || S_FALSE == hr )
+            {
+                return hr;
+            }
+
+            // Advance by 'stride', wrapping within the span.
+            pEnumState->Current = pEnumState->Start + ((pEnumState->Current - pEnumState->Start + stride) % pEnumState->Size);
+        }
+
+        // Shift by 'repeatOffset' before the next repeat pass.
+        pEnumState->Current = pEnumState->Start + ((pEnumState->Current - pEnumState->Start + repeatOffset) % pEnumState->Size);
+
+        repeatCount--;
+
+    } while( repeatCount > 0 );
+
+
+    return S_OK;
+}
+
diff --git a/src/mpi/common/hwsummary.cpp b/src/mpi/common/hwsummary.cpp
new file mode 100644
index 0000000..ab2002d
--- /dev/null
+++ b/src/mpi/common/hwsummary.cpp
@@ -0,0 +1,135 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "util.h"
+#include "kernel32util.h"
+
+HRESULT
+HwInfoInitializeWin6(
+ _Out_ HWINFO* pInfo
+ );
+
+
+HRESULT
+HwInfoInitializeWin7(
+ _Inout_ UINT32* pnInfos,
+ _Out_writes_to_opt_(*pnInfos,*pnInfos) HWINFO pInfos[]
+ );
+
+
+//
+// Summary:
+// Initialize HWINFO array from the local logical processor information.
+//
+// Parameters:
+// pnInfos - On input, max size of pInfos
+// on output, size used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required count will be written here.
+// pInfos - pointer to buffer to fill with values
+// pFilters - (optional) pointer to array of affinity filters
+// must be same length as pInfos when specified.
+//
+HRESULT
+HwInfoInitialize(
+    _Inout_ UINT32* pnInfos,
+    _Out_writes_to_opt_(*pnInfos,*pnInfos) HWINFO pInfos[]
+    )
+{
+    //
+    // Windows 7 and later may report multiple entries (one per
+    // processor group), so delegate to the Win7 path directly.
+    //
+    if( FALSE != g_IsWin7OrGreater )
+    {
+        return HwInfoInitializeWin7(pnInfos,pInfos);
+    }
+
+    //
+    // Downlevel OS: exactly one HWINFO entry is produced, so the caller
+    // must supply room for at least one.
+    //
+    if( *pnInfos < 1 )
+    {
+        *pnInfos = 1;
+        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+    }
+
+    Assert(pInfos != nullptr);
+
+    HRESULT hr = HwInfoInitializeWin6(pInfos);
+    if( SUCCEEDED( hr ) )
+    {
+        *pnInfos = 1;
+    }
+    return hr;
+}
+
+
+
+//
+// Summary:
+// Initialize the local HWSUMMARY information
+//
+// Parameters:
+// pcbSummary - On input, size of pSummary buffer
+// on output, size used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required count will be written here.
+// pSummary - The summary information to initialize.
+//
+HRESULT
+HwSummaryInitialize(
+    _Inout_ UINT32* pcbSummary,
+    _Inout_updates_bytes_to_(*pcbSummary, *pcbSummary) HWSUMMARY* pSummary
+    )
+{
+    HRESULT hr;
+    UINT32 nInfos = 0;
+    UINT32 cb;
+
+    //
+    // First call with a null buffer just queries the required HWINFO
+    // count; ERROR_INSUFFICIENT_BUFFER is the expected result here,
+    // any other failure is propagated.
+    //
+    hr = HwInfoInitialize( &nInfos, nullptr );
+    if( FAILED( hr ) )
+    {
+        if( HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER) != hr )
+        {
+            return hr;
+        }
+    }
+
+    //
+    // Size of the summary with its trailing variable-length Infos array.
+    //
+    cb = sizeof(*pSummary) - sizeof(pSummary->Infos) +
+        (sizeof(pSummary->Infos[0]) * nInfos);
+
+    if( *pcbSummary < cb )
+    {
+        *pcbSummary = cb;
+        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+    }
+
+    Assert(pSummary != nullptr);
+
+    pSummary->Size = cb;
+    pSummary->Count = nInfos;
+
+    hr = HwInfoInitialize( &nInfos, pSummary->Infos );
+    if( FAILED( hr ) )
+    {
+        return hr;
+    }
+
+    //
+    // For a single processor group, restrict the summary to the current
+    // process's affinity mask.
+    //
+    if (!(g_IsWin7OrGreater && nInfos > 1))
+    {
+        KAFFINITY sysMask;
+        //
+        // Anonymous union aliases the KAFFINITY onto a zero-initialized
+        // UINT64 so HwSummaryFilter receives a full 64-bit mask.
+        // NOTE(review): assumes KAFFINITY is at most 64 bits wide - confirm.
+        //
+        union
+        {
+            KAFFINITY procMask;
+            UINT64 procMask64;
+        };
+        procMask64 = 0;
+        if( FALSE == ::GetProcessAffinityMask( ::GetCurrentProcess(), &procMask, &sysMask ) )
+        {
+            return HRESULT_FROM_WIN32( ::GetLastError() );
+        }
+        HwSummaryFilter( pSummary, &procMask64 );
+    }
+
+    *pcbSummary = cb;
+    return S_OK;
+}
+
+
+
diff --git a/src/mpi/common/hwtree.cpp b/src/mpi/common/hwtree.cpp
new file mode 100644
index 0000000..9d5b194
--- /dev/null
+++ b/src/mpi/common/hwtree.cpp
@@ -0,0 +1,575 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "util.h"
+
+//
+// List of masks, indexed by (count - 1), of sequential bits set starting at
+// bit 0; WidthMasks[n] has the low (n + 1) bits set.  Used to carve
+// fixed-width processor masks out of a group mask.
+// NOTE(review): literals use the 'L' suffix; the values still fit a 64-bit
+// type, but 'ULL' would be more explicit - confirm no compiler warnings.
+//
+static const UINT64 WidthMasks[] =
+{
+    0x0000000000000001L,0x0000000000000003L,0x0000000000000007L,0x000000000000000FL,
+    0x000000000000001FL,0x000000000000003FL,0x000000000000007FL,0x00000000000000FFL,
+    0x00000000000001FFL,0x00000000000003FFL,0x00000000000007FFL,0x0000000000000FFFL,
+    0x0000000000001FFFL,0x0000000000003FFFL,0x0000000000007FFFL,0x000000000000FFFFL,
+    0x000000000001FFFFL,0x000000000003FFFFL,0x000000000007FFFFL,0x00000000000FFFFFL,
+    0x00000000001FFFFFL,0x00000000003FFFFFL,0x00000000007FFFFFL,0x0000000000FFFFFFL,
+    0x0000000001FFFFFFL,0x0000000003FFFFFFL,0x0000000007FFFFFFL,0x000000000FFFFFFFL,
+    0x000000001FFFFFFFL,0x000000003FFFFFFFL,0x000000007FFFFFFFL,0x00000000FFFFFFFFL,
+    0x00000001FFFFFFFFL,0x00000003FFFFFFFFL,0x00000007FFFFFFFFL,0x0000000FFFFFFFFFL,
+    0x0000001FFFFFFFFFL,0x0000003FFFFFFFFFL,0x0000007FFFFFFFFFL,0x000000FFFFFFFFFFL,
+    0x000001FFFFFFFFFFL,0x000003FFFFFFFFFFL,0x000007FFFFFFFFFFL,0x00000FFFFFFFFFFFL,
+    0x00001FFFFFFFFFFFL,0x00003FFFFFFFFFFFL,0x00007FFFFFFFFFFFL,0x0000FFFFFFFFFFFFL,
+    0x0001FFFFFFFFFFFFL,0x0003FFFFFFFFFFFFL,0x0007FFFFFFFFFFFFL,0x000FFFFFFFFFFFFFL,
+    0x001FFFFFFFFFFFFFL,0x003FFFFFFFFFFFFFL,0x007FFFFFFFFFFFFFL,0x00FFFFFFFFFFFFFFL,
+    0x01FFFFFFFFFFFFFFL,0x03FFFFFFFFFFFFFFL,0x07FFFFFFFFFFFFFFL,0x0FFFFFFFFFFFFFFFL,
+    0x1FFFFFFFFFFFFFFFL,0x3FFFFFFFFFFFFFFFL,0x7FFFFFFFFFFFFFFFL,0xFFFFFFFFFFFFFFFFL,
+};
+
+
+//
+// Summary:
+// Utility function to initialize the specified node within the tree.
+//
+// Parameters:
+// pTree - pointer to the tree to manipulate
+// index - index of the node within the tree
+// type - the type to set on the node
+// parent - the parent ID of the node
+// previous - the previous node
+//
+static void
+HwTreeInitializeNode(
+    _Inout_ HWTREE* pTree,
+    _In_ UINT32 index,
+    _In_ HWNODE_TYPE type,
+    _In_ UINT32 parent,
+    _In_ UINT32 previous
+    )
+{
+    pTree->Nodes[index].Type = type;
+    pTree->Nodes[index].Parent = parent;
+    pTree->Nodes[index].FirstChild = HWNODEID_NONE;
+    pTree->Nodes[index].LastChild = HWNODEID_NONE;
+    pTree->Nodes[index].NextSibling = HWNODEID_NONE;
+    pTree->Nodes[index].PrevSibling = previous;
+
+    //
+    // Nodes are appended in order, so the node being initialized is
+    // always the parent's most recent (last) child.
+    //
+    pTree->Nodes[parent].LastChild = index;
+
+    //
+    // If there is no previous node, or the previous node hangs off a
+    // different parent, this node starts the parent's child list;
+    // otherwise link it as the previous node's next sibling.
+    //
+    if( previous == HWNODEID_NONE ||
+        pTree->Nodes[previous].Parent != parent )
+    {
+        pTree->Nodes[parent].FirstChild = index;
+    }
+    else
+    {
+        pTree->Nodes[previous].NextSibling = index;
+    }
+}
+
+
+//
+// Summary:
+// Utility function to initialize the specified node within the tree with affinity data.
+//
+// Parameters:
+// pTree - pointer to the tree to manipulate
+// index - index of the node within the tree
+// type - the type to set on the node
+// parent - the parent ID of the node
+// previous - the previous node
+// group - the processor group to set in the affinity value
+// mask - the affinity mask to set
+//
+static void
+HwTreeInitializeAffinityNode(
+    _Inout_ HWTREE* pTree,
+    _In_ UINT32 index,
+    _In_ HWNODE_TYPE type,
+    _In_ UINT32 parent,
+    _In_ UINT32 previous,
+    _In_ UINT16 group,
+    _In_ UINT64 mask
+    )
+{
+    //
+    // Initialize the common header first, then attach the affinity payload.
+    //
+    HwTreeInitializeNode(pTree, index, type, parent, previous);
+
+    HWNODE* pNode = &pTree->Nodes[index];
+    pNode->Affinity.GroupId = group;
+    pNode->Affinity.Mask = mask;
+}
+
+
+//
+// Summary:
+// Utility function to initialize the specified machine node
+//
+// Parameters:
+// pTree - pointer to the tree to manipulate
+// index - index of the node within the tree
+// type - the type to set on the node
+// parent - the parent ID of the node
+// previous - the previous node
+// hostId - the hostid of the machine node
+//
+static void
+HwTreeInitializeMachineNode(
+    _Inout_ HWTREE* pTree,
+    _In_ UINT32 index,
+    _In_ HWNODE_TYPE type,
+    _In_ UINT32 parent,
+    _In_ UINT32 previous,
+    _In_ int hostId
+    )
+{
+    //
+    // Initialize the common header first, then record the machine's host id.
+    //
+    HwTreeInitializeNode(pTree, index, type, parent, previous);
+
+    HWNODE* pNode = &pTree->Nodes[index];
+    pNode->HostId = hostId;
+}
+
+
+//
+// Summary:
+// Initialize all logical core elements in the tree.
+//
+// Parameter:
+// pTree - pointer to the tree to populate
+// pStrides - pointer to the current strides of each level in the tree
+// pEnds - pointer to the current ends of each level in the tree
+// group - the processor group that this core belongs to.
+// pcoreMask - the processor mask for the parent physical core
+//
+static HRESULT
+HwTreeInitializeLCores(
+    _Inout_ HWTREE* pTree,
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pStrides[],
+    _In_reads_(HWNODE_MAX_DEPTH) const UINT32 pEnds[],
+    _In_ UINT16 group,
+    _In_ UINT64 pcoreMask
+    )
+{
+    UINT32 last = HWNODEID_NONE;
+    UINT64 fullMask = pcoreMask;
+
+    //
+    // Get the lowest set bit from pcoreMask (the first logical core)
+    //
+    UINT64 mask = ((~pcoreMask << 1 ) | 1) & pcoreMask;
+
+    //
+    // Create one LCORE node per set bit, walking from lowest to highest.
+    //
+    while( 0 != fullMask )
+    {
+        Assert(mask & pcoreMask);
+
+        // Overflowing the precomputed LCORE slot range means the node
+        // counts and the masks disagree.
+        if( pStrides[HWNODE_TYPE_LCORE] >= pEnds[HWNODE_TYPE_LCORE] )
+        {
+            return E_UNEXPECTED;
+        }
+
+        HwTreeInitializeAffinityNode(
+            pTree,
+            pStrides[HWNODE_TYPE_LCORE],
+            HWNODE_TYPE_LCORE,
+            pStrides[HWNODE_TYPE_PCORE],
+            last,
+            group,
+            mask
+            );
+
+        last = pStrides[HWNODE_TYPE_LCORE];
+        pStrides[HWNODE_TYPE_LCORE]++;
+        pTree->Counts[HWNODE_TYPE_LCORE]++;
+
+        //
+        // Remove mask from the remaining bits
+        //
+        fullMask &= (~mask);
+
+        //
+        // Move to the next bit
+        //
+        mask <<= 1;
+    }
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Initialize all physical core elements in the tree.
+//
+// Parameter:
+// pTree - pointer to the tree to populate
+// pStrides - pointer to the current strides of each level in the tree
+// pEnds - pointer to the current ends of each level in the tree
+// numaMask - the processor mask for the parent numa node
+// pInfo - pointer to the HWINFO for the current processor group
+//
+static HRESULT
+HwTreeInitializePcores(
+    _Inout_ HWTREE* pTree,
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pStrides[],
+    _In_reads_(HWNODE_MAX_DEPTH) const UINT32 pEnds[],
+    _In_ UINT64 numaMask,
+    _In_ const HWINFO* pInfo
+    )
+{
+    HRESULT hr;
+    UINT32 last = HWNODEID_NONE;
+    UINT64 fullMask = numaMask;
+
+    //
+    // Get the lowest N bits from numaMask where N is PcoreWidth (the first physical core)
+    //
+    Assert(pInfo->PcoreWidth>0);
+    UINT64 mask = ((~numaMask << pInfo->PcoreWidth ) | WidthMasks[pInfo->PcoreWidth-1]) & numaMask;
+
+    //
+    // Create one PCORE node per PcoreWidth-wide slice of the numa mask,
+    // populating each physical core's logical cores as we go.
+    //
+    while( 0 != fullMask )
+    {
+        Assert( mask & numaMask );
+
+        // Overflowing the precomputed PCORE slot range means the node
+        // counts and the masks disagree.
+        if( pStrides[HWNODE_TYPE_PCORE] >= pEnds[HWNODE_TYPE_PCORE] )
+        {
+            return E_UNEXPECTED;
+        }
+
+        HwTreeInitializeAffinityNode(
+            pTree,
+            pStrides[HWNODE_TYPE_PCORE],
+            HWNODE_TYPE_PCORE,
+            pStrides[HWNODE_TYPE_NUMA],
+            last,
+            pInfo->Group,
+            mask
+            );
+
+        hr = HwTreeInitializeLCores( pTree, pStrides, pEnds, pInfo->Group, mask );
+        if( FAILED( hr ) )
+        {
+            return hr;
+        }
+
+        last = pStrides[HWNODE_TYPE_PCORE];
+        pStrides[HWNODE_TYPE_PCORE]++;
+        pTree->Counts[HWNODE_TYPE_PCORE]++;
+
+        //
+        // Remove mask from the remaining bits
+        //
+        fullMask &= (~mask);
+
+        //
+        // Move to the next physical core
+        //
+        mask <<= pInfo->PcoreWidth;
+    }
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Initialize all numa elements in the tree.
+//
+// Parameter:
+// pTree - pointer to the tree to populate
+// pStrides - pointer to the current strides of each level in the tree
+// pEnds - pointer to the current ends of each level in the tree
+// pInfo - pointer to the HWINFO for the current processor group
+//
+static HRESULT
+HwTreeInitializeNuma(
+    _Inout_ HWTREE* pTree,
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pStrides[],
+    _In_reads_(HWNODE_MAX_DEPTH) const UINT32 pEnds[],
+    _In_ const HWINFO* pInfo
+    )
+{
+    HRESULT hr;
+    UINT32 last = HWNODEID_NONE;
+    UINT64 fullMask = pInfo->Mask;
+
+    //
+    // Get the lowest N bits from group mask where N is NumaWidth (the numa node)
+    //
+    Assert(pInfo->NumaWidth>0);
+    UINT64 mask = ((~pInfo->Mask << pInfo->NumaWidth ) | WidthMasks[pInfo->NumaWidth-1]) & pInfo->Mask;
+
+    //
+    // Create one NUMA node per NumaWidth-wide slice of the group mask,
+    // populating each numa node's physical cores as we go.
+    //
+    while( 0 != fullMask )
+    {
+        Assert( mask & pInfo->Mask );
+
+        // Overflowing the precomputed NUMA slot range means the node
+        // counts and the masks disagree.
+        if( pStrides[HWNODE_TYPE_NUMA] >= pEnds[HWNODE_TYPE_NUMA] )
+        {
+            return E_UNEXPECTED;
+        }
+
+        HwTreeInitializeAffinityNode(
+            pTree,
+            pStrides[HWNODE_TYPE_NUMA],
+            HWNODE_TYPE_NUMA,
+            pStrides[HWNODE_TYPE_GROUP],
+            last,
+            pInfo->Group,
+            mask
+            );
+
+        hr = HwTreeInitializePcores(pTree, pStrides, pEnds, mask, pInfo);
+        if( FAILED( hr ) )
+        {
+            return hr;
+        }
+
+        last = pStrides[HWNODE_TYPE_NUMA];
+        pStrides[HWNODE_TYPE_NUMA]++;
+        pTree->Counts[HWNODE_TYPE_NUMA]++;
+
+        //
+        // Remove mask from the remaining bits
+        //
+        fullMask &= (~mask);
+
+        //
+        // Move to the next numa node
+        //
+        mask <<= pInfo->NumaWidth;
+    }
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Initialize all Group and Machine element in the tree.
+//
+// Parameter:
+// pTree - pointer to the tree to populate
+// pStrides - pointer to the current strides of each level in the tree
+// pEnds - pointer to the current ends of each level in the tree
+// pSummary - Pointer to machine's summary
+//
+static HRESULT
+HwTreeInitializeGroups(
+    _Inout_ HWTREE* pTree,
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pStrides[],
+    _In_reads_(HWNODE_MAX_DEPTH) const UINT32 pEnds[],
+    _In_ const HWSUMMARY* pSummary
+
+    )
+{
+    UINT32 lastMachine = HWNODEID_NONE;
+    UINT32 lastGroup = HWNODEID_NONE;
+    HRESULT hr;
+
+    // Nothing to add for a null summary.
+    if(pSummary == nullptr)
+    {
+        return S_OK;
+    }
+
+    if( pStrides[HWNODE_TYPE_MACHINE] >= pEnds[HWNODE_TYPE_MACHINE] )
+    {
+        return E_UNEXPECTED;
+    }
+
+    //
+    // Create the single machine node for the local machine; lastMachine is
+    // still HWNODEID_NONE here, so it becomes WORLD's first child.
+    //
+    HwTreeInitializeMachineNode(
+        pTree,
+        pStrides[HWNODE_TYPE_MACHINE],
+        HWNODE_TYPE_MACHINE,
+        HWNODEID_WORLD,
+        lastMachine,
+        HWMACHINEID_SELF
+        );
+    lastGroup = HWNODEID_NONE;
+
+    //
+    // One GROUP node per HWINFO entry, each filled in depth-first.
+    //
+    for( UINT32 i = 0; i < pSummary->Count; i++ )
+    {
+        if( pStrides[HWNODE_TYPE_GROUP] >= pEnds[HWNODE_TYPE_GROUP] )
+        {
+            return E_UNEXPECTED;
+        }
+
+        HwTreeInitializeAffinityNode(
+            pTree,
+            pStrides[HWNODE_TYPE_GROUP],
+            HWNODE_TYPE_GROUP,
+            pStrides[HWNODE_TYPE_MACHINE],
+            lastGroup,
+            pSummary->Infos[i].Group,
+            pSummary->Infos[i].Mask
+            );
+
+        hr = HwTreeInitializeNuma( pTree, pStrides, pEnds, &pSummary->Infos[i] );
+        if( FAILED( hr ) )
+        {
+            return hr;
+        }
+
+        lastGroup = pStrides[HWNODE_TYPE_GROUP];
+        pStrides[HWNODE_TYPE_GROUP]++;
+        pTree->Counts[HWNODE_TYPE_GROUP]++;
+    }
+
+    // Commit the machine node slot last, after all children succeeded.
+    lastMachine = pStrides[HWNODE_TYPE_MACHINE];
+    pStrides[HWNODE_TYPE_MACHINE]++;
+    pTree->Counts[HWNODE_TYPE_MACHINE]++;
+
+    return S_OK;
+}
+
+
+//
+// Summary:
+// Calculate the size of the tree required for the specified HWINFO array
+//
+// Parameters:
+// pSummary - Pointer to a machine's summary information.
+// pCounts - Pointer to array of UINT32 for holding the counts of each level in the tree.
+//
+// Returns:
+// Total node code for full tree.
+//
+//
+// Returns 0 (and leaves pCounts untouched) when no summary is supplied.
+//
+_Success_(return > 0)
+static UINT32
+HwTreeCalculateNodeCounts(
+    _In_opt_ const HWSUMMARY* pSummary,
+    _Out_writes_(HWNODE_MAX_DEPTH) UINT32 pCounts[]
+    )
+{
+    if( nullptr == pSummary )
+    {
+        return 0;
+    }
+
+    //
+    // Accumulate the per-level totals across all processor groups.
+    //
+    UINT32 nNuma = 0;
+    UINT32 nPcore = 0;
+    UINT32 nLcore = 0;
+
+    for( UINT32 i = 0; i < pSummary->Count; i++ )
+    {
+        const HWINFO* pInfo = &pSummary->Infos[i];
+
+        // one per numa node, one per physical core, one per logical core
+        nNuma += pInfo->GroupWidth / pInfo->NumaWidth;
+        nPcore += pInfo->GroupWidth / pInfo->PcoreWidth;
+        nLcore += pInfo->GroupWidth;
+    }
+
+    pCounts[HWNODE_TYPE_MACHINE] = 1;
+    pCounts[HWNODE_TYPE_GROUP] = pSummary->Count;
+    pCounts[HWNODE_TYPE_NUMA] = nNuma;
+    pCounts[HWNODE_TYPE_PCORE] = nPcore;
+    pCounts[HWNODE_TYPE_LCORE] = nLcore;
+
+    //
+    // Include 1 extra for the WORLD node.
+    //
+    return 1 +
+        pCounts[HWNODE_TYPE_MACHINE] +
+        pCounts[HWNODE_TYPE_GROUP] +
+        pCounts[HWNODE_TYPE_NUMA] +
+        pCounts[HWNODE_TYPE_PCORE] +
+        pCounts[HWNODE_TYPE_LCORE];
+}
+
+
+//
+// Summary:
+// Initialize hardware tree representing all machines.
+//
+// Parameters:
+// pcbTree - On input, current size of pTree
+// On output, the size of the buffer used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required size will be written here.
+// pTree - pointer to HWTREE buffer
+// pSummary - Pointer to a machine's summary information.
+//
+// NOTE:
+// The memory is sequentially allocated, and can be marshalled with memcpy.
+//
+HRESULT
+HwTreeInitialize(
+    _Inout_ UINT32* pcbTree,
+    _Inout_updates_to_(*pcbTree,*pcbTree) HWTREE* pTree,
+    _In_ const HWSUMMARY* pSummary
+    )
+{
+    HRESULT hr;
+    UINT32 cb;
+    UINT32 nNodes;
+    UINT32 strides[HWNODE_MAX_DEPTH];
+    UINT32 ends[HWNODE_MAX_DEPTH];
+
+    //
+    // Count total nodes + 1 for world
+    //
+    nNodes = HwTreeCalculateNodeCounts( pSummary, ends );
+
+    //
+    // There must be at least 1 node for each level in the tree.
+    //
+    Assert( nNodes >= HWNODE_MAX_DEPTH );
+
+    // Byte size of the tree with its trailing variable-length Nodes array.
+    cb = sizeof(*pTree) - sizeof(pTree->Nodes) + (sizeof(pTree->Nodes[0]) * nNodes);
+
+    if( *pcbTree < cb )
+    {
+        *pcbTree = cb;
+        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+    }
+
+    //
+    // Update the stride table.  Each level's nodes are stored contiguously;
+    // a level starts where the previous level's range ends (WORLD is node 0).
+    //
+    strides[HWNODE_TYPE_MACHINE]= 1;
+    strides[HWNODE_TYPE_GROUP] = strides[HWNODE_TYPE_MACHINE] + ends[HWNODE_TYPE_MACHINE];
+    strides[HWNODE_TYPE_NUMA] = strides[HWNODE_TYPE_GROUP] + ends[HWNODE_TYPE_GROUP];
+    strides[HWNODE_TYPE_PCORE] = strides[HWNODE_TYPE_NUMA] + ends[HWNODE_TYPE_NUMA];
+    strides[HWNODE_TYPE_LCORE] = strides[HWNODE_TYPE_PCORE] + ends[HWNODE_TYPE_PCORE];
+
+    pTree->Strides[HWNODE_TYPE_MACHINE] = strides[HWNODE_TYPE_MACHINE];
+    pTree->Strides[HWNODE_TYPE_GROUP] = strides[HWNODE_TYPE_GROUP];
+    pTree->Strides[HWNODE_TYPE_NUMA] = strides[HWNODE_TYPE_NUMA];
+    pTree->Strides[HWNODE_TYPE_PCORE] = strides[HWNODE_TYPE_PCORE];
+    pTree->Strides[HWNODE_TYPE_LCORE] = strides[HWNODE_TYPE_LCORE];
+
+    // Counts are incremented as nodes are added below.
+    pTree->Counts[HWNODE_TYPE_MACHINE] = 0;
+    pTree->Counts[HWNODE_TYPE_GROUP] = 0;
+    pTree->Counts[HWNODE_TYPE_NUMA] = 0;
+    pTree->Counts[HWNODE_TYPE_PCORE] = 0;
+    pTree->Counts[HWNODE_TYPE_LCORE] = 0;
+
+    //
+    // update the "ends" to be offset from the strides so we don't
+    // have to track the start of the chain through the iteration.
+    //
+    ends[HWNODE_TYPE_MACHINE] += strides[HWNODE_TYPE_MACHINE];
+    ends[HWNODE_TYPE_GROUP] += strides[HWNODE_TYPE_GROUP];
+    ends[HWNODE_TYPE_NUMA] += strides[HWNODE_TYPE_NUMA];
+    ends[HWNODE_TYPE_PCORE] += strides[HWNODE_TYPE_PCORE];
+    ends[HWNODE_TYPE_LCORE] += strides[HWNODE_TYPE_LCORE];
+
+
+    // The WORLD root occupies slot 0 and starts with no children.
+    pTree->Nodes[HWNODEID_WORLD].Type = HWNODE_TYPE_WORLD;
+    pTree->Nodes[HWNODEID_WORLD].Parent = HWNODEID_NONE;
+    pTree->Nodes[HWNODEID_WORLD].FirstChild = HWNODEID_NONE;
+    pTree->Nodes[HWNODEID_WORLD].LastChild = HWNODEID_NONE;
+    pTree->Nodes[HWNODEID_WORLD].NextSibling = HWNODEID_NONE;
+    pTree->Nodes[HWNODEID_WORLD].PrevSibling = HWNODEID_NONE;
+
+
+    hr = HwTreeInitializeGroups(pTree, strides, ends, pSummary);
+    if( FAILED( hr ) )
+    {
+        return hr;
+    }
+
+    *pcbTree = cb;
+    return S_OK;
+}
+
diff --git a/src/mpi/common/hwtree.h b/src/mpi/common/hwtree.h
new file mode 100644
index 0000000..0360347
--- /dev/null
+++ b/src/mpi/common/hwtree.h
@@ -0,0 +1,470 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include
+#include "hwtree_common.h"
+
+#pragma once
+
+//
+// The maximum depth of the tree
+//
+#define HWNODE_MAX_DEPTH ((UINT32)HWNODE_TYPE_MAX)
+
+
+//
+// Fixed width field for holding affinity information.
+//
+// Fields:
+// Mask - 64bit mask of processors with in the specified processor group.
+// GroupId - The processor group id that the affinity mask applies to.
+// Padding - padding to show wasted space.
+//
+typedef struct _HWAFFINITY
+{
+    UINT64          Mask;           // 64-bit mask of processors within GroupId
+    UINT16          GroupId;        // processor group the Mask applies to
+    UINT16          Padding[3];     // explicit pad; keeps the struct at 16 bytes
+
+} HWAFFINITY;
+
+
+#define HWNODEID_WORLD ((UINT32)0)
+#define HWNODEID_NONE ((UINT32)MAXDWORD)
+#define HWMACHINEID_SELF ((UINT32)1)
+
+//
+// Definition of basic tree node
+//
+// Fields:
+// Type - The type of data within the node.
+// Parent - Index of the parent element
+// FirstChild - Index of this nodes first child
+// LastChild - Index of this nodes last child
+// NextSibling - Index of this nodes next sibling
+// PrevSibling - Index of this nodes previous sibling
+//
+typedef struct HWNODEHEADER
+{
+    HWNODE_TYPE     Type;           // kind of data in the node; also its tree depth
+    UINT32          Parent;         // index of the parent node, HWNODEID_NONE at root
+    UINT32          FirstChild;     // index of first child, HWNODEID_NONE if none
+    UINT32          LastChild;      // index of last child, HWNODEID_NONE if none
+    UINT32          NextSibling;    // index of next sibling, HWNODEID_NONE if none
+    UINT32          PrevSibling;    // index of previous sibling, HWNODEID_NONE if none
+
+} HWNODEHEADER;
+
+
+//
+// Definition of a node in the HWTREE.
+//
+// Fields:
+// Affinity - Specifies the affinity information represented by the node
+// Valid only when Type > HWNODE_TYPE_MACHINE
+// HostId - Specifies the identifier for the machine in the tree.
+// Valid only when Type == HWNODE_TYPE_MACHINE
+//
+typedef struct _HWNODE
+    : public HWNODEHEADER
+{
+    // Union discriminated by the inherited Type field (see header comment).
+    union
+    {
+        HWAFFINITY  Affinity;       // valid only when Type > HWNODE_TYPE_MACHINE
+        int         HostId;         // valid only when Type == HWNODE_TYPE_MACHINE
+    };
+
+} HWNODE;
+
+
+//
+// Definition of a node in the HWVIEW.
+//
+// Fields:
+// NodeId - Specifies node id in the original tree.
+//
+typedef struct _HWVIEWNODE
+: public HWNODEHEADER
+{
+    UINT32      NodeId;             // index of the corresponding node in the backing HWTREE
+
+} HWVIEWNODE;
+
+
+//
+// Summary:
+// Data structure representing the a single machine hardware tree.
+//
+// Fields:
+// Counts - The total counts of each depth in the tree
+// Strides - The stride offset to the start of each level in the tree
+// Nodes - This list of HWNODE structures that make up the tree.
+//
+typedef struct _HWTREE
+{
+    UINT32      Counts[HWNODE_MAX_DEPTH];   // total node count at each depth
+    UINT32      Strides[HWNODE_MAX_DEPTH];  // index of the first node of each depth in Nodes
+    HWNODE      Nodes[ANYSIZE_ARRAY];       // variable-length tail; sized at allocation
+
+} HWTREE;
+
+
+//
+// Summary:
+// Structure that represents a portion of the tree within a HWTREE
+//
+// Fields:
+// Counts - The total counts of each depth in the tree
+// Strides - The stride offset to the start of each level in the tree
+// Nodes - This list of HWVIEWNODE structures that make up the tree.
+//
+typedef struct _HWVIEW
+{
+    const HWTREE*   Tree;                       // backing tree this view projects
+    UINT32          Counts[HWNODE_MAX_DEPTH];   // total node count at each depth
+    UINT32          Strides[HWNODE_MAX_DEPTH];  // index of the first node of each depth in Nodes
+    HWVIEWNODE      Nodes[ANYSIZE_ARRAY];       // variable-length tail; sized at allocation
+
+} HWVIEW;
+
+
+//
+// Summary:
+// Summary of the local information on an individual processor group
+//
+// Fields:
+// Mask - The entire bitmask for the group
+// ActiveMask - The active bitmask for the group after filter
+// Group - the processor group id
+// We only use UINT8 worth of groups. There is no
+// real world scenario where this will not work, and
+// using this size makes the structure pack cleanly.
+// GroupWidth - The number of logical cores for this group
+// NumaWidth - the number of logical cores per numa node
+// PcoreWidth - the number of logical cores per physical core
+// Padding - Pad.
+//
+typedef struct _HWINFO
+{
+    UINT64      Mask;           // entire bitmask for the group
+    UINT64      ActiveMask;     // bitmask after filtering (see HwSummaryFilter)
+    UINT8       Group;          // processor group id (UINT8 by design, see header comment)
+    UINT8       GroupWidth;     // logical cores in this group
+    UINT8       NumaWidth;      // logical cores per numa node
+    UINT8       PcoreWidth;     // logical cores per physical core
+    UINT32      Padding;        // explicit pad to an 8-byte multiple
+
+} HWINFO;
+
+
+//
+// Summary:
+// Hosts the logical processor information for the processor groups on a machine.
+//
+// Fields:
+// Size - The total size of the buffer
+// Count - The count of elements in Infos
+// Infos - The array of HWINFO, one for each Processor Group
+//
+typedef struct _HWSUMMARY
+{
+    UINT32      Size;                   // total size of this buffer in bytes
+    UINT32      Count;                  // number of valid entries in Infos
+    HWINFO      Infos[ANYSIZE_ARRAY];   // one entry per processor group; variable length
+
+} HWSUMMARY;
+
+
+//
+// Summary:
+// Hosts the summary information for a local machine.
+//
+// Fields:
+// Next - The pointer to the next machine in the chain
+// Summary - Pointer to the summary information for the machine
+// HostId - The unique id assigned to the machine.
+//
+typedef struct _HWMACHINEINFO
+{
+    struct _HWMACHINEINFO*  Next;       // next machine in the chain, or null at the end
+    HWSUMMARY*              Summary;    // summary information for this machine
+    int                     HostId;     // unique id assigned to the machine
+
+} HWMACHINEINFO;
+
+
+//
+// Summary:
+// Represents an expression parsed from the LAYOUT format string
+//
+// Fields:
+// Left - The left side of the expression
+// Right - The right side of the expression
+// Flags - The flags to indicate how to interpret Left and Right fields.
+// HWENUM_EXPR_WELLKNOWN_LEFT - means that left must be resolved
+// HWENUM_EXPR_DIVIDE_BY_RIGHT - means that right is present
+// HWENUM_EXPR_WELLKNOWN_RIGHT - means that right must be resolved
+// If no flags are set, Left is a constant value.
+//
+typedef struct _HWENUM_EXPR
+{
+    UINT32      Left;       // left operand; well-known id if HWENUM_EXPR_WELLKNOWN_LEFT set
+    UINT32      Right;      // right operand; meaningful only if HWENUM_EXPR_DIVIDE_BY_RIGHT set
+    UINT32      Flags;      // HWENUM_EXPR_* flags controlling interpretation (see header comment)
+
+} HWENUM_EXPR;
+
+
+#define HWENUM_EXPR_WELLKNOWN_LEFT 0x1
+#define HWENUM_EXPR_DIVIDE_BY_RIGHT 0x2
+#define HWENUM_EXPR_WELLKNOWN_RIGHT 0x4
+
+//
+// Summary:
+// Represents the settings for an individual enumeration in an HWTREE
+//
+// Fields:
+// Type - The depth in the HWTREE to target
+// Offset - The offset from the start to use
+// Count - The count of steps.
+// Stride - The stride to use for each step
+// RepeatCount - The number of times repeat all steps.
+// RepeatOffset - The size to step the offset each repeat.
+//
+typedef struct _HWENUM
+{
+    HWNODE_TYPE     Type;           // depth in the HWTREE to target
+    HWENUM_EXPR     Offset;         // offset from the start to use
+    HWENUM_EXPR     Count;          // count of steps
+    HWENUM_EXPR     Stride;         // stride to use for each step
+    HWENUM_EXPR     RepeatCount;    // number of times to repeat all steps
+    HWENUM_EXPR     RepeatOffset;   // size to step the offset each repeat
+
+} HWENUM;
+
+
+#define HWLAYOUT_ENUM_MAX (HWNODE_MAX_DEPTH)
+
+//
+// Summary:
+// Represents the layout information of the processes.
+//
+// Fields:
+// TargetType - The affinity target type
+// TargetCount - The count of processes to create on each target
+// EnumCount - The count of enumeration items specified
+// Must be <= HWLAYOUT_ENUM_MAX.
+// Enums - The array of enumeration items.
+//
+typedef struct _HWLAYOUT
+{
+    HWNODE_TYPE     TargetType;                 // affinity target type
+    UINT32          TargetCount;                // processes to create on each target
+    UINT32          EnumCount;                  // valid entries in Enums; must be <= HWLAYOUT_ENUM_MAX
+    HWENUM          Enums[HWLAYOUT_ENUM_MAX];   // enumeration items
+
+} HWLAYOUT;
+
+
+//
+// Summary:
+// Holds the iteration state during the processing of an HWENUM
+//
+// Fields:
+// Start - The start value for the full list
+// Size - The count of elements for the full list
+// Current - The current offset in the full list
+// This is used for deriving the size of child HWENUM's
+//
+typedef struct _HWENUM_STATE
+{
+    UINT32      Start;      // start value for the full list
+    UINT32      Size;       // count of elements in the full list
+    UINT32      Current;    // current offset; used to derive the size of child HWENUMs
+
+} HWENUM_STATE;
+
+
+//
+// Summary:
+// Holds the processing state of an HWLAYOUT
+//
+// Fields:
+// MinProc - The number of process that must be created.
+// MaxProc - The maximum number of processes to create.
+// ProcCount - The total number of processes created.
+// Enums - Array of states corresponding to the HWENUM in the layout
+// For example, item 0 represents the state for HWENUM 0 in the layout.
+//
+typedef struct _HWLAYOUT_STATE
+{
+    UINT32          MinProc;                    // number of processes that must be created
+    UINT32          MaxProc;                    // maximum number of processes to create
+    UINT32          ProcCount;                  // total number of processes created so far
+    HWENUM_STATE    Enums[HWLAYOUT_ENUM_MAX];   // state for the matching HWENUM in the layout
+
+} HWLAYOUT_STATE;
+
+
+//
+// Summary:
+// Callback function invoked for each set of processes created during layout.
+//
+// Parameters:
+// pLayout - Pointer to the requested layout information.
+// pState - Pointer to the layout iteration state information.
+// pView - The view to use to get the location information.
+// location - The location within the view to assign the item.
+// pData - Opaque data passed from entry to callback functions.
+// pCount - On input, specifies the count of the processes to created.
+// On output, specifies the number of processes created.
+//
+// NOTE: Return S_FALSE to terminate process creation immediately.
+//
+typedef HRESULT ( __stdcall FN_HwLayoutProcessCallback) (
+ __in const HWLAYOUT* pLayout,
+ __in const HWLAYOUT_STATE* pState,
+ __in const HWVIEW* pView,
+ __in UINT32 location,
+ __in PVOID pData,
+ __inout UINT32* pCount
+ );
+
+
+//
+// Summary:
+// Initialize hardware tree representing all machines.
+//
+// Parameters:
+// pcbTree - On input, current size of pTree
+// On output, the size of the buffer used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required size will be written here.
+// pTree - pointer to HWTREE buffer
+// pSummary - pointer to a machine's summary information.
+//
+// NOTE:
+// The memory is sequentially allocated, and can be marshalled with memcpy.
+//
+HRESULT
+HwTreeInitialize(
+ _Inout_ UINT32* pcbTree,
+ _Inout_updates_to_(*pcbTree,*pcbTree) HWTREE* pTree,
+ _In_ const HWSUMMARY* pSummary
+ );
+
+
+//
+// Summary:
+// Find the node id for the specified machine
+//
+// Parameters:
+// pTree - The tree to search for the machine node.
+// pInfo - The HostId field is used as the search key.
+//
+// Returns:
+// HWNODEID_NONE - if not found
+//
+inline UINT32
+HwTreeFindMachineNodeId(
+    _In_ const HWTREE*          pTree,
+    _In_ const HWMACHINEINFO*   pInfo
+    )
+{
+    //
+    // Machine nodes occupy the contiguous index range
+    // [Strides[MACHINE], Strides[MACHINE] + Counts[MACHINE]); scan it
+    // for a matching HostId.
+    //
+    const UINT32 first = pTree->Strides[HWNODE_TYPE_MACHINE];
+    const UINT32 end = first + pTree->Counts[HWNODE_TYPE_MACHINE];
+    for( UINT32 nodeId = first; nodeId < end; nodeId++ )
+    {
+        if( pTree->Nodes[nodeId].HostId == pInfo->HostId )
+        {
+            return nodeId;
+        }
+    }
+    return HWNODEID_NONE;
+}
+
+
+//
+// Summary:
+// Initialize the local HWSUMMARY information
+//
+// Parameters:
+// pcbSummary - On input, size of pSummary buffer
+// on output, size used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required count will be written here.
+// pSummary - The summary information to initialize.
+//
+HRESULT
+HwSummaryInitialize(
+ _Inout_ UINT32* pcbSummary,
+ _Inout_updates_bytes_to_(*pcbSummary, *pcbSummary) HWSUMMARY* pSummary
+ );
+
+
+//
+// Summary:
+// Filter out the processor information in each processor group.
+//
+// Parameters:
+// pSummary - The summary information to filter.
+// pFilters - The core filters to apply
+//
+inline void
+HwSummaryFilter(
+    _Inout_ HWSUMMARY* pSummary,
+    _In_reads_(pSummary->Count) const UINT64 pFilters[]
+    )
+{
+    //
+    // Apply each group's filter mask to its active core mask; entry i of
+    // pFilters corresponds to entry i of Infos.
+    //
+    HWINFO* pInfos = pSummary->Infos;
+    for( UINT32 idx = 0; idx < pSummary->Count; ++idx )
+    {
+        pInfos[idx].ActiveMask &= pFilters[idx];
+    }
+}
+
+
+//
+// Summary:
+// Initialize a HWVIEW from an HWTREE
+//
+// Parameters:
+// pcbView - On input, current size of pView
+// On output, the size of the buffer used.
+// if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+// the required size will be written here.
+// pView - pointer to HWVIEW buffer
+// pTree - Pointer to tree to base view on
+// pSummary - Pointer to a summary of machine.
+//
+HRESULT
+HwViewInitialize(
+ _Inout_ UINT32* pcbView,
+ _Out_writes_to_(*pcbView,*pcbView) HWVIEW* pView,
+ _In_ const HWTREE* pTree,
+ _In_ const HWSUMMARY* pSummary
+ );
+
+
+//
+// Summary:
+// Process the specified layout using the specified tree
+//
+// Parameters:
+// pLayout - Pointer to layout description
+// pView - Pointer to the view
+// minProc - Minimum number of processes to create
+// maxProc - Maximum number of processes to create
+// pData - Opaque data pointer to hand to callback
+// pfn - Callback function to invoke for each iteration
+//
+HRESULT
+HwLayoutProcess(
+ _In_ const HWLAYOUT* pLayout,
+ _In_ const HWVIEW* pView,
+ _In_ UINT32 minProc,
+ _In_ UINT32 maxProc,
+ _In_ PVOID pData,
+ _In_ FN_HwLayoutProcessCallback* pfn
+ );
+
+
diff --git a/src/mpi/common/hwtree_common.h b/src/mpi/common/hwtree_common.h
new file mode 100644
index 0000000..95d53f0
--- /dev/null
+++ b/src/mpi/common/hwtree_common.h
@@ -0,0 +1,45 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+//
+// Represents the node types in the tree. Each value
+// corresponds to depth in the HW tree.
+//
+typedef enum _HWNODE_TYPE
+{
+    HWNODE_TYPE_WORLD = -1,     // virtual root above all machines; not a real depth
+    HWNODE_TYPE_MACHINE = 0,
+    HWNODE_TYPE_GROUP = 1,
+    HWNODE_TYPE_NUMA = 2,
+    HWNODE_TYPE_PCORE = 3,
+    HWNODE_TYPE_LCORE = 4,
+
+    HWNODE_TYPE_MAX,            // count of real depths; sizes the per-depth arrays
+
+} HWNODE_TYPE;
+
+
+// Affinity placement policies.  NOTE(review): the placement semantics are
+// implemented by consumers of this enum and are not visible in this file.
+enum SMPD_AFFINITY_PLACEMENT
+{
+    SMPD_AFFINITY_DISABLED = 0,
+    SMPD_AFFINITY_SPREAD = 1,
+    SMPD_AFFINITY_SEQUENTIAL = 2,
+    SMPD_AFFINITY_BALANCED = 3,
+    SMPD_AFFINITY_DEFAULT = SMPD_AFFINITY_SPREAD,
+};
+
+
+
+// Parsed affinity options.
+typedef struct _AffinityOptions
+{
+    BOOL isSet;                             // options have been populated
+    BOOL isExplicit;                        // NOTE(review): presumably user gave affinity explicitly - confirm at callers
+    BOOL isAuto;                            // NOTE(review): presumably automatic placement requested - confirm at callers
+    enum SMPD_AFFINITY_PLACEMENT placement; // placement policy
+    HWNODE_TYPE target;                     // affinity target depth
+    HWNODE_TYPE stride;                     // stride depth between placements
+    INT affinityTableStyle;                 // NOTE(review): table style ids - semantics not visible here
+    INT hwTableStyle;
+} AffinityOptions;
diff --git a/src/mpi/common/hwview.cpp b/src/mpi/common/hwview.cpp
new file mode 100644
index 0000000..d237f78
--- /dev/null
+++ b/src/mpi/common/hwview.cpp
@@ -0,0 +1,292 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "hwtree.h"
+#include "util.h"
+
+//
+// Summary:
+// Utility function to initialize the specified node within the view.
+//
+// Parameters:
+// pView - pointer to the view to manipulate
+// index - index of the node with in the tree
+// type - the type to set on the node
+// parent - the parent ID of the node
+// previous - the previous node
+// treeNodeId - id of the node in the original tree
+//
+static void
+HwViewInitializeNode(
+    _Inout_ HWVIEW* pView,
+    _In_ UINT32 index,
+    _In_ HWNODE_TYPE type,
+    _In_ UINT32 parent,
+    _In_ UINT32 previous,
+    _In_ UINT32 treeNodeId
+    )
+{
+    pView->Nodes[index].Type = type;
+    pView->Nodes[index].Parent = parent;
+    pView->Nodes[index].FirstChild = HWNODEID_NONE;
+    pView->Nodes[index].LastChild = HWNODEID_NONE;
+    pView->Nodes[index].NextSibling = HWNODEID_NONE;
+    pView->Nodes[index].PrevSibling = previous;
+
+    //
+    // Children are added to a parent in order, so this node is always the
+    // parent's (new) last child.
+    //
+    pView->Nodes[parent].LastChild = index;
+
+    //
+    // 'previous' is the previously placed node at this depth across the whole
+    // view, not necessarily a child of this parent.  If it belongs to a
+    // different parent (or does not exist), this node starts a new sibling
+    // chain; otherwise it is linked after 'previous'.
+    //
+    if( previous == HWNODEID_NONE ||
+        pView->Nodes[previous].Parent != parent )
+    {
+        pView->Nodes[parent].FirstChild = index;
+    }
+    else
+    {
+        pView->Nodes[previous].NextSibling = index;
+    }
+
+    pView->Nodes[index].NodeId = treeNodeId;
+}
+
+//
+// Summary:
+//  Initialize the subtree elements of the view.
+//
+// Parameters:
+//  pView       - pointer to the view to initialize
+//  pStrides    - per-depth index of the next free slot in pView->Nodes
+//  pEnds       - per-depth end index (exclusive) for each level in the view
+//  parentId    - node id, in the backing tree, of the parent whose children to add
+//  pSummary    - summary used to skip cores filtered out of the active masks
+//
+static HRESULT
+HwViewInitializeSubTree(
+    _Inout_ HWVIEW* pView,
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pStrides[],
+    _Inout_updates_(HWNODE_MAX_DEPTH) UINT32 pEnds[],
+    _In_ UINT32 parentId,
+    _In_ const HWSUMMARY* pSummary
+    )
+{
+    UINT32 last = HWNODEID_NONE;
+
+    //
+    // A tree node's children occupy a contiguous index range, so the walk is
+    // [FirstChild, LastChild].  NOTE(review): assumes the parent has at least
+    // one child; FirstChild == HWNODEID_NONE would index out of bounds.
+    //
+    UINT32 current = pView->Tree->Nodes[parentId].FirstChild;
+    UINT32 end = pView->Tree->Nodes[parentId].LastChild;
+
+    HWNODE_TYPE depth = pView->Tree->Nodes[current].Type;
+
+    Assert(depth >= HWNODE_TYPE_GROUP);
+    Assert( nullptr != pSummary );
+
+    while( current <= end )
+    {
+        if( pStrides[depth] >= pEnds[depth] )
+        {
+            return E_UNEXPECTED;
+        }
+
+        //
+        // Verify that the current element has not been filtered by the HWSUMMARY.
+        // GroupId indexes Infos[0..Count-1], so it must be strictly less than
+        // Count (the previous '>=' assert allowed GroupId == Count, one past
+        // the end of the array).
+        //
+        Assert( pSummary->Count > (UINT32)pView->Tree->Nodes[ current ].Affinity.GroupId );
+        if( 0 == ( pView->Tree->Nodes[current].Affinity.Mask & pSummary->Infos[pView->Tree->Nodes[ current ].Affinity.GroupId].ActiveMask ) )
+        {
+            current++;
+            continue;
+        }
+
+        //
+        // The parent was placed at pStrides[depth - 1]; the caller advances
+        // that slot only after this whole subtree completes.
+        //
+        HwViewInitializeNode(
+            pView,
+            pStrides[depth],
+            depth,
+            pStrides[depth - 1],
+            last,
+            current
+            );
+
+        if( depth + 1 < HWNODE_MAX_DEPTH )
+        {
+            HRESULT hr = HwViewInitializeSubTree( pView, pStrides, pEnds, current, pSummary );
+            if( FAILED( hr ) )
+            {
+                return hr;
+            }
+        }
+        last = pStrides[depth];
+        pStrides[depth]++;
+        pView->Counts[depth]++;
+        current++;
+    }
+    return S_OK;
+}
+
+
+//
+// Summary:
+//  Utility function to collect the node counts for each depth and the total
+//  count of nodes required to represent the local machine in the view.
+//
+// Parameters:
+//  pTree       - pointer to the tree to scan
+//  pCounts     - array to receive the counts at each depth
+//
+// Returns:
+//  The total number of nodes required in the view, including the WORLD node.
+//
+static UINT32
+HwViewCalculateCountNodes(
+    _In_ const HWTREE* pTree,
+    _Out_writes_(HWNODE_MAX_DEPTH) UINT32 pCounts[]
+    )
+{
+    pCounts[HWNODE_TYPE_MACHINE] = 0;
+    pCounts[HWNODE_TYPE_GROUP] = 0;
+    pCounts[HWNODE_TYPE_NUMA] = 0;
+    pCounts[HWNODE_TYPE_PCORE] = 0;
+    pCounts[HWNODE_TYPE_LCORE] = 0;
+
+    UINT32 first;
+    UINT32 last;
+    UINT32 depth;
+    UINT32 machineId;
+
+    //
+    // Only the local machine is represented in the view.
+    //
+    machineId = HWMACHINEID_SELF;
+
+    Assert(machineId >= pTree->Strides[HWNODE_TYPE_MACHINE] );
+    Assert(machineId < pTree->Strides[HWNODE_TYPE_MACHINE] + pTree->Counts[HWNODE_TYPE_MACHINE] );
+    Assert(pTree->Nodes[machineId].Type == HWNODE_TYPE_MACHINE);
+
+    depth = static_cast<UINT32>( HWNODE_TYPE_GROUP );
+    first = pTree->Nodes[machineId].FirstChild;
+    last = pTree->Nodes[machineId].LastChild;
+
+    do
+    {
+        Assert( first != HWNODEID_NONE );
+        Assert( last != HWNODEID_NONE );
+
+        //
+        // Nodes of one level are contiguous, so [first, last] spans the whole
+        // level; descending to the first child of 'first' and the last child
+        // of 'last' spans the entire next level.
+        //
+        pCounts[depth] += last - first + 1;
+        first = pTree->Nodes[first].FirstChild;
+        last = pTree->Nodes[last].LastChild;
+
+    } while( ++depth < HWNODE_MAX_DEPTH );
+
+    pCounts[HWNODE_TYPE_MACHINE]++;
+
+    //
+    // Include 1 extra for the WORLD node.
+    //
+    return 1 +
+        pCounts[HWNODE_TYPE_MACHINE] +
+        pCounts[HWNODE_TYPE_GROUP] +
+        pCounts[HWNODE_TYPE_NUMA] +
+        pCounts[HWNODE_TYPE_PCORE] +
+        pCounts[HWNODE_TYPE_LCORE];
+
+}
+
+
+//
+// Summary:
+//  Initialize a HWVIEW from an HWTREE.
+//
+// Parameters:
+//  pcbView     - On input, current size of pView in bytes.
+//                On output, the size of the buffer used.
+//                if return code is HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)
+//                the required size will be written here.
+//  pView       - pointer to HWVIEW buffer
+//  pTree       - Pointer to tree to base view on
+//  pSummary    - Pointer to the machine summary, used to filter inactive cores
+//
+HRESULT
+HwViewInitialize(
+    _Inout_ UINT32* pcbView,
+    _Out_writes_to_(*pcbView,*pcbView) HWVIEW* pView,
+    _In_ const HWTREE* pTree,
+    _In_ const HWSUMMARY* pSummary
+    )
+{
+    UINT32 machineId;
+    UINT32 strides[HWNODE_MAX_DEPTH];
+    UINT32 ends[HWNODE_MAX_DEPTH];
+    UINT32 last = HWNODEID_NONE;
+
+    //
+    // ends[] first receives the per-depth node counts; it is converted to
+    // per-depth end offsets below.
+    //
+    UINT32 nNodes = HwViewCalculateCountNodes( pTree, ends );
+
+    UINT32 cb = sizeof(*pView) - sizeof(pView->Nodes) + ( sizeof(pView->Nodes[0]) * nNodes );
+    if( cb > *pcbView )
+    {
+        *pcbView = cb;
+        return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+    }
+
+    //
+    // Update the stride table.  Node 0 is the WORLD node, so the machine
+    // level starts at 1 and each level starts where the previous one ends.
+    //
+    strides[HWNODE_TYPE_MACHINE]= 1;
+    strides[HWNODE_TYPE_GROUP]  = strides[HWNODE_TYPE_MACHINE] + ends[HWNODE_TYPE_MACHINE];
+    strides[HWNODE_TYPE_NUMA]   = strides[HWNODE_TYPE_GROUP] + ends[HWNODE_TYPE_GROUP];
+    strides[HWNODE_TYPE_PCORE]  = strides[HWNODE_TYPE_NUMA] + ends[HWNODE_TYPE_NUMA];
+    strides[HWNODE_TYPE_LCORE]  = strides[HWNODE_TYPE_PCORE] + ends[HWNODE_TYPE_PCORE];
+
+    pView->Strides[HWNODE_TYPE_MACHINE] = strides[HWNODE_TYPE_MACHINE];
+    pView->Strides[HWNODE_TYPE_GROUP]   = strides[HWNODE_TYPE_GROUP];
+    pView->Strides[HWNODE_TYPE_NUMA]    = strides[HWNODE_TYPE_NUMA];
+    pView->Strides[HWNODE_TYPE_PCORE]   = strides[HWNODE_TYPE_PCORE];
+    pView->Strides[HWNODE_TYPE_LCORE]   = strides[HWNODE_TYPE_LCORE];
+
+    pView->Counts[HWNODE_TYPE_MACHINE]  = 0;
+    pView->Counts[HWNODE_TYPE_GROUP]    = 0;
+    pView->Counts[HWNODE_TYPE_NUMA]     = 0;
+    pView->Counts[HWNODE_TYPE_PCORE]    = 0;
+    pView->Counts[HWNODE_TYPE_LCORE]    = 0;
+
+    //
+    // update the "ends" to be offset from the strides so we don't
+    // have to track the start of the chain through the iteration.
+    //
+    ends[HWNODE_TYPE_MACHINE]   += strides[HWNODE_TYPE_MACHINE];
+    ends[HWNODE_TYPE_GROUP]     += strides[HWNODE_TYPE_GROUP];
+    ends[HWNODE_TYPE_NUMA]      += strides[HWNODE_TYPE_NUMA];
+    ends[HWNODE_TYPE_PCORE]     += strides[HWNODE_TYPE_PCORE];
+    ends[HWNODE_TYPE_LCORE]     += strides[HWNODE_TYPE_LCORE];
+
+    pView->Nodes[HWNODEID_WORLD].Type = HWNODE_TYPE_WORLD;
+    pView->Nodes[HWNODEID_WORLD].Parent = HWNODEID_NONE;
+    pView->Nodes[HWNODEID_WORLD].NextSibling = HWNODEID_NONE;
+    pView->Nodes[HWNODEID_WORLD].FirstChild = HWNODEID_NONE;
+    pView->Nodes[HWNODEID_WORLD].PrevSibling = HWNODEID_NONE;
+    pView->Nodes[HWNODEID_WORLD].LastChild = HWNODEID_NONE;
+
+    pView->Tree = pTree;
+
+    if( strides[HWNODE_TYPE_MACHINE] >= ends[HWNODE_TYPE_MACHINE] )
+    {
+        return E_UNEXPECTED;
+    }
+
+    machineId = HWMACHINEID_SELF;
+
+    HwViewInitializeNode(
+        pView,
+        strides[HWNODE_TYPE_MACHINE],
+        HWNODE_TYPE_MACHINE,
+        HWNODEID_WORLD,
+        last,
+        machineId
+        );
+
+    HRESULT hr = HwViewInitializeSubTree(pView, strides, ends, machineId, pSummary);
+    if( FAILED( hr ) )
+    {
+        return hr;
+    }
+
+    //
+    // Only the single local machine is placed, so 'last' and the machine
+    // stride do not need to advance after the subtree completes (they were
+    // dead stores); just account for the machine node.
+    //
+    pView->Counts[HWNODE_TYPE_MACHINE]++;
+
+    *pcbView = cb;
+    return S_OK;
+}
\ No newline at end of file
diff --git a/src/mpi/common/ipcShm.h b/src/mpi/common/ipcShm.h
new file mode 100644
index 0000000..f871eae
--- /dev/null
+++ b/src/mpi/common/ipcShm.h
@@ -0,0 +1,237 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "shlobj.h"
+#include "Shlwapi.h"
+#include "mpistr.h"
+
+//
+// A global shared memory for IPC. Allows IPC among different users on the same computer,
+// with no specific privilege like SeCreateGlobalPrivilege. Processes communicate over
+// a temporary, i.e. no disk flush, delete-on-close file mapped view.
+//
+
+class GlobalShmRegion
+{
+private:
+    BOOL isReadOnly;                    // set by Open (TRUE) / Create (FALSE)
+    HANDLE file;                        // INVALID_HANDLE_VALUE when not open
+    HANDLE fileMap;                     // nullptr when not open
+    void* pView;                        // nullptr when not mapped
+    WCHAR fullRegionName[MAX_PATH];
+
+    //
+    // Non-copyable
+    //
+    GlobalShmRegion(const GlobalShmRegion&);
+    GlobalShmRegion& operator = (const GlobalShmRegion&);
+
+    //
+    // Build the full path of the backing file under the all-users
+    // application data directory.
+    //
+    HRESULT CreateFullRegionName(_In_z_ PCWSTR regionName)
+    {
+        //
+        // The file system directory that contains application data for all users
+        //
+        HRESULT result = SHGetFolderPathW(nullptr, CSIDL_COMMON_APPDATA, nullptr, 0, fullRegionName);
+        if(result != S_OK)
+        {
+            return result;
+        }
+
+        //
+        // SHGetFolderPathW returns the full path without a trailing backslash;
+        // the +1 accounts for the separator PathAppendW inserts.
+        //
+        size_t fullPathLen = MPIU_Strlen(fullRegionName) + 1 + MPIU_Strlen(regionName);
+        if (fullPathLen >= _countof(fullRegionName))
+        {
+            return E_FAIL;
+        }
+
+        OACR_WARNING_SUPPRESS(UNSAFE_STRING_FUNCTION, "Buffer length is already checked.");
+        BOOL appended = PathAppendW(fullRegionName, regionName);
+        return appended ? S_OK : E_FAIL;
+    }
+
+
+public:
+    GlobalShmRegion():
+        isReadOnly(FALSE),
+        file(INVALID_HANDLE_VALUE),
+        fileMap(nullptr),
+        pView(nullptr)
+    {
+    }
+
+
+    ~GlobalShmRegion()
+    {
+        if(pView != nullptr)
+        {
+            UnmapViewOfFile(pView);
+        }
+        if(fileMap != nullptr)
+        {
+            CloseHandle(fileMap);
+        }
+        if(file != INVALID_HANDLE_VALUE)
+        {
+            CloseHandle(file);
+        }
+    }
+
+
+    //
+    // Open an existing region read-only.  Returns a Win32 error code.
+    //
+    DWORD Open(_In_z_ PCWSTR regionName)
+    {
+        if(pView != nullptr)
+        {
+            return ERROR_ALREADY_EXISTS;
+        }
+
+        isReadOnly = TRUE;
+
+        HRESULT result = CreateFullRegionName(regionName);
+        if(result != S_OK)
+        {
+            return ERROR_INVALID_NAME;
+        }
+
+        file = CreateFileW(
+            fullRegionName,
+            GENERIC_READ,
+            FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE,
+            nullptr,
+            OPEN_EXISTING,
+            FILE_ATTRIBUTE_TEMPORARY,
+            nullptr
+            );
+
+        if(file == INVALID_HANDLE_VALUE)
+        {
+            return GetLastError();
+        }
+
+        fileMap = CreateFileMappingW(file, nullptr, PAGE_READONLY, 0, 0, nullptr);
+
+        if(fileMap == nullptr)
+        {
+            //
+            // Capture the error before CloseHandle can overwrite it, and
+            // restore the INVALID_HANDLE_VALUE sentinel that the destructor
+            // and Open/Create check (previously reset to nullptr, which made
+            // the destructor call CloseHandle on an invalid handle).
+            //
+            DWORD gle = GetLastError();
+            CloseHandle(file);
+            file = INVALID_HANDLE_VALUE;
+            return gle;
+        }
+
+        pView = MapViewOfFile(
+            fileMap,
+            FILE_MAP_READ,
+            0,
+            0,
+            0
+            );
+        if( pView == nullptr )
+        {
+            DWORD gle = GetLastError();
+            CloseHandle(fileMap);
+            CloseHandle(file);
+            fileMap = nullptr;
+            file = INVALID_HANDLE_VALUE;
+            return gle;
+        }
+
+        return NO_ERROR;
+    }
+
+
+    //
+    // Create the region read-write, sized to regionSize bytes and zeroed.
+    // The backing file is delete-on-close and grants access to everyone so
+    // processes of other users can Open() it.
+    //
+    DWORD Create(_In_z_ PCWSTR regionName, UINT32 regionSize)
+    {
+        if(pView != nullptr)
+        {
+            return ERROR_ALREADY_EXISTS;
+        }
+
+        isReadOnly = FALSE;
+
+        HRESULT result = CreateFullRegionName(regionName);
+        if(result != S_OK)
+        {
+            return ERROR_INVALID_NAME;
+        }
+
+        //
+        // D:(A;;GA;;;WD) is a DACL granting generic-all to Everyone (world),
+        // required so other users' processes can open the file.
+        //
+        SECURITY_ATTRIBUTES security;
+        ZeroMemory(&security, sizeof(security));
+        security.nLength = sizeof(security);
+        if( !ConvertStringSecurityDescriptorToSecurityDescriptorW(
+                L"D:(A;;GA;;;WD)",
+                SDDL_REVISION_1,
+                &security.lpSecurityDescriptor,
+                NULL) )
+        {
+            //
+            // Previously ignored; a null descriptor would silently fall back
+            // to default security and defeat the cross-user purpose.
+            //
+            return GetLastError();
+        }
+
+        file = CreateFileW(
+            fullRegionName,
+            GENERIC_READ|GENERIC_WRITE,
+            FILE_SHARE_READ,
+            &security,
+            CREATE_NEW,
+            FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE,
+            NULL
+            );
+
+        //
+        // Capture the error before LocalFree can overwrite it.
+        //
+        DWORD gle = GetLastError();
+        LocalFree(security.lpSecurityDescriptor);
+
+        if(file == INVALID_HANDLE_VALUE)
+        {
+            return gle;
+        }
+
+        fileMap = CreateFileMappingW(
+            file,
+            nullptr,
+            PAGE_READWRITE,
+            0,
+            regionSize,
+            nullptr
+            );
+
+        if( fileMap == nullptr )
+        {
+            gle = GetLastError();
+            CloseHandle(file);
+            file = INVALID_HANDLE_VALUE;
+            return gle;
+        }
+
+        pView = MapViewOfFile(
+            fileMap,
+            FILE_MAP_WRITE,
+            0,
+            0,
+            0
+            );
+
+        if(pView != nullptr)
+        {
+            ZeroMemory(pView, regionSize);
+            return NO_ERROR;
+        }
+
+        gle = GetLastError();
+        CloseHandle(fileMap);
+        CloseHandle(file);
+        fileMap = nullptr;
+        file = INVALID_HANDLE_VALUE;
+        return gle;
+    }
+
+
+    //
+    // Read-only pointer to the mapped view; null until Open/Create succeeds.
+    //
+    const void* ReadPtr() const
+    {
+        return pView;
+    }
+
+
+    //
+    // Writable pointer to the mapped view; null for read-only regions.
+    //
+    void* WritePtr()
+    {
+        if(isReadOnly)
+        {
+            return nullptr;
+        }
+
+        return pView;
+    }
+
+};
\ No newline at end of file
diff --git a/src/mpi/common/kernel32util.cpp b/src/mpi/common/kernel32util.cpp
new file mode 100644
index 0000000..8dfd71a
--- /dev/null
+++ b/src/mpi/common/kernel32util.cpp
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "kernel32util.h"
+#include "util.h"
+
+//
+// Summary:
+//  Globals to indicate if we should use Win7+ / Win8+ features.
+//
+// NOTE:
+//  These are not static so we can manipulate the bits from unit tests
+//
+BOOL g_IsWin7OrGreater = CheckOSVersion(6,1);   // 6.1 == Windows 7 / Server 2008 R2
+BOOL g_IsWin8OrGreater = CheckOSVersion(6,2);   // 6.2 == Windows 8 / Server 2012
+
+// Singleton instance; its constructor resolves the dynamic entry points.
+Kernel32 Kernel32::Methods;
\ No newline at end of file
diff --git a/src/mpi/common/kernel32util.h b/src/mpi/common/kernel32util.h
new file mode 100644
index 0000000..4a7facb
--- /dev/null
+++ b/src/mpi/common/kernel32util.h
@@ -0,0 +1,170 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+#include
+#include
+//
+// Make some shorter names.
+//
+typedef SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX SLPIEX;
+
+
+//
+// Signature of the dynamically resolved GetLogicalProcessorInformationEx
+// entry point (see the Kernel32 singleton below).  A second, byte-identical
+// duplicate of this typedef was removed.
+//
+typedef _Success_(return==TRUE) BOOL ( WINAPI FN_GetLogicalProcessorInformationEx )(
+    _In_ LOGICAL_PROCESSOR_RELATIONSHIP relationshipType,
+    _Out_writes_to_opt_(*pReturnedLength,*pReturnedLength) PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer,
+    _Inout_ PDWORD pReturnedLength
+    );
+
+
+
+// Signatures of entry points resolved at run time via GetProcAddress
+// (see the Kernel32 singleton below).
+typedef _Success_(return==TRUE) BOOL ( WINAPI FN_SetThreadGroupAffinity)(
+    _In_ HANDLE hThread,
+    _In_ const GROUP_AFFINITY *GroupAffinity,
+    _Out_opt_ PGROUP_AFFINITY PreviousGroupAffinity
+    );
+
+typedef _Success_(return==TRUE) BOOL (WINAPI FN_GetNumaNodeProcessorMaskEx) (
+    _In_ USHORT Node,
+    _Out_ PGROUP_AFFINITY ProcessorMask
+    );
+
+typedef _Success_(return==TRUE) BOOL (WINAPI FN_GetProcessGroupAffinity) (
+    _In_ HANDLE hProcess,
+    _Inout_ PUSHORT GroupCount,
+    _Out_writes_to_(*GroupCount,*GroupCount) PUSHORT GroupArray
+    );
+
+
+typedef _Success_(return == TRUE) BOOL (WINAPI FN_InitializeProcThreadAttributeList) (
+    _In_opt_ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList,
+    _In_ DWORD dwAttributeCount,
+    _In_ DWORD dwFlags,
+    _Inout_ PSIZE_T lpSize
+    );
+
+
+typedef _Success_(return == TRUE) BOOL(WINAPI FN_UpdateProcThreadAttribute) (
+    _In_ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList,
+    _In_ DWORD dwFlags,
+    _In_ DWORD_PTR Attribute,
+    _In_ PVOID lpValue,
+    _In_ SIZE_T cbSize,
+    _In_opt_ PVOID lpPreviousValue,
+    _In_opt_ PSIZE_T lpReturnSize
+    );
+
+
+// Resolved from kernelbase on Win8+ (see Kernel32 constructor below).
+typedef _Success_(return==TRUE) BOOL (WINAPI FN_WaitOnAddress)(
+    _In_ VOID volatile *Address,
+    _In_ PVOID CompareAddress,
+    _In_ SIZE_T AddressSize,
+    _In_opt_ DWORD dwMilliseconds
+    );
+
+
+typedef VOID (WINAPI FN_WakeByAddressAll)(
+    _In_ PVOID Address
+    );
+
+
+// Defined in kernel32util.cpp; not static so unit tests can manipulate them.
+extern BOOL g_IsWin7OrGreater;
+extern BOOL g_IsWin8OrGreater;
+
+
+//
+// Summary:
+//  Singleton class to load and GetProcAddress the dynamically resolved
+//  kernel32/kernelbase entry points.
+//
+struct Kernel32
+{
+    static Kernel32 Methods;
+
+    FN_GetLogicalProcessorInformationEx* GetLogicalProcessorInformationEx;
+    FN_SetThreadGroupAffinity* SetThreadGroupAffinity;
+    FN_GetNumaNodeProcessorMaskEx* GetNumaNodeProcessorMaskEx;
+    FN_GetProcessGroupAffinity* GetProcessGroupAffinity;
+    FN_InitializeProcThreadAttributeList* InitializeProcThreadAttributeList;
+    FN_UpdateProcThreadAttribute* UpdateProcThreadAttribute;
+    FN_WaitOnAddress* WaitOnAddress;
+    FN_WakeByAddressAll* WakeByAddressAll;
+
+
+private:
+    //
+    // Resolve all entry points once.  Members are explicitly initialized to
+    // nullptr so failed lookups (older OS, missing module) leave them null;
+    // the reinterpret_cast template arguments were missing in the previous
+    // revision, which does not compile.
+    //
+    Kernel32() :
+        GetLogicalProcessorInformationEx(nullptr),
+        SetThreadGroupAffinity(nullptr),
+        GetNumaNodeProcessorMaskEx(nullptr),
+        GetProcessGroupAffinity(nullptr),
+        InitializeProcThreadAttributeList(nullptr),
+        UpdateProcThreadAttribute(nullptr),
+        WaitOnAddress(nullptr),
+        WakeByAddressAll(nullptr)
+    {
+        HMODULE kernel32 = ::GetModuleHandleW(L"kernel32");
+        if(kernel32 != nullptr)
+        {
+            GetLogicalProcessorInformationEx = reinterpret_cast<FN_GetLogicalProcessorInformationEx*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "GetLogicalProcessorInformationEx"
+                    ) );
+            SetThreadGroupAffinity = reinterpret_cast<FN_SetThreadGroupAffinity*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "SetThreadGroupAffinity"
+                    ) );
+            GetNumaNodeProcessorMaskEx = reinterpret_cast<FN_GetNumaNodeProcessorMaskEx*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "GetNumaNodeProcessorMaskEx"
+                    ) );
+            GetProcessGroupAffinity = reinterpret_cast<FN_GetProcessGroupAffinity*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "GetProcessGroupAffinity"
+                    ) );
+            InitializeProcThreadAttributeList = reinterpret_cast<FN_InitializeProcThreadAttributeList*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "InitializeProcThreadAttributeList"
+                    ) );
+            UpdateProcThreadAttribute = reinterpret_cast<FN_UpdateProcThreadAttribute*>(
+                ::GetProcAddress(
+                    kernel32,
+                    "UpdateProcThreadAttribute"
+                    ) );
+        }
+
+        //
+        // All of the kernel32 entry points must resolve on Win7+.
+        //
+        assert(!g_IsWin7OrGreater || GetLogicalProcessorInformationEx != nullptr);
+        assert(!g_IsWin7OrGreater || SetThreadGroupAffinity != nullptr);
+        assert(!g_IsWin7OrGreater || GetNumaNodeProcessorMaskEx != nullptr);
+        assert(!g_IsWin7OrGreater || GetProcessGroupAffinity != nullptr);
+        assert(!g_IsWin7OrGreater || InitializeProcThreadAttributeList != nullptr);
+        assert(!g_IsWin7OrGreater || UpdateProcThreadAttribute != nullptr);
+
+        //
+        // WaitOnAddress/WakeByAddressAll are resolved from kernelbase and
+        // expected only on Win8+.
+        //
+        if (g_IsWin8OrGreater)
+        {
+            HMODULE kernelBase = ::GetModuleHandleW(L"kernelbase");
+            if(kernelBase != nullptr)
+            {
+                WakeByAddressAll = reinterpret_cast<FN_WakeByAddressAll*>(
+                    ::GetProcAddress(
+                        kernelBase,
+                        "WakeByAddressAll"
+                        ));
+
+                WaitOnAddress = reinterpret_cast<FN_WaitOnAddress*>(
+                    ::GetProcAddress(
+                        kernelBase,
+                        "WaitOnAddress"
+                        ));
+            }
+
+            assert(WakeByAddressAll != nullptr);
+            assert(WaitOnAddress != nullptr);
+        }
+    }
+};
diff --git a/src/mpi/common/mpicommon.vcxproj b/src/mpi/common/mpicommon.vcxproj
new file mode 100644
index 0000000..a3f7e70
--- /dev/null
+++ b/src/mpi/common/mpicommon.vcxproj
@@ -0,0 +1,97 @@
+
+
+
+
+
+
+ mpicommon
+ {990e2ef8-f801-4101-a7b5-2d561fb6667b}
+
+
+
+ false
+ StaticLibrary
+ None
+
+
+
+
+
+
+ Use
+ precomp.h
+ $(IntDir)\pch_hdr.src
+
+ %(AdditionalIncludeDirectories);
+ $(MPI_SRC_ROOT)\common\$(O);
+
+
+
+
+
+
+ Create
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ robocopy .\ $(BinariesBuildTypeArchDirectory)\$(MPI_BIN_DESTINATION) mpitrace.man
+set rc=%errorlevel%
+if not %rc%==1 exit %rc% else exit 0
+
+
+
+
+
+
+
+
+
+
+
+ $(BaseIntermediateOutputPath)i386\errtest.c
+ $(BaseIntermediateOutputPath)amd64\errtest.c
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/mpi/common/mpidef.h b/src/mpi/common/mpidef.h
new file mode 100644
index 0000000..bb1d146
--- /dev/null
+++ b/src/mpi/common/mpidef.h
@@ -0,0 +1,191 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#if !defined(MPITYPEDEFS_H_INCLUDED)
+#define MPITYPEDEFS_H_INCLUDED
+
+/* Define if addresses are larger than Fortran integers */
+#ifdef _WIN64
+#define HAVE_AINT_LARGER_THAN_FINT
+#endif
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define if debugger support is included */
+/* #undef HAVE_DEBUGGER_SUPPORT */
+
+/* Define if F90 type routines available */
+/* #undef HAVE_F90_TYPE_ROUTINES */
+
+/* Define if Fortran is supported */
+/* Defined as a compiler directive, see mpich2sources.inc */
+/* #undef HAVE_FORTRAN_BINDING */
+
+/* Controls byte alignment of structures (for aligning allocated structures)
+ */
+#define HAVE_MAX_STRUCT_ALIGNMENT 8
+
+/* Define if a name publishing service is available */
+#define HAVE_NAMEPUB_SERVICE 1
+
+/* Define if the Fortran types are not available in C */
+/* #undef HAVE_NO_FORTRAN_MPI_TYPES_IN_C */
+
+/* Define as the name of the debugger support library */
+/* #undef MPICH_INFODLL_LOC */
+
+/* Level of thread support selected at compile time */
+#ifdef MPICH_MULTITHREADED
+#define MPICH_THREAD_LEVEL MPI_THREAD_MULTIPLE
+#else
+#define MPICH_THREAD_LEVEL MPI_THREAD_SERIALIZED
+#endif
+
+/* C type to use for MPI_INTEGER16 */
+/* #undef MPIR_INTEGER16_CTYPE */
+
+/* C type to use for MPI_REAL2 */
+/* #undef MPIR_REAL2_CTYPE */
+
+/* C type to use for MPI_REAL16 */
+/* #undef MPIR_REAL16_CTYPE */
+
+/* C99 types available for MPI_C_XXX */
+/* #undef MPIR_C99_TYPES */
+
+/* Define if alloca should be used if available */
+/* #undef USE_ALLOCA */
+
+/* Define to use ='s and spaces in the string utilities. */
+#define USE_HUMAN_READABLE_TOKENS 1
+
+/* if C does not support restrict */
+#define restrict
+
+/* The following are defined as a compiler directive, see mpich2sources.inc */
+/* Added here to make it easier to search for the define */
+/*
+#define USE_MPI_FOR_NMPI
+#define HAVE_FORTRAN_BINDING
+#define MPIDI_CH3_HAS_NO_DYNAMIC_PROCESS
+*/
+
+
+#ifndef EXTERN_C
+#if defined(__cplusplus)
+#define EXTERN_C extern "C"
+#else
+#define EXTERN_C extern
+#endif
+#endif
+
+
+#ifdef _PREFIX_
+EXTERN_C void __pfx_assert(bool, const char*);
+#undef __analysis_assert
+#define __analysis_assert(expr) __pfx_assert(expr, "")
+#else
+#ifndef __analysis_assert
+#define __analysis_assert(expr)
+#endif
+#endif
+
+
+#ifdef _PREFIX_
+EXTERN_C void __pfx_assume(bool, const char*);
+#undef __analysis_assume
+#define __analysis_assume(expr) __pfx_assume(expr, "")
+#elif _PREFAST_
+#undef __analysis_assume
+#define __analysis_assume(expr) __assume(expr)
+#else
+#ifndef __analysis_assume
+#define __analysis_assume(expr)
+#endif
+#endif
+
+#ifdef __midl
+typedef int MPI_RESULT;
+#else
+typedef _Check_return_ _Return_type_success_(return == MPI_SUCCESS) int MPI_RESULT;
+#endif
+
+
+/* Set to a type that can express the size of a send/receive buffer */
+typedef unsigned int MPIU_Bsize_t;
+#define MPIU_BSIZE_MAX UINT_MAX
+
+typedef MPIU_Bsize_t MPIDI_msg_sz_t;
+#define MPIDI_MSG_SZ_MAX MPIU_BSIZE_MAX
+
+
+/* Use the MPIU_PtrToXXX macros to convert pointers to and from integer types */
+
+/* The Microsoft compiler will not allow casting of different sized types
+ * without
+ * printing a compiler warning. Using these macros allows compiler specific
+ * type casting and avoids the warning output. These macros should only be used
+ * in code that can handle loss of bits.
+ */
+
+/* PtrToInt converts a pointer to a int type, truncating bits if necessary */
+#define MPIU_PtrToInt PtrToInt
+
+/* PtrToAint converts a pointer to an MPI_Aint type, truncating bits if necessary */
+#define MPIU_PtrToAint(a) ((MPI_Aint)(INT_PTR) (a) )
+
+/* IntToPtr converts a int to a pointer type, extending bits if necessary */
+#define MPIU_IntToPtr IntToPtr
+
+
+//
+// Constant used to define the maximum number of characters in the hostname
+// string placed into the PMI KVS when publishing our node id.
+//
+#if (!defined MAXHOSTNAMELEN) && (!defined MAX_HOSTNAME_LEN)
+#define MAX_HOSTNAME_LEN 256
+#elif !defined MAX_HOSTNAME_LEN
+#define MAX_HOSTNAME_LEN MAXHOSTNAMELEN
+#endif
+
+#define MSMPI_VER_MAJOR( _v ) (_v >> 24)
+#define MSMPI_VER_MINOR( _v ) ((_v >> 16) & 0xFF)
+#define MSMPI_VER_BUILD( _v ) (_v & 0xFFFF)
+#if MSMPI_IS_RTM
+#define MSMPI_BUILD_LABEL L""
+#else
+#define MSMPI_BUILD_LABEL L" [PRE-RELEASE]"
+#endif
+
+//
+// This is the maximum length of an environment variable according
+// to platform specification.
+//
+#define MAX_ENV_LENGTH 32767
+
+#define MSMPI_MAX_RANKS 32768
+
+#define MSMPI_MAX_TRANSFER_SIZE INT_MAX
+
+//
+// Maximum number of connection retries in case of errors. Applies to both sockets and ND.
+//
+#define MSMPI_DEFAULT_CONNECT_RETRIES 5
+
+//
+// This string identifies the service_name in spn construction
+//
+#define MSMPI_SPN_SERVICE_NAME L"msmpi"
+
+//
+// Cast definition to help flag places where we potentially truncate but shouldn't.
+//
+#define fixme_cast static_cast
+
+#endif /* !defined(MPITYPEDEFS_H_INCLUDED) */
diff --git a/src/mpi/common/mpidump.h b/src/mpi/common/mpidump.h
new file mode 100644
index 0000000..e98fbd3
--- /dev/null
+++ b/src/mpi/common/mpidump.h
@@ -0,0 +1,48 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#ifndef MPIDUMP_H_INCLUDED
+#define MPIDUMP_H_INCLUDED
+
+#include <dbghelp.h>
+
+//
+// Constants used to control dump file generation.
+//
+enum MSMPI_DUMP_MODE
+{
+ MsmpiDumpNone = 0,
+ MsmpiDumpMini,
+ MsmpiDumpAllMini,
+ MsmpiDumpFull,
+ MsmpiDumpAllFull,
+ MsmpiDumpMaximumValue
+};
+
+
+MSMPI_DUMP_MODE GetDumpMode();
+
+
+void
+CreateFinalDumpFile(
+ _In_ HANDLE tempFileHandle,
+ _In_ int rank,
+ _In_z_ const wchar_t* dumpPath,
+ _In_ int jobid,
+ _In_ int taskid,
+ _In_ int taskinstid
+ );
+
+
+HANDLE
+CreateTempDumpFile(
+ __in HANDLE hProcess,
+ __in DWORD pid,
+ __in MINIDUMP_TYPE dumpType,
+ __in const wchar_t* dumpPath,
+ __in_opt MINIDUMP_EXCEPTION_INFORMATION* pExrParam
+ );
+
+#endif // MPIDUMP_H_INCLUDED
diff --git a/src/mpi/common/mpierror.h b/src/mpi/common/mpierror.h
new file mode 100644
index 0000000..92e2e59
--- /dev/null
+++ b/src/mpi/common/mpierror.h
@@ -0,0 +1,293 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+
+//
+// IMPORTANT NOTE: All MPI*_Err_* functions must have return value and function name
+// on the SAME LINE or the error string parser will FAIL.
+//
+
+#ifndef MPIERROR_H_INCLUDED
+#define MPIERROR_H_INCLUDED
+
+/* Error severity */
+#define MPIR_ERR_FATAL 1
+#define MPIR_ERR_RECOVERABLE 0
+
+/*
+ This file contains the definitions of the error code fields
+
+ An error code is organized as:
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-----------------------------------------+-+-+-------------+
+ |0|D| Error Code Index |C|F| Class |
+ +-+-+-----------------------------------------+-+-+-------------+
+
+ Class: [0-6]
+ The MPI error class (including dynamically defined classes).
+
+ Fatal: [7]
+ Set if the error is fatal and should not be returned to the user.
+
+ Code: [8]
+ Set for error Codes (vs error class). Note that 0 is a valid value for the Error Code Index.
+
+ Index: [9-28]
+ The Error Code Index assigned at error code creation. The lower 7 bits are used as an index to
+ the ErrorRing to find the error message. The remaining bits of this field help verify that the
+ ErrorRing entry is assigned to that error.
+
+ Dynamic: [30]
+ Set if this is a dynamically created error code (using the routines to add error classes and
+ codes at runtime). This *must* be the top bit so that MPI_ERR_LASTCODE and MPI_LASTUSEDCODE
+ can be set properly. (MPI_ERR_LASTCODE must be the largest valid error *code* from the
+ predefined codes.)
+ The standard text is poorly worded here, but users will expect to be able to perform
+ (errcode <= MPI_ERR_LASTCODE). See Section 8.5 in the MPI-2 standard for MPI_LASTUSEDCODE.
+
+ 0: [31]
+ Error codes must be positive integers, so we lose one bit (if they aren't positive, the
+ comparisons against MPI_ERR_LASTCODE and the value of the attribute MPI_LASTUSEDCODE will fail).
+ */
+
+
+#define ERROR_CLASS_MASK 0x0000007f
+#define ERROR_CLASS_SIZE 128
+
+#define ERROR_FATAL_FLAG 0x00000080
+#define ERROR_COD_FLAG 0x00000100
+
+#define ERROR_INDEX_MASK 0x3FFFFE00
+#define ERROR_INDEX_SHIFT 9
+
+#define ERROR_DYN_FLAG 0x40000000
+
+#define ERROR_DINDEX_MASK 0x003FFE00
+#define ERROR_DINDEX_SHIFT 9
+#define ERROR_DINDEX_SIZE 8192
+
+/* shorthand macros */
+#define ERROR_GET_CLASS(code) (code & ERROR_CLASS_MASK)
+#define ERROR_GET_INDEX(code) ((code & ERROR_INDEX_MASK) >> ERROR_INDEX_SHIFT)
+
+#define ERROR_IS_FATAL(code) ((code & ERROR_FATAL_FLAG) != 0)
+#define ERROR_IS_CODE(code) ((code & ERROR_COD_FLAG) != 0)
+#define ERROR_IS_DYN(code) ((code & ERROR_DYN_FLAG) != 0)
+
+
+/* FIXME:
+ * The following description is out of date and should not be used
+ */
+/*@
+ MPIR_Err_create_code - Create an error code and associated message
+ to report an error
+
+ Input Parameters:
++ lastcode - Previous error code (see notes)
+. severity - Indicates severity of error
+. class - Error class
+. instance_msg - A message containing printf-style formatting commands
+ that, when combined with the instance_parameters, specify an error
+ message containing instance-specific data.
+- instance_parameters - The remaining parameters. These must match
+ the formatting commands in 'instance_msg'.
+
+ Notes:
+ A typical use is:
+.vb
+ mpi_errno = MPIR_Err_create_code( mpi_errno, MPIR_ERR_RECOVERABLE, MPI_ERR_RANK, "Invalid rank %d", rank );
+.ve
+
+ Predefined message may also be used. Any message that uses the
+ prefix '"**"' will be looked up in a table. This allows standardized
+ messages to be used for a message that is used in several different locations
+ in the code. For example, the name '"**rank"' might be used instead of
+ '"Invalid Rank"'; this would also allow the message to be made more
+ specific and useful, such as
+.vb
+ Invalid rank provided. The rank must be between 0 and 1 less than
+ the size of the communicator in this call.
+.ve
+ This interface is compatible with the 'gettext' interface for
+ internationalization, in the sense that the 'generic_msg' and 'instance_msg'
+ may be used as arguments to 'gettext' to return a string in the appropriate
+ language; the implementation of 'MPID_Err_create_code' can then convert
+ this text into the appropriate code value.
+
+ The current set of formatting commands is undocumented and will change.
+ You may safely use '%d' and '%s' (though only use '%s' for names of
+ objects, not text messages, as using '%s' for a message breaks support for
+ internationalization.)
+
+ This interface allows error messages to be chained together. The first
+ argument is the last error code; if there is no previous error code,
+ use 'MPI_SUCCESS'.
+
+ Module:
+ Error
+
+ @*/
+_Post_satisfies_( return != MPI_SUCCESS )
+MPI_RESULT MPIR_Err_create_code(
+ _In_ int lastcode,
+ _In_ int fatal,
+ _In_ int error_class,
+ _In_z_ const char specific_msg[],
+ ...
+ );
+
+_Post_satisfies_( return != MPI_SUCCESS )
+MPI_RESULT MPIR_Err_create_code_valist(
+ _In_ int lastcode,
+ _In_ int fatal,
+ _In_ int error_class,
+ _In_z_ const char specific_msg[],
+ _In_ va_list Argp
+ );
+
+void MPIR_Err_preOrPostInit( void );
+
+
+/*@
+ MPID_Err_get_string - Get the message string that corresponds to an error
+ class or code
+
+ Input Parameter:
++ code - An error class or code. If a code, it must have been created by
+ 'MPID_Err_create_code'.
+- msg_len - Length of 'msg'.
+
+ Output Parameter:
+. msg - A null-terminated text string of length (including the null) of no
+ more than 'msg_len'.
+
+ Return value:
+ Zero on success. Non-zero returns indicate either (a) 'msg_len' is too
+ small for the message or (b) the value of 'code' is neither a valid
+ error class or code.
+
+ Notes:
+ This routine is used to implement 'MPI_ERROR_STRING'.
+
+ Module:
+ Error
+
+ Question:
+ What values should be used for the error returns? Should they be
+ valid error codes?
+
+ How do we get a good value for 'MPI_MAX_ERROR_STRING' for 'mpi.h'?
+ See 'errgetmsg' for one idea.
+
+ @*/
+
+void MPIR_Err_get_string(
+ _In_ int errorcode,
+ _Out_writes_z_(length) char * msg,
+ _In_ size_t length
+ );
+
+
+/* Prototypes for internal routines for the errhandling module */
+MPI_RESULT
+MPIR_Err_set_msg(
+ _In_ int code,
+ _In_z_ const char * msg_string
+ );
+
+int MPIR_Err_add_class( void );
+int MPIR_Err_add_code( int );
+
+_Success_(return==MPI_SUCCESS)
+int MPIR_Err_vsnprintf_mpi(
+ _Out_writes_z_(maxlen) char* str,
+ _In_ size_t maxlen,
+ _Printf_format_string_ const char* fmt,
+ _In_ va_list list
+ );
+
+_Post_satisfies_( return != MPI_SUCCESS )
+int MPIR_Err_get_user_error_code(
+ _In_ int errcode
+ );
+
+typedef _Ret_z_ const char* (*MPIR_Err_to_string_fn)(int code);
+
+void MPIR_Err_set_dynerr_fn(
+ _In_ MPIR_Err_to_string_fn fn
+ );
+
+
+
+/*
+ * Standardized error checking macros. These provide the correct tests for
+ * common tests. These set err with the encoded error value.
+ */
+
+/* The following are placeholders. We haven't decided yet whether these
+ should take a handle or pointer, or if they should take a handle and return
+ a pointer if the handle is valid. These need to be rationalized with the
+ MPID_xxx_valid_ptr and MPID_xxx_get_ptr.
+
+*/
+
+
+#define MPIU_ERR_FAIL(err_) \
+ err_
+
+#define ON_ERROR_FAIL(err_) \
+ if((err_) != MPI_SUCCESS) { goto fn_fail; }
+
+#define MPIU_ERR_NOMEM() \
+ MPIU_ERR_CREATE(MPI_ERR_OTHER, "**nomem")
+
+/*
+ * Standardized error setting and checking macros
+ * These are intended to simplify the insertion of standardized error
+ * checks
+ *
+ */
+/* --BEGIN ERROR MACROS-- */
+
+/* If you add any macros to this list, make sure that you update
+ maint/extracterrmsgs to handle the additional macros (see the hash
+ KnownErrRoutines in that script) */
+#define MPIU_ERR_TYPE_GET(err_, fatal_, class_, fmt_, ...) \
+ MPIR_Err_create_code(err_, fatal_, class_, fmt_, __VA_ARGS__)
+
+/* Get fatal error code */
+#define MPIU_ERR_FATAL_GET(err_, class_, fmt_, ...) \
+ MPIU_ERR_TYPE_GET(err_, MPIR_ERR_FATAL, class_, fmt_, __VA_ARGS__)
+
+/* Get recoverable error code */
+#define MPIU_ERR_GET(err_, fmt_, ...) \
+ MPIU_ERR_TYPE_GET(*&err_, MPIR_ERR_RECOVERABLE, MPI_ERR_OTHER, fmt_, __VA_ARGS__)
+
+/* Get recov error code with class */
+#define MPIU_ERR_CLASS_GET(err_, class_, fmt_, ...) \
+ MPIU_ERR_TYPE_GET(*&err_, MPIR_ERR_RECOVERABLE, class_, fmt_, __VA_ARGS__)
+
+/* Create a new recoverable error code */
+#define MPIU_ERR_CREATE(class_, fmt_, ...) \
+ MPIU_ERR_TYPE_GET( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, class_, fmt_, __VA_ARGS__ )
+
+/* --END ERROR MACROS-- */
+
+_Ret_z_
+const char*
+get_error_string(
+ _In_ int error
+ );
+
+void CreateDumpFileIfConfigured(
+ _In_ EXCEPTION_POINTERS* exp
+ );
+
+#endif
diff --git a/src/mpi/common/mpihandlemem.h b/src/mpi/common/mpihandlemem.h
new file mode 100644
index 0000000..879b5b6
--- /dev/null
+++ b/src/mpi/common/mpihandlemem.h
@@ -0,0 +1,635 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#ifndef MPIHANDLE_H_INCLUDED
+#define MPIHANDLE_H_INCLUDED
+
+#include "MpiLock.h"
+
+/*TDSOverview.tex
+
+ MPI has a number of data structures, most of which are represented by
+ an opaque handle in an MPI program. In the MPICH implementation of MPI,
+ these handles are represented
+ as integers; this makes implementation of the C/Fortran handle transfer
+ calls (part of MPI-2) easy.
+
+ MPID objects (again with the possible exception of 'MPI_Request's)
+ are allocated by a common set of object allocation functions.
+ These are
+.vb
+ void *MPIU_Handle_obj_create( MPIU_Object_alloc_t *objmem )
+ void MPIU_Handle_obj_destroy( MPIU_Object_alloc_t *objmem, void *object )
+.ve
+ where 'objmem' is a pointer to a memory allocation object that knows
+ enough to allocate objects, including the
+ size of the object and the location of preallocated memory, as well
+ as the type of memory allocator. By providing the routines to allocate and
+ free the memory, we make it easy to use the same interface to allocate both
+ local and shared memory for objects (always using the same kind for each
+ type of object).
+
+ The names create/destroy were chosen because they are different from
+ new/delete (C++ operations) and malloc/free.
+ Any name choice will have some conflicts with other uses, of course.
+
+ Reference Counts:
+ Many MPI objects have reference count semantics.
+ The semantics of MPI require that many objects that have been freed by the
+ user
+ (e.g., with 'MPI_Type_free' or 'MPI_Comm_free') remain valid until all
+ pending
+ references to that object (e.g., by an 'MPI_Irecv') are complete. There
+ are several ways to implement this; MPICH uses `reference counts` in the
+ objects. To support the 'MPI_THREAD_MULTIPLE' level of thread-safety, these
+ reference counts must be accessed and updated atomically.
+ A reference count for
+ `any` object can be incremented (atomically)
+ with 'MPIU_Object_add_ref(objptr)'
+ and decremented with 'MPIU_Object_release_ref(objptr,newval_ptr)'.
+ These have been designed so that then can be implemented as inlined
+ macros rather than function calls, even in the multithreaded case, and
+ can use special processor instructions that guarantee atomicity to
+ avoid thread locks.
+ The decrement routine sets the value pointed at by 'inuse_ptr' to 0 if
+ the postdecrement value of the reference counter is zero, and to a non-zero
+ value otherwise. If this value is zero, then the routine that decremented
+ the
+ reference count should free the object. This may be as simple as
+ calling 'MPIU_Handle_obj_destroy' (for simple objects with no other allocated
+ storage) or may require calling a separate routine to destroy the object.
+ Because MPI uses 'MPI_xxx_free' to both decrement the reference count and
+ free the object if the reference count is zero, we avoid the use of 'free'
+ in the MPID routines.
+
+ The 'inuse_ptr' approach is used rather than requiring the post-decrement
+ value because, for reference-count semantics, all that is necessary is
+ to know when the reference count reaches zero, and this can sometimes
+ be implemented more cheaply than requiring the post-decrement value (e.g.,
+ on IA32, there is an instruction for this operation).
+
+ Question:
+ Should we state that this is a macro so that we can use a register for
+ the output value? That avoids a store. Alternately, have the macro
+ return the value as if it was a function?
+
+ Structure Definitions:
+ The structure definitions in this document define `only` that part of
+ a structure that may be used by code that is making use of the ADI.
+ Thus, some structures, such as 'MPID_Comm', have many defined fields;
+ these are used to support MPI routines such as 'MPI_Comm_size' and
+ 'MPI_Comm_remote_group'. Other structures may have few or no defined
+ members; these structures have no fields used outside of the ADI.
+ In C++ terms, all members of these structures are 'private'.
+
+ For the initial implementation, we expect that the structure definitions
+ will be designed for the multimethod device. However, all items that are
+ specific to a particular device (including the multi-method device)
+ will be placed at the end of the structure;
+ the document will clearly identify the members that all implementations
+ will provide. This simplifies much of the code in both the ADI and the
+ implementation of the MPI routines because structure member can be directly
+ accessed rather than using some macro or C++ style method interface.
+
+ T*/
+
+
+/*TOpaqOverview.tex
+ MPI Opaque Objects:
+
+ MPI Opaque objects such as 'MPI_Comm' or 'MPI_Datatype' are specified by
+ integers (in the MPICH2 implementation); the MPI standard calls these
+ handles.
+ Out of range values are invalid; the value 0 is reserved.
+ For most (with the possible exception of
+ 'MPI_Request' for performance reasons) MPI Opaque objects, the integer
+ encodes both the kind of object (allowing runtime tests to detect a datatype
+ passed where a communicator is expected) and important properties of the
+ object. Even the 'MPI_xxx_NULL' values should be encoded so that
+ different null handles can be distinguished. The details of the encoding
+ of the handles is covered in more detail in the MPICH2 Design Document.
+ For the most part, the ADI uses pointers to the underlying structures
+ rather than the handles themselves. However, each structure contains an
+ 'handle' field that is the corresponding integer handle for the MPI object.
+
+ MPID objects (objects used within the implementation of MPI) are not opaque.
+
+ T*/
+
+/* Known MPI object types. These are used for both the error handlers
+ and for the handles. This is a 4 bit value. 0 is reserved for so
+ that all-zero handles can be flagged as an error. */
+/*E
+ MPID_Object_kind - Object kind (communicator, window, or file)
+
+ Notes:
+ This enum is used by keyvals and errhandlers to indicate the type of
+ object for which MPI opaque types the data is valid. These are defined
+ as bits to allow future expansion to the case where an object is valid for
+ multiple types (for example, we may want a universal error handler for
+ errors return). This is also used to indicate the type of MPI object a
+ MPI handle represents. It is an enum because it applies only to
+ the MPI and internal MPICH2 objects.
+
+ The 'MPID_PROCGROUP' kind is used to manage process groups (different
+ from MPI Groups) that are used to keep track of collections of
+ processes (each 'MPID_PROCGROUP' corresponds to a group of processes
+ that define an 'MPI_COMM_WORLD'. This becomes important only
+ when MPI-2 dynamic process features are supported. 'MPID_VCONN' is
+ a virtual connection; while this is not part of the overall ADI3
+ design, an object that manages connections to other processes is
+ a common need, and 'MPID_VCONN' may be used for that.
+
+ Module:
+ Attribute-DS
+ E*/
+
+
+//
+// Handle values are 32 bit values laid out as follows:
+//
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---+-------+---------------------------------------------------+
+// |Typ| Kind | Index |
+// +---+-------+---------------------------------------------------+
+//
+//
+// Handles of type HANDLE_TYPE_INDIRECT are laid out as follows:
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---+-------+-------------------+-------------------------------+
+// |Typ| Kind | Block | Index |
+// +---+-------+-------------------+-------------------------------+
+//
+// where
+//
+// Typ - is the handle type:
+//
+#define HANDLE_TYPE_INVALID 0x0
+#define HANDLE_TYPE_BUILTIN 0x1
+#define HANDLE_TYPE_DIRECT 0x2
+#define HANDLE_TYPE_INDIRECT 0x3
+
+#define HANDLE_TYPE_MASK 0xc0000000
+#define HANDLE_TYPE_SHIFT 30
+#define HANDLE_GET_TYPE(a) (((a) & HANDLE_TYPE_MASK)>>HANDLE_TYPE_SHIFT)
+#define HANDLE_SET_TYPE(a,kind) ((a)|((kind)<<HANDLE_TYPE_SHIFT))
+
+//
+// Kind - is the kind of MPI object (MPID_Object_kind):
+//
+#define HANDLE_MPI_KIND_SHIFT 26
+#define HANDLE_GET_MPI_KIND(a) ( ((a) & 0x3c000000) >> HANDLE_MPI_KIND_SHIFT )
+#define HANDLE_SET_MPI_KIND(a,kind) ( ((a) & 0xc3ffffff) | ((kind) << HANDLE_MPI_KIND_SHIFT) )
+
+//
+// Block - for indirect handles only, the index of the indirect block to
+// which the Index applies
+//
+#define HANDLE_INDIRECT_SHIFT 16
+#define HANDLE_INDIRECT_BLOCK(a) (((a)& 0x03FF0000) >> HANDLE_INDIRECT_SHIFT)
+/* Handle block is between 1 and 1024 *elements* */
+#define HANDLE_BLOCK_SIZE 256
+C_ASSERT(HANDLE_BLOCK_SIZE <= 1024);
+
+//
+// Index - Index of the object. For indirect handles, the index is relative to the block.
+//
+#define HANDLE_INDIRECT_INDEX(a) ((a) & 0x0000FFFF)
+#define HANDLE_BLOCK_INDEX_SIZE 1024
+C_ASSERT(HANDLE_BLOCK_INDEX_SIZE < 65536);
+
+#define HANDLE_INDEX_MASK 0x03FFFFFF
+#define HANDLE_DIRECT_INDEX(a) ((a) & HANDLE_INDEX_MASK)
+
+#define HANDLE_BUILTIN_INDEX(a) ((a) & 0x000000FF)
+
+/* ALL objects have the handle as the first value. */
+/* Inactive (unused and stored on the appropriate avail list) objects
+ have MPIU_Handle_common as the head */
+typedef struct MPIU_Handle_common
+{
+ int handle;
+ volatile long ref_count;
+ struct MPIU_Handle_common *next; /* Free handles use this field to point to the next
+ free object */
+
+} MPIU_Handle_common;
+
+/* All *active* (in use) objects have the handle as the first value; objects
+ with reference counts have the reference count as the second value.
+ See MPIU_Object_add_ref and MPIU_Object_release_ref. */
+typedef struct MPIU_Handle_head
+{
+ int handle;
+ volatile long ref_count;
+
+} MPIU_Handle_head;
+
+/* This type contains all of the data, except for the direct array,
+ used by the object allocators. */
+typedef struct MPIU_Object_alloc_t
+{
+ MPIU_Handle_common *avail; /* Next available object */
+ int initialized; /* */
+ void *(*indirect)[]; /* Pointer to indirect object blocks */
+ int indirect_size; /* Number of allocated indirect blocks */
+ MPID_Object_kind kind; /* Kind of object this is for */
+ int size; /* Size of an individual object */
+ void *direct; /* Pointer to direct block, used
+ for allocation */
+ int direct_size; /* Size of direct block */
+
+ MPI_RWLOCK alloc_lock;
+
+#if DBG
+ int num_alloc; /* Number of objects out of the pool. */
+ volatile long num_user_alloc; /* Number of objects out of the pool owned by the client. */
+ volatile long num_user_msg_alloc; /* Number of objects out of the pool of kind MPID_MESSAGE owned by the client. */
+#endif
+
+} MPIU_Object_alloc_t;
+
+
+_Success_(return!=nullptr)
+_Ret_valid_
+void* MPIU_Handle_obj_alloc(
+ _In_ MPIU_Object_alloc_t* objmem
+ );
+
+void MPIU_Handle_obj_free(
+ _In_ MPIU_Object_alloc_t* objmem,
+ _In_ _Post_ptr_invalid_ void* obj
+ );
+
+_Success_(return!=nullptr)
+void* MPIU_Handle_get_ptr_indirect(
+ _In_ int handle,
+ _In_ MPIU_Object_alloc_t* objmem
+ );
+
+
+/* ------------------------------------------------------------------------- */
+/* mpiobjref.h */
+/* ------------------------------------------------------------------------- */
+
+
+/*M
+ MPIU_Object_add_ref - Increment the reference count for an MPI object
+
+ Synopsis:
+.vb
+ MPIU_Object_add_ref( MPIU_Object *ptr )
+.ve
+
+ Input Parameter:
+. ptr - Pointer to the object.
+
+ Notes:
+ In an unthreaded implementation, this function will usually be implemented
+ as a single-statement macro. In an 'MPI_THREAD_MULTIPLE' implementation,
+ this routine must implement an atomic increment operation, using, for
+ example, a lock on datatypes or special assembly code such as
+.vb
+ try-again:
+ load-link refcount-address to r2
+ add 1 to r2
+ store-conditional r2 to refcount-address
+ if failed branch to try-again:
+.ve
+ on RISC architectures or
+.vb
+ lock
+ inc refcount-address or
+.ve
+ on IA32; "lock" is a special opcode prefix that forces atomicity. This
+ is not a separate instruction; however, the GNU assembler expects opcode
+ prefixes on a separate line.
+
+ Module:
+ MPID_CORE
+
+ Question:
+ This accesses the 'ref_count' member of all MPID objects. Currently,
+ that member is typed as 'volatile int'. However, for a purely polling,
+ thread-funnelled application, the 'volatile' is unnecessary. Should
+ MPID objects use a 'typedef' for the 'ref_count' that can be defined
+ as 'volatile' only when needed? For now, the answer is no; there isn''t
+ enough to be gained in that case.
+M*/
+
+/*M
+ MPIU_Object_release_ref - Decrement the reference count for an MPI object
+
+ Synopsis:
+.vb
+ MPIU_Object_release_ref( MPIU_Object *ptr, int *inuse_ptr )
+.ve
+
+ Input Parameter:
+. objptr - Pointer to the object.
+
+ Output Parameter:
+. inuse_ptr - Pointer to the value of the reference count after decrementing.
+ This value is either zero or non-zero. See below for details.
+
+ Notes:
+ In an unthreaded implementation, this function will usually be implemented
+ as a single-statement macro. In an 'MPI_THREAD_MULTIPLE' implementation,
+ this routine must implement an atomic decrement operation, using, for
+ example, a lock on datatypes or special assembly code such as
+.vb
+ try-again:
+ load-link refcount-address to r2
+ sub 1 to r2
+ store-conditional r2 to refcount-address
+ if failed branch to try-again:
+ store r2 to newval_ptr
+.ve
+ on RISC architectures or
+.vb
+ lock
+ dec refcount-address
+ if zf store 0 to newval_ptr else store 1 to newval_ptr
+.ve
+ on IA32; "lock" is a special opcode prefix that forces atomicity. This
+ is not a separate instruction; however, the GNU assembler expects opcode
+ prefixes on a separate line. 'zf' is the zero flag; this is set if the
+ result of the operation is zero. Implementing a full decrement-and-fetch
+ would require more code and the compare and swap instruction.
+
+ Once the reference count is decremented to zero, it is an error to
+ change it. A correct MPI program will never do that, but an incorrect one
+ (particularly a multithreaded program with a race condition) might.
+
+ The following code is `invalid`\:
+.vb
+ MPID_Object_release_ref( datatype_ptr );
+ if (datatype_ptr->ref_count == 0) MPID_Datatype_free( datatype_ptr );
+.ve
+ In a multi-threaded implementation, the value of 'datatype_ptr->ref_count'
+ may have been changed by another thread, resulting in both threads calling
+ 'MPID_Datatype_free'. Instead, use
+.vb
+ MPID_Object_release_ref( datatype_ptr, &inUse );
+ if (!inuse)
+ MPID_Datatype_free( datatype_ptr );
+.ve
+
+ Module:
+ MPID_CORE
+ M*/
+
+/* The MPIU_DBG... statements are macros that vanish unless
+ --enable-g=log is selected. MPIU_HANDLE_CHECK_REFCOUNT is
+ defined above, and adds an additional sanity check for the refcounts
+*/
+template<typename T>
+__forceinline void MPIU_Object_set_ref(_In_ T* objptr, _In_ long val)
+{
+ ::InterlockedExchange(&objptr->ref_count, val);
+}
+
+template<typename T>
+__forceinline void MPIU_Object_add_ref(_In_ T* objptr)
+{
+ ::InterlockedIncrement(&objptr->ref_count);
+}
+
+template<typename T>
+__forceinline void MPIU_Object_release_ref(_In_ T* objptr, _Out_ BOOL* inuse_ptr)
+{
+ *inuse_ptr = ::InterlockedDecrement(&objptr->ref_count);
+}
+
+/* ------------------------------------------------------------------------- */
+/* mpiobjref.h */
+/* ------------------------------------------------------------------------- */
+
+/* Convert Handles to objects for MPI types that have predefined objects */
+/* Question. Should this do ptr=0 first, particularly if doing --enable-strict
+ compilation? */
+#define MPID_Getb_ptr(kind,a,bmsk,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_BUILTIN: \
+ ptr=MPID_##kind##_builtin+((a)&(bmsk)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_##kind##_direct+HANDLE_DIRECT_INDEX(a); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_##kind*) \
+ MPIU_Handle_get_ptr_indirect(a,&MPID_##kind##_mem)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INVALID: \
+ default: \
+ ptr=0; \
+ break; \
+ } \
+}
+
+#define MPID_Getb_ptr_valid(kind,a,bmsk,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_BUILTIN: \
+ ptr=MPID_##kind##_builtin+((a)&(bmsk)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_##kind##_direct+HANDLE_DIRECT_INDEX(a); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_##kind*) \
+ MPIU_Handle_get_ptr_indirect(a,&MPID_##kind##_mem)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INVALID: \
+ default: \
+ MPID_Abort(nullptr, TRUE, 0, "Invalid Handle type for " #kind);\
+ break; \
+ } \
+}
+
+
+/* Convert handles to objects for MPI types that do _not_ have any predefined
+ objects */
+/* Question. Should this do ptr=0 first, particularly if doing --enable-strict
+   compilation? */
+#define MPID_Get_ptr(kind,a,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_##kind##_direct+HANDLE_DIRECT_INDEX(a); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_##kind*) \
+ MPIU_Handle_get_ptr_indirect(a,&MPID_##kind##_mem)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INVALID: \
+ case HANDLE_TYPE_BUILTIN: \
+ default: \
+ ptr=0; \
+ break; \
+ } \
+}
+
+#define MPID_Get_ptr_valid(kind,a,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_##kind##_direct+HANDLE_DIRECT_INDEX(a); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_##kind*) \
+ MPIU_Handle_get_ptr_indirect(a,&MPID_##kind##_mem)); \
+ __analysis_assume(ptr != nullptr); \
+ break; \
+ case HANDLE_TYPE_INVALID: \
+ case HANDLE_TYPE_BUILTIN: \
+ default: \
+ MPID_Abort(nullptr, TRUE, 0, "Invalid Handle type for " #kind);\
+ break; \
+ } \
+}
+
+
+
+/* FIXME: the masks should be defined with the handle definitions instead
+ of inserted here as literals */
+#define MPID_Group_get_ptr(a,ptr) MPID_Getb_ptr(Group,a,0x03ffffff,ptr)
+#define MPID_File_get_ptr(a,ptr) MPID_Get_ptr(File,a,ptr)
+#define MPID_Errhandler_get_ptr(a,ptr) MPID_Getb_ptr(Errhandler,a,0x3,ptr)
+#define MPID_Op_get_ptr(a,ptr) MPID_Getb_ptr(Op,a,0x000000ff,ptr)
+#define MPID_Info_get_ptr(a,ptr) MPID_Get_ptr(Info,a,ptr)
+#define MPID_Win_get_ptr(a,ptr) MPID_Get_ptr(Win,a,ptr)
+#define MPID_Request_get_ptr(a,ptr) MPID_Get_ptr(Request,a,ptr)
+
+
+#define MPID_Group_get_ptr_valid(a,ptr) MPID_Getb_ptr_valid(Group,a,0x03ffffff,ptr)
+#define MPID_File_get_ptr_valid(a,ptr) MPID_Get_ptr_valid(File,a,ptr)
+#define MPID_Errhandler_get_ptr_valid(a,ptr) MPID_Getb_ptr_valid(Errhandler,a,0x3,ptr)
+#define MPID_Op_get_ptr_valid(a,ptr) MPID_Getb_ptr_valid(Op,a,0x000000ff,ptr)
+#define MPID_Info_get_ptr_valid(a,ptr) MPID_Get_ptr_valid(Info,a,ptr)
+#define MPID_Win_get_ptr_valid(a,ptr) MPID_Get_ptr_valid(Win,a,ptr)
+#define MPID_Request_get_ptr_valid(a,ptr) MPID_Get_ptr_valid(Request,a,ptr)
+
+
+
+/* Keyvals have a special format. This is roughly MPID_Get_ptrb, but
+ the handle index is in a smaller bit field. In addition,
+ there is no storage for the builtin keyvals.
+ For the indirect case, we mask off the part of the keyval that is
+ in the bits normally used for the indirect block index.
+*/
+#define MPID_Keyval_get_ptr(a,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_BUILTIN: \
+ ptr=0; \
+ break; \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_Keyval_direct+((a)&0x3fffff); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_Keyval*) \
+ MPIU_Handle_get_ptr_indirect((a)&0xfc3fffff,&MPID_Keyval_mem)); \
+ break; \
+ case HANDLE_TYPE_INVALID: \
+ default: \
+ ptr=0; \
+ break; \
+ } \
+}
+
+#define MPID_Keyval_get_ptr_valid(a,ptr) \
+{ \
+ switch (HANDLE_GET_TYPE(a)) { \
+ case HANDLE_TYPE_DIRECT: \
+ ptr=MPID_Keyval_direct+((a)&0x3fffff); \
+ break; \
+ case HANDLE_TYPE_INDIRECT: \
+ ptr=((MPID_Keyval*) \
+ MPIU_Handle_get_ptr_indirect((a)&0xfc3fffff,&MPID_Keyval_mem)); \
+ break; \
+ case HANDLE_TYPE_BUILTIN: \
+ case HANDLE_TYPE_INVALID: \
+ default: \
+ MPID_Abort(nullptr, TRUE, 0, "Invalid Handle type for Keyval"); \
+ break; \
+ } \
+}
+
+
+template bool SetName( _Inout_ T* obj, _In_z_ const char* name )
+{
+ if( obj->name == obj->GetDefaultName() )
+ {
+ char* tempName = static_cast(MPIU_Malloc( MPI_MAX_OBJECT_NAME ));
+ if( tempName == nullptr )
+ {
+ return false;
+ }
+ obj->name = tempName;
+ }
+ MPIU_Strncpy( const_cast(obj->name), name, MPI_MAX_OBJECT_NAME );
+ return true;
+}
+
+
+template void CleanupName( _Inout_ T* obj )
+{
+ if( obj->name != obj->GetDefaultName() && obj->name != nullptr )
+ {
+ MPIU_Free( const_cast(obj->name) );
+ obj->name = nullptr;
+ }
+}
+
+
+template void InitName( _Inout_ T* obj )
+{
+ obj->name = obj->GetDefaultName();
+}
+
+
+
+#endif /* MPIHANDLE_H_INCLUDED */
diff --git a/src/mpi/common/mpiiov.h b/src/mpi/common/mpiiov.h
new file mode 100644
index 0000000..b6a8918
--- /dev/null
+++ b/src/mpi/common/mpiiov.h
@@ -0,0 +1,32 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#ifndef MPIIOV_H_INCLUDED
+#define MPIIOV_H_INCLUDED
+
+/* IOVs */
+/* The basic channel interface uses IOVs */
+typedef WSABUF MPID_IOV;
+typedef char iovsendbuf_t;
+typedef char iovrecvbuf_t;
+
+/* FIXME: How is IOV_LIMIT chosen? */
+#define MPID_IOV_LIMIT 16
+
+static inline unsigned iov_size(_In_reads_(n_iov) const MPID_IOV* iov, int n_iov)
+{
+ unsigned total = 0;
+ while(n_iov--)
+ {
+ total += iov->len;
+ iov++;
+ }
+
+ return total;
+}
+
+#endif
diff --git a/src/mpi/common/mpimem.h b/src/mpi/common/mpimem.h
new file mode 100644
index 0000000..9d1e47b
--- /dev/null
+++ b/src/mpi/common/mpimem.h
@@ -0,0 +1,104 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#ifndef MPIMEM_H_INCLUDED
+#define MPIMEM_H_INCLUDED
+
+/* ------------------------------------------------------------------------- */
+/* mpimem.h */
+/* ------------------------------------------------------------------------- */
+
+/*D
+ Memory - Memory Management Routines
+
+ Rules for memory management:
+
+ MPICH explicitly prohibits the appearance of 'malloc', 'free',
+ 'calloc', 'realloc', or 'strdup' in any code implementing a device or
+ MPI call (of course, users may use any of these calls in their code).
+ Instead, you must use 'MPIU_Malloc' etc.; if these are defined
+ as 'malloc', that is allowed, but an explicit use of 'malloc' instead of
+ 'MPIU_Malloc' in the source code is not allowed. This restriction is
+ made to simplify the use of portable tools to test for memory leaks,
+ overwrites, and other consistency checks.
+
+ Most memory should be allocated at the time that 'MPID_Init' is
+ called and released when 'MPID_Finalize' is called. If at all possible,
+ no other MPID routine should fail because memory could not be allocated
+ (for example, because the user has allocated large arrays after 'MPI_Init').
+
+ The implementation of the MPI routines will strive to avoid memory allocation
+ as well; however, operations such as 'MPI_Type_index' that create a new
+ data type that reflects data that must be copied from an array of arbitrary
+ size will have to allocate memory (and can fail; note that there is an
+ MPI error class for out-of-memory).
+
+ Question:
+ Do we want to have an aligned allocation routine? E.g., one that
+ aligns memory on a cache-line.
+ D*/
+
+/* ------------------------------------------------------------------------- */
+
+/* No memory tracing; just use native functions */
+void* MPIU_Malloc( _In_ SIZE_T size );
+
+void* MPIU_Calloc( _In_ SIZE_T elements, _In_ SIZE_T size );
+
+void MPIU_Free( _In_opt_ _Post_ptr_invalid_ void* pMem );
+
+void* MPIU_Realloc( _In_ void* pMem, _In_ SIZE_T size );
+
+#ifdef __cplusplus
+
+//
+// C++ operator new/delete overload.
+//
+// Normal operator new: pInt = new int;
+//
+void* __cdecl operator new( size_t size );
+
+//
+// Placement new: pInt = new( ptr ) int;
+//
+void* __cdecl operator new( size_t /*size*/, _In_/*count_(size)*/ void* pMem );
+
+//
+// Array new: pInt = new int[10];
+//
+void* __cdecl operator new[]( size_t size );
+
+//
+// Placement array new: pInt = new( ptr ) int[10];
+//
+void* __cdecl operator new[]( size_t /*size*/, _In_/*count_(size)*/ void* pMem );
+
+//
+// Normal operator delete: delete pInt;
+//
+void __cdecl operator delete( _In_opt_ _Post_ptr_invalid_ void* pObj );
+
+//
+// Array delete: delete[] pInt;
+//
+void __cdecl operator delete[]( _In_opt_ _Post_ptr_invalid_ void* pObj );
+
+#endif // __cplusplus
+
+
+#define MPIU_Malloc_obj(type_) \
+ (type_*)MPIU_Malloc(sizeof(type_))
+
+#define MPIU_Malloc_objn(count_, type_) \
+ (type_*)MPIU_Malloc((count_)*sizeof(type_))
+
+/* ------------------------------------------------------------------------- */
+/* end of mpimem.h */
+/* ------------------------------------------------------------------------- */
+
+#endif
diff --git a/src/mpi/common/mpimem.inl b/src/mpi/common/mpimem.inl
new file mode 100644
index 0000000..a1d52bd
--- /dev/null
+++ b/src/mpi/common/mpimem.inl
@@ -0,0 +1,52 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include "mpimem.h"
+
+
+__forceinline void* __cdecl operator new( size_t size )
+{
+ return MPIU_Malloc( size );
+}
+
+
+__forceinline void* __cdecl operator new( size_t /*size*/, _In_ void* pMem )
+{
+ return pMem;
+}
+
+
+__forceinline void* __cdecl operator new[]( size_t size )
+{
+ return MPIU_Malloc( size );
+}
+
+
+__forceinline void* __cdecl operator new[]( size_t /*size*/, _In_ void* pMem )
+{
+ return pMem;
+}
+
+
+__forceinline void __cdecl operator delete( _In_opt_ _Post_ptr_invalid_ void* pObj )
+{
+ MPIU_Free( pObj );
+}
+
+
+__forceinline void __cdecl operator delete( _In_opt_ void* /*pObj*/, _In_ void* /*pMem*/ )
+{
+}
+
+
+__forceinline void __cdecl operator delete[]( _In_opt_ _Post_ptr_invalid_ void* pObj )
+{
+ MPIU_Free( pObj );
+}
+
+
+__forceinline void __cdecl operator delete[]( _In_opt_ void* /*pObj*/, _In_ void* /*pMem*/ )
+{
+}
diff --git a/src/mpi/common/mpisock.h b/src/mpi/common/mpisock.h
new file mode 100644
index 0000000..609141c
--- /dev/null
+++ b/src/mpi/common/mpisock.h
@@ -0,0 +1,749 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#if !defined(MPIDU_SOCK_H_INCLUDED)
+#define MPIDU_SOCK_H_INCLUDED
+
+#include "mpidef.h"
+#include "ex.h"
+
+
+#define MPIDU_SOCK_INVALID_SOCK NULL
+#define MPIDU_SOCK_INFINITE_TIME INFINITE
+
+typedef SOCKET MPIDU_SOCK_NATIVE_FD;
+
+
+//
+// Allocated per request
+//
+typedef struct sock_read_context
+{
+ WSABUF tmpiov;
+ WSABUF *iov;
+ MPIU_Bsize_t total;
+ MPIU_Bsize_t min_recv;
+ int iovlen;
+
+} sock_read_context;
+
+
+typedef struct sock_write_context
+{
+ WSABUF tmpiov;
+ WSABUF *iov;
+ MPIU_Bsize_t total;
+ int iovlen;
+
+} sock_write_context;
+
+
+struct sock_state_t;
+
+
+typedef struct sock_accept_context
+{
+ sock_state_t* accept_state;
+ char accept_buffer[sizeof(struct sockaddr_in)*2+32];
+
+} sock_accept_context;
+
+
+#define SOCKI_DESCRIPTION_LENGTH 256
+typedef struct sock_connect_context
+{
+ HANDLE retry_timer;
+ const char* cur_host;
+ int error;
+ int port;
+ int retry_count;
+ char host_description[SOCKI_DESCRIPTION_LENGTH];
+
+} sock_connect_context;
+
+
+typedef struct sock_close_context
+{
+ int closectx;
+
+} sock_close_context;
+
+
+typedef struct sock_overlapped_s
+{
+ EXOVERLAPPED exov;
+ sock_state_t* sock;
+
+ union
+ {
+ sock_read_context read;
+ sock_write_context write;
+ sock_accept_context accept;
+ sock_connect_context connect;
+ sock_close_context close;
+ };
+
+} sock_overlapped_t;
+
+
+typedef void (*sock_close_routine)(
+ _Inout_ sock_state_t* sock,
+ _Inout_ sock_overlapped_t* pov
+ );
+
+
+//
+// Allocated per socket
+//
+typedef struct sock_connect_state_t
+{
+ sock_overlapped_t* pov;
+
+} sock_connect_state_t;
+
+
+typedef struct sock_state_t
+{
+ sock_close_routine pfnClose;
+ SOCKET sock;
+ ExSetHandle_t set;
+ int closing;
+ sock_connect_state_t connect;
+
+} sock_state_t;
+
+
+typedef struct MPIDU_Sock_context_t
+{
+
+ //
+ // Caller Executive overlapped.
+ // * The success/failure callback functions will be invoked on Sock async
+ // operation completion when MPIDU_Sock_wait is called.
+ // * The total number of bytes transferred in a successful read/write
+ // operation is in uov.ov.InternalHigh field of the OVERLAPPED structure.
+ // * The Sock MPI error value is in uov.ov.Internal
+ //
+ EXOVERLAPPED uov;
+
+ //
+ // Sock private context
+ //
+ sock_overlapped_t sov;
+
+} MPIDU_Sock_context_t;
+
+
+//
+// Parses the netmask from a given environment variable.
+//
+HRESULT ParseNetmask( _In_z_ PCWSTR szNetmaskEnv, _Out_ IN_ADDR* pAddr, _Out_ IN_ADDR* pMask );
+
+
+/*@
+MPIDU_Sock_init - initialize the Sock communication library
+
+Return value: MPI error code
+. MPI_SUCCESS - initialization completed successfully
+
+Notes:
+The Sock module may be initialized multiple times. The implementation should perform reference counting if necessary.
+
+Module:
+Utility-Sock
+@*/
+
+_Success_(return==MPI_SUCCESS)
+int MPIDU_Sock_init(void);
+
+
+/*@
+MPIDU_Sock_finalize - shutdown the Sock communication library
+
+Return value: MPI error code
+. MPI_SUCCESS - shutdown completed successfully
+
+Notes:
+ What are the semantics of finalize? Is it responsible for releasing any resources (socks and sock sets) that the calling
+code(s) leaked? Should it block until all OS resources are released?
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int MPIDU_Sock_finalize(void);
+
+
+/*@
+MPIDU_Sock_get_host_description - obtain a description of the host's
+communication capabilities
+
+Input Parameters:
+. host_description - character array in which the function can store a string
+ describing the communication capabilities of the host
+- len - length of the character array
+
+Return value: MPI error code
+. MPI_SUCCESS - description successfully obtained and placed in host_description
+
+Notes:
+The host description string returned by the function is defined by the
+implementation and should not be interpreted by the
+application. This string is to be supplied to MPIDU_Sock_post_connect() when
+one wishes to form a connection with this host.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_get_host_description(
+ _Out_writes_z_(len) char* host_description,
+ _In_ int len
+ );
+
+/*@
+MPIDU_Sock_hostname_to_host_description - convert a host name to a description of the host's communication capabilities
+
+Input Parameters:
++ hostname - host name string
+. host_description - character array in which the function can store a string describing the communication capabilities of the host
+- len - length of host_description
+
+Return value: MPI error code
+. MPI_SUCCESS - description successfully obtained and placed in host_description
+
+Notes:
+The host description string returned by the function is defined by the implementation and should not be interpreted by the
+application. This string is to be supplied to MPIDU_Sock_post_connect() when one wishes to form a connection with the host
+specified by hostname.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_hostname_to_host_description(
+ _In_z_ const char* hostname,
+ _Out_writes_z_(len) char* host_description,
+ _In_ int len
+ );
+
+
+/*@
+MPIDU_Sock_create_native_fd - create a new native socket descriptor/handle
+
+Output Parameter:
+. fd - pointer to the new socket handle
+
+Return value: MPI error code
+. MPI_SUCCESS - new sock set successfully create
+
+Notes:
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_create_native_fd(
+ _Out_ MPIDU_SOCK_NATIVE_FD* fd
+ );
+
+
+/*@
+MPIDU_Sock_native_to_sock - convert a native file descriptor/handle to a sock object
+
+Input Parameters:
++ set - sock set to which the new sock should be added
+. fd - native file descriptor
+
+Output Parameter:
+. sock - new sock object
+
+Return value: MPI error code
+. MPI_SUCCESS - sock successfully created
+
+Notes:
+The constraints on which file descriptors/handles may be converted to a sock object are defined by the implementation.
+It is possible, however, that the conversion of an inappropriate descriptor/handle may complete successfully but the
+sock object may not function properly.
+
+Thread safety:
+The addition of a new sock object to the sock set may occur while other threads are performing operations on the same sock set.
+Thread safety of simultaneous operations on the same sock set must be guaranteed by the Sock implementation.
+
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_native_to_sock(
+ _In_ ExSetHandle_t set,
+ _In_ MPIDU_SOCK_NATIVE_FD fd,
+ _Outptr_ sock_state_t **ppSock
+ );
+
+
+/*@
+MPIDU_Sock_listen - establish a listener sock
+
+Input Parameters:
++ set - sock set to which the listening sock should be added
+- port - desired port (or zero if a specific port is not desired)
+
+Output Parameters:
++ port - port assigned to the listener
+- sock - new listener sock
+
+Return value: MPI error code
+. MPI_SUCCESS - listener sock successfully established
+
+Notes:
+Use the established listener socket to call MPIDU_Sock_post_accept
+
+The environment variable MPICH_PORTRANGE=min:max may be used to restrict the ports mpich processes listen on.
+
+Thread safety:
+The addition of the listener sock object to the sock set may occur while other threads are performing operations on the same sock
+set. Thread safety of simultaneous operations on the same sock set must be guaranteed by the Sock implementation.
+
+Module:
+Utility-Sock
+@*/
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_listen(
+ _In_ ExSetHandle_t set,
+ _In_ unsigned long addr,
+ _Inout_ int *port,
+ _Outptr_ sock_state_t **ppSock
+ );
+
+
+/*@
+MPIDU_Sock_post_accept - request that a new connection would be accepted
+
+Input Parameters:
++ listener_sock - listener sock object from which to obtain the new connection
+- pov - user context associated with the accept request
+
+Output Parameter:
+. pSock - sock object for the new connection
+
+Return value: MPI error code
+. MPI_SUCCESS - new connection successfully established and associated with new sock object
+
+Notes:
+In the event of a connection failure, MPIDU_Sock_post_accept() may fail to acquire and return a new sock despite any
+MPIDU_SOCK_OP_ACCEPT event notification. On the other hand, MPIDU_Sock_post_accept() may return a sock for which the underlying
+connection has already failed. (The Sock implementation may be unaware of the failure until read/write operations are performed.)
+
+Thread safety:
+The addition of the new sock object to the sock set may occur while other threads are performing operations on the same sock set.
+Thread safety of simultaneous operations on the same sock set must be guaranteed by the Sock implementation.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_accept(
+ _In_ sock_state_t *listener_sock,
+ _Outptr_ sock_state_t **ppSock,
+ _In_ MPIDU_Sock_context_t* psc
+ );
+
+
+/*@
+MPIDU_Sock_post_connect - request that a new connection be formed
+
+Input Parameters:
++ set - sock set to which the new sock object should be added
+. host_description - string containing the communication capabilities of the listening host
++ port - port number of listener sock on the listening host
+. pov - user context associated with the connect request
+
+Output Parameter:
+. sock - new sock object associated with the connection request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to form new connection successfully posted
+
+Notes:
+The host description of the listening host is supplied MPIDU_Sock_get_host_description(). The intention is that the description
+contain an enumeration of interface information so that the MPIDU_Sock_connect() can try each of the interfaces until it succeeds
+in forming a connection. Having a complete set of interface information also allows a particular interface be used selected by the
+user at runtime using the MPICH_NETMASK. The name of the environment variable seems wrong. Perhaps MPICH_INTERFACE? We
+should ask the Systems group.
+
+Thread safety:
+The addition of the new sock object to the sock set may occur while other threads are performing operations on the same sock set.
+Thread safety of simultaneous operations on the same sock set must be guaranteed by the Sock implementation.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_connect(
+ _In_ ExSetHandle_t set,
+ _In_z_ const char* host_description,
+ _In_ int port,
+ _Outptr_ sock_state_t** ppSock,
+ _In_ int usemask,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+
+/*@
+MPIDU_Sock_post_close - request that an existing connection be closed
+
+Input Parameter:
+. sock - sock object to be closed
+- pov - user context associated with the close request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to close the connection was successfully posted
+
+Notes:
+If any other operations are posted on the specified sock, they will be terminated. An appropriate event will be generated for each
+terminated operation. All such events will be delivered by MPIDU_Sock_wait() prior to the delivery of the MPIDU_SOCK_OP_CLOSE
+event.
+
+The sock object is destroyed just prior to the MPIDU_SOCK_OP_CLOSE event being returned by MPIDU_Sock_wait(). Any outstanding
+references to the sock object held by the application should be considered invalid and not used again.
+
+Thread safety:
+MPIDU_Sock_post_close() may be called while another thread is calling or blocking in MPIDU_Sock_wait() specifying the same sock set
+to which this sock belongs. If another thread is blocking MPIDU_Sock_wait() and the close operation causes the sock set to become
+empty, then MPIDU_Sock_wait() will return with an error.
+
+Calling any of the immediate or post routines during or after the call to MPIDU_Sock_post_close() is considered an application error.
+The result of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads
+to ensure that simultaneous calls do not occur.
+
+Module:
+Utility-Sock
+@*/
+
+void
+MPIDU_Sock_post_close(
+ _Inout_ sock_state_t *sock,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+/*@
+MPIDU_Sock_post_read - request that data be read from a sock
+
+Input Parameters:
++ sock - sock object from which data is to be read
+. buf - buffer into which the data should be placed
+. len - number of bytes to read
+. minbr - the async operation can return with number of bytes read greater or
+ equal to minbr (min bar) before the entire buffer is read.
+. pov - user context associated with the read request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to read was successfully posted
+
+Notes:
+Only one read operation may be posted at a time. Furthermore, an immediate read may not be performed while a posted write is
+outstanding. This is considered to be an application error, and the results of doing so are undefined.
+
+If MPIDU_Sock_post_close() is called before the posted read operation completes, the read operation will be terminated.
+
+Thread safety:
+MPIDU_Sock_post_read() may be called while another thread is attempting to perform an immediate write or post a write operation on
+the same sock. MPIDU_Sock_post_read() may also be called while another thread is calling or blocking in MPIDU_Sock_wait() on the
+same sock set to which the specified sock belongs.
+
+MPIDU_Sock_post_write() may not be called while another thread is performing an immediate read on the same sock. This is
+considered to be an application error, and the results of doing so are undefined.
+
+Calling MPIDU_Sock_post_read() during or after the call to MPIDU_Sock_post_close() is considered an application error. The result of
+doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads to ensure that
+one thread is not attempting to post a new operation while another thread is attempting to close the sock.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_read(
+ _Inout_ sock_state_t *sock,
+ _Out_writes_bytes_(len) void * buf,
+ _In_ MPIU_Bsize_t len,
+ _In_ MPIU_Bsize_t minbr,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+/*@
+MPIDU_Sock_post_readv - request that a vector of data be read from a sock
+
+Input Parameters:
++ sock - sock object from which the data is to read
+. iov - I/O vector describing buffers into which the data is placed
+. iov_n - number of elements in I/O vector (must be 1 currently)
+. minbr - the async operation can return with number of bytes read greater or
+ equal to minbr (min bar) before the entire buffer is read.
+. pov - user context associated with the readv request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to read was successfully posted
+
+Notes:
+Only one read operation may be posted at a time. Furthermore, an immediate read may not be performed while a posted write is
+outstanding. This is considered to be an application error, and the results of doing so are undefined.
+
+If MPIDU_Sock_post_close() is called before the posted read operation completes, the read operation will be terminated.
+
+Thread safety:
+MPIDU_Sock_post_readv() may be called while another thread is attempting to perform an immediate write or post a write operation on
+the same sock. MPIDU_Sock_post_readv() may also be called while another thread is calling or blocking in MPIDU_Sock_wait() on the
+same sock set to which the specified sock belongs.
+
+MPIDU_Sock_post_readv() may not be called while another thread is performing an immediate read on the same sock. This is
+considered to be an application error, and the results of doing so are undefined.
+
+Calling MPIDU_Sock_post_readv() during or after the call to MPIDU_Sock_post_close() is considered an application error. The result
+of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads to ensure
+that one thread is not attempting to post a new operation while another thread is attempting to close the sock.
+
+Module:
+Utility-Sock
+@*/
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_readv(
+ _Inout_ sock_state_t *sock,
+ _In_reads_(iov_n)WSABUF * iov,
+ _In_ int iov_n,
+ _In_ MPIU_Bsize_t minbr,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+/*@
+MPIDU_Sock_post_write - request that data be written to a sock
+
+Input Parameters:
++ sock - sock object which the data is to be written
+. buf - buffer containing the data
+. len - number of bytes to write
+. pov - user context associated with the write request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to write was successfully posted
+
+Notes:
+Only one write operation may be posted at a time. Furthermore, an immediate write may not be performed while a posted write is
+outstanding. This is considered to be an application error, and the results of doing so are undefined.
+
+If MPIDU_Sock_post_close() is called before the posted write operation completes, the write operation will be terminated.
+
+Thread safety:
+MPIDU_Sock_post_write() may be called while another thread is attempting to perform an immediate read or post a read operation on
+the same sock. MPIDU_Sock_post_write() may also be called while another thread is calling or blocking in MPIDU_Sock_wait() on the
+same sock set to which the specified sock belongs.
+
+MPIDU_Sock_post_write() may not be called while another thread is performing an immediate write on the same sock. This is
+considered to be an application error, and the results of doing so are undefined.
+
+Calling MPIDU_Sock_post_write() during or after the call to MPIDU_Sock_post_close() is considered an application error. The result
+of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads to ensure
+that one thread is not attempting to post a new operation while another thread is attempting to close the sock. Do we really
+need this flexibility?
+
+Module:
+Utility-Sock
+@*/
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_write(
+ _Inout_ sock_state_t *sock,
+ _In_reads_bytes_(min) const void* buf,
+ _In_ MPIU_Bsize_t min,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+/*@
+MPIDU_Sock_post_writev - request that a vector of data be written to a sock
+
+Input Parameters:
++ sock - sock object which the data is to be written
+. iov - I/O vector describing buffers of data to be written
+. iov_n - number of elements in I/O vector
+. pov - user context associated with the writev request
+
+Return value: MPI error code
+. MPI_SUCCESS - request to write was successfully posted
+
+Notes:
+Only one write operation may be posted at a time. Furthermore, an immediate write may not be performed while a posted write is
+outstanding. This is considered to be an application error, and the results of doing so are undefined.
+
+If MPIDU_Sock_post_close() is called before the posted write operation completes, the write operation will be terminated.
+
+Thread safety:
+MPIDU_Sock_post_writev() may be called while another thread is attempting to perform an immediate read or post a read operation on
+the same sock. MPIDU_Sock_post_writev() may also be called while another thread is calling or blocking in MPIDU_Sock_wait() on the
+same sock set to which the specified sock belongs.
+
+MPIDU_Sock_post_writev() may not be called while another thread is performing an immediate write on the same sock. This is
+considered to be an application error, and the results of doing so are undefined.
+
+Calling MPIDU_Sock_post_writev() during or after the call to MPIDU_Sock_post_close() is considered an application error. The result
+of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads to ensure
+that one thread is not attempting to post a new operation while another thread is attempting to close the sock.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_writev(
+ _Inout_ sock_state_t *sock,
+ _In_reads_(iov_n) WSABUF* iov,
+ _In_ int iov_n,
+ _Inout_ MPIDU_Sock_context_t* psc
+ );
+
+/*@
+MPIDU_Sock_close - perform an immediate hard close
+
+Input Parameter:
+. sock - sock object to be closed
+
+Return value: none
+
+Notes:
+If any other operations are posted on the specified sock, they will be terminated. An appropriate event will be generated for each
+terminated operation. All such events will be delivered by MPIDU_Sock_wait(). No MPIDU_SOCK_OP_CLOSE
+event is generated.
+
+The sock object is destroyed immediately. Any outstanding references to the sock object held by the application should be considered
+invalid and not used again.
+
+Thread safety:
+MPIDU_Sock_close() may be called while another thread is calling or blocking in MPIDU_Sock_wait() specifying the same sock set
+to which this sock belongs. If another thread is blocking MPIDU_Sock_wait() and the close operation causes the sock set to become
+empty, then MPIDU_Sock_wait() will return with an error.
+
+Calling any of the immediate or post routines during or after the call to MPIDU_Sock_post_close() is considered an application error.
+The result of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads
+to ensure that simultaneous calls do not occur.
+
+Module:
+Utility-Sock
+@*/
+
+void
+MPIDU_Sock_close(
+ _In_ _Post_invalid_ sock_state_t *sock
+ );
+
+
+/*@
+MPIDU_Sock_writev - perform an immediate vector write
+
+Input Parameters:
++ sock - sock object to which data is to be written
+. iov - I/O vector describing buffers of data to be written
+- iov_n - number of elements in I/O vector
+
+Output Parameter:
+. num_written - actual number of bytes written
+
+Return value: MPI error code
+. MPI_SUCCESS - no error encountered during the write operation
+
+Notes:
+An immediate write may not be performed while a posted write is outstanding on the same sock. This is considered to be an
+application error, and the results of doing so are undefined.
+
+Thread safety:
+MPIDU_Sock_write() may be called while another thread is attempting to perform an immediate read or post a read operation on the
+same sock. MPIDU_Sock_write() may also be called while another thread is calling or blocking in MPIDU_Sock_wait() on the same sock
+set to which the specified sock belongs.
+
+A immediate write may not be performed if another thread is performing an immediate write on the same sock. This is considered to
+be an application error, and the results of doing so are undefined.
+
+Calling MPIDU_Sock_write() during or after the call to MPIDU_Sock_post_close() is considered to be an application error. The result
+of doing so is undefined. The application should coordinate the closing of a sock with the activities of other threads to ensure
+that one thread is not attempting to perform an immediate write while another thread is attempting to close the sock.
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_writev(
+ _In_ const sock_state_t * const sock,
+ _In_reads_(iov_n) WSABUF * iov,
+ _In_ int iov_n,
+ _Out_ MPIU_Bsize_t * num_written
+ );
+
+
+/*@
+MPIDU_Sock_get_sock_id - get an integer identifier for a sock object
+
+Input Parameter:
+. sock - sock object
+
+Return value: an integer that uniquely identifies the sock object
+
+Notes:
+The integer is unique relative to all other open sock objects in the local process. The integer may later be reused for a
+different sock once the current object is closed and destroyed.
+
+This function does not return an error code. Passing in an invalid sock object has undefined results (garbage in, garbage out).
+
+Module:
+Utility-Sock
+@*/
+
+_Success_(return >=0)
+int
+MPIDU_Sock_get_sock_id(
+ _In_ const sock_state_t * const sock
+ );
+
+/*@
+MPIDU_Sock_keepalive - enable connection keep-alive protocol
+
+Input Parameter:
+. sock - sock object
+
+Return value: an MPI error code
+
+Module:
+Utility-Sock
+@*/
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_keepalive(
+ _In_ const sock_state_t * const sock
+ );
+
+//
+// Utility function to retrieve the ip address string associated with a socket.
+// This is the address the socket is connected to on the remote host.
+//
+_Success_(*pPort != 0)
+void
+get_sock_peer_address(
+ _In_ _Pre_satisfies_(0 != pSock->sock) sock_state_t* pSock,
+ _Out_writes_bytes_(addrLen) char* pAddr,
+ _In_ int addrLen,
+ _Out_ int* pPort
+ );
+
+#endif /* !defined(MPIDU_SOCK_H_INCLUDED) */
diff --git a/src/mpi/common/mpistr.cpp b/src/mpi/common/mpistr.cpp
new file mode 100644
index 0000000..d51ccd5
--- /dev/null
+++ b/src/mpi/common/mpistr.cpp
@@ -0,0 +1,445 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+
+
+/*
+ * This file contains "safe" versions of the various string and printf
+ * operations.
+ */
+
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Snprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) char* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const char* format,
+ ...
+ )
+{
+ size_t len;
+ va_list args;
+ va_start( args, format );
+
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "SMPD uses only ANSI character set.");
+ HRESULT hr = StringCchVPrintfExA(
+ dest,
+ cchDest,
+ nullptr,
+ &len,
+ 0,
+ format,
+ args
+ );
+
+ va_end( args );
+
+ if( FAILED( hr ) )
+ {
+ return 0;
+ }
+
+ return static_cast(cchDest - len);
+}
+
+
+//
+// Overloaded function for wide characters
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Snprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) wchar_t* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const wchar_t* format,
+ ...
+ )
+{
+ size_t len;
+ va_list args;
+ va_start( args, format );
+
+ HRESULT hr = StringCchVPrintfExW(
+ dest,
+ cchDest,
+ nullptr,
+ &len,
+ 0,
+ format,
+ args
+ );
+
+ va_end( args );
+
+ if( FAILED( hr ) )
+ {
+ return 0;
+ }
+
+ return static_cast(cchDest - len);
+}
+
+
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Vsnprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) char* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const char* format,
+ _In_ va_list args
+ )
+{
+ size_t len;
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MSMPI uses only ANSI character set.");
+ HRESULT hr = StringCchVPrintfExA(
+ dest,
+ cchDest,
+ nullptr,
+ &len,
+ 0,
+ format,
+ args
+ );
+
+ if( FAILED( hr ) )
+ {
+ return 0;
+ }
+
+ return static_cast(cchDest - len);
+}
+
+
+//
+// Overloaded function for wide characters
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Vsnprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) wchar_t* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const wchar_t* format,
+ _In_ va_list args
+ )
+{
+ size_t len;
+ HRESULT hr = StringCchVPrintfExW(
+ dest,
+ cchDest,
+ nullptr,
+ &len,
+ 0,
+ format,
+ args
+ );
+
+ if( FAILED( hr ) )
+ {
+ return 0;
+ }
+
+ return static_cast(cchDest - len);
+}
+
+
+_Success_(return == 0)
+int
+MPIU_Strcpy(
+ _Out_writes_z_(cchDest) char* dest,
+ _In_ size_t cchDest,
+ _In_z_ const char* src )
+{
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MSMPI uses only ANSI character set.");
+ HRESULT hr = StringCchCopyA( dest, cchDest, src );
+
+ if( FAILED( hr ) )
+ {
+ return static_cast( hr );
+ }
+
+ return 0;
+}
+
+
+_Success_(return == 0)
+int
+MPIU_Strcpy(
+ _Out_writes_z_(cchDest) wchar_t* dest,
+ _In_ size_t cchDest,
+ _In_z_ const wchar_t* src )
+{
+ HRESULT hr = StringCchCopyW( dest, cchDest, src );
+
+ if( FAILED( hr ) )
+ {
+ return static_cast( hr );
+ }
+
+ return 0;
+}
+
+
+void
+MPIU_Strnapp(
+ _Out_writes_z_(n) char* dst,
+ _In_z_ const char* src,
+ _In_ size_t n )
+{
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MSMPI uses only ANSI character set.");
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Ignoring return value.");
+ StringCchCatA( dst, n, src );
+}
+
+
+void
+MPIU_Strnapp(
+ _Out_writes_z_(n) wchar_t* dst,
+ _In_z_ const wchar_t* src,
+ _In_ size_t n)
+{
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Ignoring return value.");
+ StringCchCatW(dst, n, src);
+}
+
+
+size_t
+MPIU_Strlen(
+ _In_ PCSTR src,
+ _In_ size_t cchMax
+ )
+{
+ size_t len;
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MSMPI uses only ANSI character set.");
+ HRESULT hr = StringCchLengthA( src, cchMax, &len );
+ if( FAILED( hr ) )
+ {
+ return SIZE_MAX;
+ }
+
+ return len;
+}
+
+
+size_t
+MPIU_Strlen(
+ _In_ PCWSTR src,
+ _In_ size_t cchMax
+ )
+{
+ size_t len;
+ HRESULT hr = StringCchLengthW( src, cchMax, &len );
+ if( FAILED( hr ) )
+ {
+ return SIZE_MAX;
+ }
+
+ return len;
+}
+
+
+_Ret_valid_ _Null_terminated_
+_Success_(return != nullptr)
+wchar_t*
+MPIU_Strdup(
+ _In_z_ const wchar_t* str
+ )
+{
+ size_t maxlen = MPIU_Strlen( str );
+ if( maxlen == SIZE_MAX )
+ {
+ return nullptr;
+ }
+
+ //
+ // Need one extra for the null terminating character
+ //
+ maxlen++;
+ wchar_t* s = static_cast( MPIU_Malloc( sizeof(wchar_t) * maxlen ) );
+ if( s != nullptr )
+ {
+ CopyMemory( s, str, maxlen * sizeof(wchar_t) );
+ }
+ return s;
+}
+
+
+_Ret_valid_ _Null_terminated_
+_Success_(return != nullptr)
+char*
+MPIU_Strdup(
+ _In_z_ const char* str
+ )
+{
+ size_t maxlen = MPIU_Strlen( str );
+ if( maxlen == SIZE_MAX )
+ {
+ return nullptr;
+ }
+
+ //
+ // Need one extra for the null terminating character
+ //
+ maxlen++;
+ char* s = static_cast( MPIU_Malloc( sizeof(char) * maxlen ) );
+ if( s != nullptr )
+ {
+ CopyMemory( s, str, maxlen );
+ }
+ return s;
+}
+
+
+_Success_(return==NO_ERROR)
+DWORD
+MPIU_Getenv(
+ _In_z_ PCSTR name,
+ _Out_writes_z_(cchBuffer) PSTR buffer,
+ _In_ DWORD cchBuffer
+ )
+{
+ OACR_WARNING_SUPPRESS( USE_WIDE_API, "MS MPI uses ANSI char set" );
+ DWORD cchRet = GetEnvironmentVariableA( name, buffer, cchBuffer );
+ if( cchRet == 0 )
+ {
+ //
+ // There can be errors other than ERROR_ENVVAR_NOT_FOUND.
+ // We treat them all as if the env var does not exist.
+ //
+ return ERROR_ENVVAR_NOT_FOUND;
+ }
+ else if( cchRet >= cchBuffer )
+ {
+ return ERROR_INSUFFICIENT_BUFFER;
+ }
+
+ return NO_ERROR;
+}
+
+
+_Success_(return==NO_ERROR)
+DWORD
+MPIU_Getenv(
+ _In_z_ PCWSTR name,
+ _Out_writes_z_(cchBuffer) PWSTR buffer,
+ _In_ DWORD cchBuffer
+ )
+{
+ DWORD cchRet = GetEnvironmentVariableW( name, buffer, cchBuffer );
+ if( cchRet == 0 )
+ {
+ //
+ // There can be errors other than ERROR_ENVVAR_NOT_FOUND.
+ // We treat them all as if the env var does not exist.
+ //
+ return ERROR_ENVVAR_NOT_FOUND;
+ }
+ else if( cchRet >= cchBuffer )
+ {
+ return ERROR_INSUFFICIENT_BUFFER;
+ }
+
+ return NOERROR;
+}
+
+
+//
+// Callee will need to call delete[] to free the memory allocated
+// for wname_ptr if the function succeeds.
+//
+_Success_(return == NOERROR)
+DWORD
+MPIU_MultiByteToWideChar(
+ _In_z_ const char* name,
+ _Outptr_result_z_ wchar_t** wname_ptr
+ )
+{
+ int len = MultiByteToWideChar(
+ CP_UTF8,
+ MB_ERR_INVALID_CHARS,
+ name,
+ -1,
+ NULL,
+ 0
+ );
+ if( len == 0 )
+ {
+ return GetLastError();
+ }
+
+ wchar_t* wname = new wchar_t[len];
+ if( wname == NULL )
+ {
+ return ERROR_INSUFFICIENT_BUFFER;
+ }
+
+ len = MultiByteToWideChar(
+ CP_UTF8,
+ MB_ERR_INVALID_CHARS,
+ name,
+ -1,
+ wname,
+ len
+ );
+ if( len == 0 )
+ {
+ delete[] wname;
+ return GetLastError();
+ }
+
+ *wname_ptr = wname;
+ return NOERROR;
+}
+
+
+//
+// Callee will need to call delete[] to free the memory allocated
+// for outputStr if the function succeeds.
+//
+_Success_(return == NOERROR)
+DWORD
+MPIU_WideCharToMultiByte(
+ _In_z_ const wchar_t* str,
+ _Outptr_result_z_ char** outputStr
+ )
+{
+ int len = WideCharToMultiByte(
+ CP_UTF8,
+ WC_ERR_INVALID_CHARS,
+ str,
+ -1,
+ nullptr,
+ 0,
+ nullptr,
+ nullptr
+ );
+ if( len == 0 )
+ {
+ return GetLastError();
+ }
+
+ char* tmpStr = new char[len];
+ if( tmpStr == nullptr )
+ {
+ return ERROR_INSUFFICIENT_BUFFER;
+ }
+
+ len = WideCharToMultiByte(
+ CP_UTF8,
+ WC_ERR_INVALID_CHARS,
+ str,
+ -1,
+ tmpStr,
+ len,
+ nullptr,
+ nullptr
+ );
+ if( len == 0 )
+ {
+ delete[] tmpStr;
+ return GetLastError();
+ }
+
+ *outputStr = tmpStr;
+ return NOERROR;
+}
diff --git a/src/mpi/common/mpistr.h b/src/mpi/common/mpistr.h
new file mode 100644
index 0000000..a39d04a
--- /dev/null
+++ b/src/mpi/common/mpistr.h
@@ -0,0 +1,366 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include
+#include
+
+
+/*@ MPIU_Strncpy - Copy a string with buffer size. Force null termination
+
+ Input Parameters:
+. dst - String to copy into
++ src - String to copy
+- n - 'dst' buffer size in chars (including null char)
+
+ Return value:
+ pointer to the end terminating null char
+
+ Notes:
+ This routine is the routine that you wish 'strncpy' was. In copying
+ 'src' to 'dst', it stops when either the end of 'src' (the
+ null character) is seen or the maximum length 'n is reached.
+ Unlike 'strncpy', it does not add enough nulls to 'dst' after
+ copying 'src' in order to move precisely 'n' characters.
+ This routine is safer than strncpy; it always null terminates the dst
+ string. (except when the dst size is zero)
+
+ MPIU_Strncpy is implemented inline to help the compiler optimize
+ per use instance.
+
+ Module:
+ Utility
+ @*/
+_Ret_z_
+_Success_(return!=nullptr)
+static inline char*
+MPIU_Strncpy(
+ _Out_writes_z_(n) char* dst,
+ _In_z_ const char* src,
+ _In_ size_t n
+ )
+{
+ char* end;
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MSMPI uses only ANSI character set.");
+ HRESULT hr = StringCchCopyExA( dst, n, src, &end, nullptr, 0 );
+ if( hr == STRSAFE_E_INVALID_PARAMETER )
+ {
+ return nullptr;
+ }
+
+ return end;
+}
+
+
+_Ret_z_
+_Success_(return!=nullptr)
+static inline wchar_t*
+MPIU_Strncpy(
+ _Out_writes_z_(n) wchar_t* dst,
+ _In_z_ const wchar_t* src,
+ _In_ size_t n
+ )
+{
+ wchar_t* end;
+ HRESULT hr = StringCchCopyExW( dst, n, src, &end, nullptr, 0 );
+ if( hr == STRSAFE_E_INVALID_PARAMETER )
+ {
+ return nullptr;
+ }
+
+ return end;
+}
+
+
+//
+// Summary:
+// This is a convenient wrapper for StringCchCopyA
+//
+// Return: 0 if success, other errors if failure
+//
+_Success_(return == 0)
+int
+MPIU_Strcpy(
+ _Out_writes_z_(cchDest) char* dest,
+ _In_ size_t cchDest,
+ _In_z_ const char* src
+ );
+
+
+//
+// Summary:
+// This is a convenient wrapper for StringCchCopyW
+//
+// Return: 0 if success, other errors if failure
+//
+_Success_(return == 0)
+int
+MPIU_Strcpy(
+ _Out_writes_z_(cchDest) wchar_t* dest,
+ _In_ size_t cchDest,
+ _In_z_ const wchar_t* src
+ );
+
+
+/*@ MPIU_Szncpy - Copy a string into a fixed sized buffer; force null termination
+
+ MPIU_Szncpy is a helper macro provided for copying into fixed sized char arrays.
+ The macro computes the size (char count) of the dst array. Usage example,
+
+ char buffer[333];
+ ...
+ -* copy max 333 chars into buffer; buffer will be null terminated. *-
+ MPIU_Szncpy(buffer, str);
+
+ @*/
+#define MPIU_Szncpy(dst, src) MPIU_Strncpy(dst, src, _countof(dst))
+
+
+/*@ MPIU_Strnapp - Append to a string with buffer size. Force null termination
+
+ Input Parameters:
+. dst - String to copy into
++ src - String to append
+- n - 'dst' buffer size in chars (including null char)
+
+ Output Parameter:
+ pointer to the end terminating null char
+
+ Notes:
+ This routine is similar to 'strncat' except that the 'n' argument
+ is the maximum total length of 'dst', rather than the maximum
+ number of characters to move from 'src'. Thus, this routine is
+ easier to use when the declared size of 'src' is known.
+
+ MPIU_Strnapp is implemented inline to help the compiler optimize
+ per use instance.
+
+ Module:
+ Utility
+ @*/
+void MPIU_Strnapp(
+ _Out_writes_z_(n) char *dst,
+ _In_z_ const char *src,
+ _In_ size_t n);
+
+
+void MPIU_Strnapp(
+ _Out_writes_z_(n) wchar_t *dst,
+ _In_z_ const wchar_t *src,
+ _In_ size_t n);
+
+
+/*@ MPIU_Sznapp - Append a string into a fixed sized buffer; force null termination
+
+ MPIU_Sznapp is a helper macro provided for appending into fixed sized char arrays.
+ The macro computes the size (char count) of the dst array. Usage example,
+
+ char buffer[333] = "Initial string";
+ ...
+ -* copy max 333 chars into buffer; buffer will be null terminated. *-
+ MPIU_Sznapp(buffer, str);
+
+ @*/
+#define MPIU_Sznapp(dst, src) MPIU_Strnapp(dst, src, _countof(dst))
+
+
+size_t MPIU_Strlen(
+ _In_ PCSTR src,
+ _In_ size_t cchMax = STRSAFE_MAX_CCH );
+
+
+size_t MPIU_Strlen(
+ _In_ PCWSTR src,
+ _In_ size_t cchMax = STRSAFE_MAX_CCH );
+
+
+/* ---------------------------------------------------------------------- */
+/* FIXME - The string routines do not belong in the memory header file */
+/* FIXME - The string error code such be MPICH2-usable error codes */
+#define MPIU_STR_SUCCESS 0
+#define MPIU_STR_FAIL -1
+#define MPIU_STR_NOMEM 1
+
+/* FIXME: TRUE/FALSE definitions should either not be used or be
+ used consistently. These also do not belong in the mpimem header file. */
+#define MPIU_TRUE 1
+#define MPIU_FALSE 0
+
+/* FIXME: Global types like this need to be discussed and agreed to */
+typedef int MPIU_BOOL;
+
+/* FIXME: These should be scoped to only the routines that need them */
+#ifdef USE_HUMAN_READABLE_TOKENS
+
+#define MPIU_STR_QUOTE_CHAR '\"'
+#define MPIU_STR_QUOTE_STR "\""
+#define MPIU_STR_DELIM_CHAR '='
+#define MPIU_STR_DELIM_STR "="
+#define MPIU_STR_ESCAPE_CHAR '\\'
+#define MPIU_STR_SEPAR_CHAR ' '
+#define MPIU_STR_SEPAR_STR " "
+
+#else
+
+#define MPIU_STR_QUOTE_CHAR '\"'
+#define MPIU_STR_QUOTE_STR "\""
+#define MPIU_STR_DELIM_CHAR '#'
+#define MPIU_STR_DELIM_STR "#"
+#define MPIU_STR_ESCAPE_CHAR '\\'
+#define MPIU_STR_SEPAR_CHAR '$'
+#define MPIU_STR_SEPAR_STR "$"
+
+#endif
+
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_get_string_arg(
+ _In_opt_z_ const char* str,
+ _In_opt_z_ const char* key,
+ _Out_writes_z_(val_len) char* val,
+ _In_ size_t val_len
+ );
+
+
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_get_int_arg(
+ _In_z_ const char *str,
+ _In_z_ const char *flag,
+ _Out_ int *val_ptr
+ );
+
+
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_string_arg(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR*str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *flag,
+ _In_z_ const char *val
+ );
+
+
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_int_arg(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR*str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *flag,
+ _In_ int val
+ );
+
+
+_Success_(return == MPIU_STR_SUCCESS)
+int
+MPIU_Str_add_string(
+ _Inout_ _Outptr_result_buffer_(*maxlen_ptr) PSTR*str_ptr,
+ _Inout_ int *maxlen_ptr,
+ _In_z_ const char *val
+ );
+
+
+_Success_(return == 0)
+int
+MPIU_Str_get_string(
+ _Inout_ _Outptr_result_maybenull_z_ PCSTR* str_ptr,
+ _Out_writes_z_(val_len)char *val,
+ _In_ size_t val_len
+ );
+
+
+//
+// Provide a fallback snprintf for systems that do not have one
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Snprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) char* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const char* format,
+ ...
+ );
+
+
+//
+// Overloaded function for wide characters
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Snprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) wchar_t* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const wchar_t* format,
+ ...
+ );
+
+
+//
+// Provide vsnprintf functionality by using strsafe's StringCchVPrintfEx
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Vsnprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest,return)char* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const char* format,
+ _In_ va_list args
+ );
+
+
+//
+// Overloaded function for wide characters
+// Provide vsnprintf functionality by using strsafe's StringCchVPrintfEx
+//
+_Success_(return >= 0 && return <= cchDest)
+int
+MPIU_Vsnprintf(
+ _Null_terminated_ _Out_writes_to_(cchDest, return) wchar_t* dest,
+ _In_ size_t cchDest,
+ _Printf_format_string_ const wchar_t* format,
+ _In_ va_list args
+ );
+
+
+//
+// Provide _strdup functionality
+//
+_Ret_valid_ _Null_terminated_
+_Success_(return != nullptr)
+char*
+MPIU_Strdup(
+ _In_z_ const char* str
+ );
+
+
+_Ret_valid_ _Null_terminated_
+_Success_(return != nullptr)
+wchar_t*
+MPIU_Strdup(
+ _In_z_ const wchar_t* str
+ );
+
+
+//
+// Callee will need to call delete[] to free the memory allocated
+// for wname_ptr if the function succeeds.
+//
+_Success_(return == NOERROR)
+DWORD
+MPIU_MultiByteToWideChar(
+ _In_z_ const char* name,
+ _Outptr_result_z_ wchar_t** wname_ptr
+ );
+
+
+//
+// Callee will need to call delete[] to free the memory allocated
+// for outputStr if the function succeeds.
+//
+_Success_(return == NOERROR)
+DWORD
+MPIU_WideCharToMultiByte(
+ _In_z_ const wchar_t* str,
+ _Outptr_result_z_ char** outputStr
+ );
diff --git a/src/mpi/common/mpitrace.h b/src/mpi/common/mpitrace.h
new file mode 100644
index 0000000..2267a1e
--- /dev/null
+++ b/src/mpi/common/mpitrace.h
@@ -0,0 +1,185 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+#include
+#include
+#include "evntprov.h"
+
+#ifndef _In_reads_
+# define _In_reads_(max_count)
+#endif
+
+//
+//Enums to identify call sites.
+//
+
+//used for events in ch3u_nd_adapter.cpp
+enum AdapterInit{
+ AdapterInitOpen = 1,
+ AdapterInitQuery,
+ AdapterInitCQDepth,
+ AdapterInitInitiatorQDepth,
+ AdapterInitRecvQDepth,
+ AdapterInitCreateOverlapped
+};
+
+enum AdapterListen{
+ AdapterListenCreateListener = 1,
+ AdapterListenBind,
+ AdapterListenGetLocalAddress,
+ AdapterListenListen
+};
+
+enum AdapterGetConnectionRequest{
+ GetConnectionRequestCreateConnector = 1,
+ GetConnectionRequestGetConnectionRequest
+};
+
+enum AdapterCreateConector{
+ AdapterCreateConectorCreateConnector = 1,
+ AdapterCreateConectorBind
+};
+
+enum AdapterGetConnSucceeded{
+ AdapterGetConnSucceededInvalidBufferSize = 1,
+ AdapterGetConnSucceededAbortOrInvalid,
+ AdapterGetConnSucceededReject,
+ AdapterGetConnSucceededMismatchedVersion,
+ AdapterGetConnSucceededPGFind,
+ AdapterGetConnSucceededRank,
+ AdapterGetConnSucceededHeadToHeadReject,
+ AdapterGetConnSucceededHeadToHeadShutdown,
+ AdapterGetConnSucceededAdapterShutdown,
+ AdapterGetConnSucceededDefaultReject,
+ AdapterGetConnSucceededSuccess
+};
+
+//used for events in ch3u_nd_endpoint.cpp
+enum Endpoint{
+ EndpointCompleteConnectBufferSize = 1,
+ EndpointCompleteConnectDefault,
+ EndpointCompleteConnectPending,
+ EndpointConnReqFailedPassive,
+ EndpointConnReqFailedCanceled,
+ EndpointConnReqFailedFailed,
+ EndpointConnCompleted,
+ EndpointConnFailedRetry,
+ EndpointConnFailedFail,
+ EndpointAcceptPending,
+ EndpointPrepostReceivesFailed,
+ EndpointAcceptCompleted,
+ EndpointAcceptFailedAbortedOrTimeout,
+ EndpointAcceptFailedFailed,
+ EndpointDisconnect,
+ EndpointConnect,
+ EndpointAccept,
+ EndpointHandleTimeoutConnectTimeout,
+ EndpointCompleteConnectAbortedOrInvalid,
+ EndpointCompleteConnectCompleteConnect,
+ EndpointHandleTimeoutConnect
+};
+
+//used for events in ch3u_nd_env.cpp
+enum EnvironmentListen{
+ EnvironmentListenNoNDv2Providers = 1,
+ EnvironmentListenQueryAddressListForSizeFailed,
+ EnvironmentListenQueryAddressListFailed
+};
+
+enum EnvironmentConnect{
+ EnvironmentConnectGetAddrsFromBc = 1,
+ EnvironmentConnectNoLocalNoRemoteForce,
+ EnvironmentConnectNoLocalForce,
+ EnvironmentConnectNoLocalNoFallback,
+ EnvironmentConnectNoLocalNoFallbackForce,
+ EnvironmentConnectNoRemoteForce,
+ EnvironmentConnectNoRemoteNoFallback,
+ EnvironmentConnectNoPathForce,
+ EnvironmentConnectNoPathNoFallback,
+ EnvironmentConnectNoLocalFallback,
+ EnvironmentConnectNoRemoteFallback,
+ EnvironmentConnectNoPathFallback
+};
+
+//used for events in ch3_progress_connect.cpp
+enum Shm_connect{
+ Shm_connectConnectQueueName = 1,
+ Shm_connectConnectQueueAttach,
+ Shm_connectWriteQueue,
+ Shm_connectNotifyConnect
+};
+
+enum Shm_accept{
+ Shm_acceptQueueAttach = 1,
+ Shm_acceptMismatchedVersion,
+ Shm_acceptPGFind,
+ Shm_acceptRank,
+ Shm_acceptGetConnStringFailed,
+ Shm_acceptGetStringArgFailed,
+ Shm_acceptBootstrapQueueAttach
+};
+
+//used for events in ch3_progress_sock.c
+enum RecvOpenRequestSucceeded{
+ RecvOpenRequestSucceededUnexpectedControl = 1,
+ RecvOpenRequestSucceededMismatchedVersion,
+ RecvOpenRequestSucceededInternal,
+ RecvOpenRequestSucceededSuccess
+};
+
+//used for events in sock.c
+enum ConnectFailedEnum{
+ ConnectFailedEnumAbortedBeforeTimeout = 1,
+ ConnectFailedEnumTimeout,
+ ConnectFailedEnumAbortedClosing,
+ ConnectFailedEnumRefused,
+ ConnectFailedEnumError,
+ ConnectFailedEnumExhausted,
+ ConnectFailedEnumFail
+};
+
+
+//
+// Mpi specific wrappers around trace functions
+//
+ULONG MpiTraceError(
+ REGHANDLE RegHandle,
+ PCEVENT_DESCRIPTOR EventDescriptor,
+ int ErrorCode
+ );
+
+
+#define SENTINEL_MASK ((ULONG_PTR)0x01)
+
+#define IS_SENTINEL(p_) (0 == (((ULONG_PTR)p_) & (~SENTINEL_MASK)))
+
+#define SENTINEL_SAFE_SIZE(p_) (IS_SENTINEL(p_)?0:sizeof(*p_))
+
+#define SENTINEL_SAFE_COUNT(p_,c_) (IS_SENTINEL(p_)?0:c_)
+
+//
+// This is a generated header.
+//
+#include "MpiTraceEvents.h"
+
+
+#ifndef MAX_TRACE_ARRAY_VALUE_COUNT
+#define MAX_TRACE_ARRAY_VALUE_COUNT 1
+#endif
+
+#ifndef TraceArrayLength
+#define TraceArrayLength(_c_) ((BYTE)(_c_<=MAX_TRACE_ARRAY_VALUE_COUNT?_c_:MAX_TRACE_ARRAY_VALUE_COUNT))
+#endif
+
+
+//
+// Conditional trace macros. They will only trace if enabled. The generated header
+// contains the macros and values we use for these tests. See that header for more deatils.
+//
+
+#define TraceError(_fn_,_errorcode_) \
+ MCGEN_ENABLE_CHECK(MICROSOFT_HPC_MPI_PROVIDER_Context, EVENT_Error_##_fn_) ?\
+ MpiTraceError(Microsoft_HPC_MPIHandle, &EVENT_Error_##_fn_,_errorcode_) \
+ : ERROR_SUCCESS
+
diff --git a/src/mpi/common/mpitrace.man b/src/mpi/common/mpitrace.man
new file mode 100644
index 0000000..5dd3d33
--- /dev/null
+++ b/src/mpi/common/mpitrace.man
@@ -0,0 +1,30663 @@
+
+
+
+
+
+
+
+
+
+ false
+
+ 16106127360
+
+
+
+ 5
+
+ 0x00
+
+ 2
+
+ QPC
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ false
+
+ 16106127360
+
+
+
+ 5
+
+ 0x00
+
+ 2
+
+ QPC
+
+
+
+
+
+
+ false
+
+ 16106127360
+
+
+
+ 5
+
+ 0x00
+
+ 2
+
+ QPC
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ queued
+ connect
+ head
+ inline dump
+ continue
+ done
+ packet
+ data
+ deferconnect
+ deferwrite
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/mpi/common/mpiutil.cpp b/src/mpi/common/mpiutil.cpp
new file mode 100644
index 0000000..602c475
--- /dev/null
+++ b/src/mpi/common/mpiutil.cpp
@@ -0,0 +1,227 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+
+#include "precomp.h"
+#include "winsock2.h"
+
+BOOL
+env_is_on_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ BOOL defval
+ )
+{
+ wchar_t env[5];
+
+ DWORD err = MPIU_Getenv( name, env, _countof(env) );
+ if( err == ERROR_ENVVAR_NOT_FOUND && deprecatedName != nullptr )
+ {
+ err = MPIU_Getenv( deprecatedName, env, _countof(env) );
+ }
+
+ if( err != NOERROR )
+ {
+ return defval;
+ }
+
+ if( CompareStringW( LOCALE_INVARIANT,
+ 0,
+ env,
+ -1,
+ L"1",
+ -1 ) == CSTR_EQUAL )
+ {
+ return TRUE;
+ }
+
+ if( CompareStringW( LOCALE_INVARIANT,
+ NORM_IGNORECASE,
+ env,
+ -1,
+ L"on",
+ -1 ) == CSTR_EQUAL ||
+ CompareStringW( LOCALE_INVARIANT,
+ NORM_IGNORECASE,
+ env,
+ -1,
+ L"yes",
+ -1 ) == CSTR_EQUAL ||
+ CompareStringW( LOCALE_INVARIANT,
+ NORM_IGNORECASE,
+ env,
+ -1,
+ L"true",
+ -1 ) == CSTR_EQUAL )
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+int
+env_to_int_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ int defval,
+ _In_ int minval
+ )
+{
+ wchar_t val[12];
+ DWORD err = MPIU_Getenv( name, val, _countof(val) );
+ if( err == ERROR_ENVVAR_NOT_FOUND && deprecatedName != nullptr )
+ {
+ err = MPIU_Getenv( deprecatedName, val, _countof(val) );
+ }
+
+ if( err != NOERROR )
+ {
+ return defval;
+ }
+
+ defval = _wtoi(val);
+ if(defval < minval)
+ {
+ return minval;
+ }
+
+ return defval;
+}
+
+
+_Success_(return == TRUE)
+BOOL
+env_to_range_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ int minval,
+ _In_ int maxval,
+ _In_ bool allowSingleValue,
+ _Out_ int* low,
+ _Out_ int* high
+ )
+{
+ //
+ // We need a string big enough to be able to store
+ // -INT_MAX : INT_MAX. 64 should be plenty
+ //
+ wchar_t range[64];
+ wchar_t* next_token = nullptr;
+ DWORD err = MPIU_Getenv( name, range, _countof(range) );
+ if( err == ERROR_ENVVAR_NOT_FOUND && deprecatedName != nullptr )
+ {
+ err = MPIU_Getenv( deprecatedName, range, _countof(range) );
+ }
+
+ if( err != NOERROR )
+ {
+ return FALSE;
+ }
+
+ const wchar_t* pCur;
+
+ //
+ // tokenize min,max OR min:max OR min..max
+ //
+ pCur = wcstok_s( range, L",.:", &next_token );
+ if( pCur == nullptr )
+ {
+ return FALSE;
+ }
+
+ int tmpLow = _wtoi( pCur );
+ if( tmpLow < minval )
+ {
+ tmpLow = minval;
+ }
+
+ int tmpHigh;
+ pCur = wcstok_s( nullptr, L",.:", &next_token );
+ if( pCur != nullptr )
+ {
+ tmpHigh = _wtoi( pCur );
+ if( tmpHigh > maxval )
+ {
+ tmpHigh = maxval;
+ }
+ }
+ else if( allowSingleValue )
+ {
+ tmpHigh = tmpLow;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ if( tmpHigh < tmpLow )
+ {
+ return FALSE;
+ }
+
+ *low = tmpLow;
+ *high = tmpHigh;
+ return TRUE;
+}
+
+
+#define MAX_TCP_PORT 65535
+
+int
+FindNextOpenPort(
+ int startPort
+ )
+{
+ if( startPort > MAX_TCP_PORT )
+ {
+ return 0;
+ }
+
+ WSADATA wsaData;
+ int ret = WSAStartup( MAKEWORD(2,0), &wsaData );
+ if( ret != 0 )
+ {
+ return 0;
+ }
+
+ SOCKET server;
+ SOCKADDR_IN sockAddr;
+ int port = startPort;
+
+ sockAddr.sin_family = AF_INET;
+ sockAddr.sin_addr.s_addr = INADDR_ANY;
+
+ server = socket( AF_INET, SOCK_STREAM, 0 );
+ if( server == INVALID_SOCKET )
+ {
+ return 0;
+ }
+
+ for( ;; )
+ {
+ sockAddr.sin_port = htons( static_cast<u_short>( port ) );
+ if( bind( server,
+ reinterpret_cast<sockaddr*>( &sockAddr ),
+ sizeof( sockAddr ) ) == SOCKET_ERROR )
+ {
+ ++port;
+ if( port > MAX_TCP_PORT )
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ //
+ // Found an open port
+ //
+ break;
+ }
+ }
+
+ closesocket( server );
+ WSACleanup();
+ return port;
+}
diff --git a/src/mpi/common/mpiutil.h b/src/mpi/common/mpiutil.h
new file mode 100644
index 0000000..d25e084
--- /dev/null
+++ b/src/mpi/common/mpiutil.h
@@ -0,0 +1,360 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#if !defined(MPIUTIL_H_INCLUDED)
+#define MPIUTIL_H_INCLUDED
+#include <oacr.h>
+#include <strsafe.h>
+
+/*
+ * Debuging function and macros
+ */
+void MPIU_dbg_preinit(void);
+
+_Success_(return==MPI_SUCCESS)
+int MPIU_dbg_init(
+ _In_ unsigned int rank,
+ _In_ unsigned int world_size
+ );
+
+void
+MPIU_dbg_printf(
+ _Printf_format_string_ const char *str,
+ ...
+ );
+
+
+_Success_(return==MPI_SUCCESS)
+int
+MPIU_Parse_rank_range(
+ _In_ unsigned int rank,
+ _In_z_ const char* range,
+ _In_ unsigned int world_size,
+ _Out_ bool* isWithinRange,
+ _Out_writes_(world_size) unsigned int* total_unique_ranks
+ );
+
+struct MPID_Comm;
+
+
+/*@
+ MPID_Abort - Abort at least the processes in the specified communicator.
+
+ Input Parameters:
++ comm - Communicator of processes to abort
+. intern - indicates if the abort is internal or by the application.
+. exit_code - Exit code to return to the calling environment. See notes.
+- error_msg - error message (not optional)
+
+ Return value:
+ 'MPI_SUCCESS' or an MPI error code. Normally, this routine should not
+ return, since the calling process must be a member of the communicator.
+ However, under some circumstances, the 'MPID_Abort' might fail; in this
+ case, returning an error indication is appropriate.
+
+ Notes:
+
+ In a fault-tolerant MPI implementation, this operation should abort `only`
+ the processes in the specified communicator. Any communicator that shares
+ processes with the aborted communicator becomes invalid. For more
+ details, see (paper not yet written on fault-tolerant MPI).
+
+ In particular, if the communicator is 'MPI_COMM_SELF', only the calling
+ process should be aborted.
+
+ The 'exit_code' is the exit code that this particular process will
+ attempt to provide to the 'mpiexec' or other program invocation
+ environment. See 'mpiexec' for a discussion of how exit codes from
+ many processes may be combined.
+
+ If the error_msg field is non-nullptr this string will be used as the message
+ with the abort output. Otherwise, the output message will be base on the
+ error message associated with the mpi_errno.
+
+ An external agent that is aborting processes can invoke this with either
+ 'MPI_COMM_WORLD' or 'MPI_COMM_SELF'. For example, if the process manager
+ wishes to abort a group of processes, it should cause 'MPID_Abort' to
+ be invoked with 'MPI_COMM_SELF' on each process in the group.
+
+ Question:
+ An alternative design is to provide an 'MPID_Group' instead of a
+ communicator. This would allow a process manager to ask the ADI
+ to kill an entire group of processes without needing a communicator.
+ However, the implementation of 'MPID_Abort' will either do this by
+ communicating with other processes or by requesting the process manager
+ to kill the processes. That brings up this question: should
+ 'MPID_Abort' use 'PMI' to kill processes? Should it be required to
+ notify the process manager? What about persistent resources (such
+ as SYSV segments or forked processes)?
+
+ This suggests that for any persistent resource, an exit handler be
+ defined. These would be executed by 'MPID_Abort' or 'MPID_Finalize'.
+ See the implementation of 'MPI_Finalize' for an example of exit callbacks.
+ In addition, code that registered persistent resources could use persistent
+ storage (i.e., a file) to record that information, allowing cleanup
+ utilities (such as 'mpiexec') to remove any resources left after the
+ process exits.
+
+ 'MPI_Finalize' requires that attributes on 'MPI_COMM_SELF' be deleted
+ before anything else happens; this allows libraries to attach end-of-job
+ actions to 'MPI_Finalize'. It is valuable to have a similar
+ capability on 'MPI_Abort', with the caveat that 'MPI_Abort' may not
+ guarantee that the run-on-abort routines were called. This provides a
+ consistent way for the MPICH implementation to handle freeing any
+ persistent resources. However, such callbacks must be limited since
+ communication may not be possible once 'MPI_Abort' is called. Further,
+ any callbacks must guarantee that they have finite termination.
+
+ One possible extension would be to allow `users` to add actions to be
+ run when 'MPI_Abort' is called, perhaps through a special attribute value
+ applied to 'MPI_COMM_SELF'. Note that is is incorrect to call the delete
+ functions for the normal attributes on 'MPI_COMM_SELF' because MPI
+ only specifies that those are run on 'MPI_Finalize' (i.e., normal
+ termination).
+
+ Module:
+ MPID_CORE
+ @*/
+_Analysis_noreturn_
+DECLSPEC_NORETURN
+int MPID_Abort(
+ _Inout_opt_ MPID_Comm* comm,
+ _In_ BOOL intern,
+ _In_ int exit_code,
+ _In_z_ const char* error_msg
+ );
+
+_Success_(return>=0)
+int
+MPIU_Internal_error_printf(
+ _Printf_format_string_ const char *str,
+ ...
+ );
+
+_Success_(return>=0)
+int
+MPIU_Error_printf(
+ _Printf_format_string_ const char *str,
+ ...
+ );
+
+
+static inline void MPIU_Debug_break(void)
+{
+ //
+ // Debug break without giving a chance to any exception handler to ignore the break
+ //
+ __try
+ {
+ __debugbreak();
+ }
+ __except(UnhandledExceptionFilter(GetExceptionInformation()))
+ OACR_WARNING_SUPPRESS(EXCEPT_BLOCK_EMPTY,"lucasm: debug break handler")
+ {
+ }
+}
+
+
+#define ASSERT(a_) MPIU_Assert(a_)
+#define VERIFY(a_) MPIU_Assertp(a_)
+
+/*
+ * MPIU_Assert()
+ *
+ * Similar to assert() except that it performs an MPID_Abort() when the
+ * assertion fails. Also, for Windows, it doesn't popup a
+ * mesage box on a remote machine.
+ *
+ * MPIU_AssertDecl may be used to include declarations only needed
+ * when MPIU_Assert is non-null (e.g., when assertions are enabled)
+ */
+
+#if DBG
+
+#define MPIU_DebugBuildCode(a_) a_
+#define MPIU_Assert(a_) \
+ (void) ((!!(a_)) || \
+ (MPIU_Internal_error_printf("Assertion failed in %s(%d): %s\n", __FILE__, __LINE__, #a_), 0) || \
+ (MPIU_Debug_break(), 0) || \
+ (MPID_Abort(nullptr, TRUE, 0, "assertion failed")) \
+ ); __analysis_assume(a_)
+
+#define MPIU_Assertp(a_) MPIU_Assert(a_)
+
+#else
+
+#define MPIU_Assert(a_) __analysis_assume(a_)
+#define MPIU_DebugBuildCode(a_)
+
+/*
+ * MPIU_Assertp()
+ *
+ * Similar to MPIU_Assert() except that these assertions persist regardless of
+ * DBG. MPIU_Assertp() may be used for error checking in prototype code, although
+ * it should be converted real error checking and reporting once the prototype
+ * becomes part of the official and supported code base.
+ */
+#define MPIU_Assertp(a_) \
+ (void) ((!!(a_)) || \
+ (MPIU_Internal_error_printf("Assertion failed in %s(%d): %s\n", __FILE__, __LINE__, #a_), 0) || \
+ (MPID_Abort(nullptr, TRUE, 0, "assertion failed")) \
+ ); __analysis_assume(a_)
+
+#endif
+
+
+/*@ env_is_on - Check if an environment variable is in the 'on' state
+
+ Return value:
+ 'def' if the env var is not set
+ 1 if the env var is set to 'on'
+ 0 if the env var is set not to to 'on'
+
+ @*/
+BOOL
+env_is_on_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ BOOL defval
+ );
+
+
+int
+env_to_int_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ int defval,
+ _In_ int minval
+ );
+
+
+_Success_(return == TRUE)
+BOOL
+env_to_range_ex(
+ _In_z_ const wchar_t* name,
+ _In_opt_z_ const wchar_t* deprecatedName,
+ _In_ int minval,
+ _In_ int maxval,
+ _In_ bool allowSingleValue,
+ _Out_ int* low,
+ _Out_ int* high
+ );
+
+
+inline BOOL
+env_is_on(
+ _In_z_ const wchar_t* name,
+ _In_ BOOL defval
+ )
+{
+ return env_is_on_ex(name, nullptr, defval);
+}
+
+
+inline int
+env_to_int(
+ _In_z_ const wchar_t *name,
+ _In_ int defval,
+ _In_ int minval
+ )
+{
+ return env_to_int_ex(name, nullptr, defval, minval);
+}
+
+
+_Success_(return == TRUE)
+inline BOOL
+env_to_range(
+ _In_z_ const wchar_t* name,
+ _In_ int minval,
+ _In_ int maxval,
+ _In_ bool allowSingleValue,
+ _Out_ int* low,
+ _Out_ int* high
+ )
+{
+ return env_to_range_ex( name, nullptr, minval, maxval, allowSingleValue, low, high );
+}
+
+
+_Success_(return == NO_ERROR)
+DWORD
+MPIU_Getenv(
+ _In_z_ PCSTR name,
+ _Out_writes_z_(cchBuffer)PSTR buffer,
+ _In_ DWORD cchBuffer
+ );
+
+
+_Success_(return == NO_ERROR)
+DWORD
+MPIU_Getenv(
+ _In_z_ PCWSTR name,
+ _Out_writes_z_(cchBuffer)PWSTR buffer,
+ _In_ DWORD cchBuffer
+ );
+
+
+//
+// Max string representation of GUID with hyphens, no braces
+//
+#define GUID_STRING_LENGTH 36
+
+static inline void
+GuidToStr(
+ _In_ const GUID& guid,
+ _Out_writes_z_(cchBuffer) char* buffer,
+ _In_range_(>,GUID_STRING_LENGTH) size_t cchBuffer
+ )
+{
+ MPIU_Assert( cchBuffer > GUID_STRING_LENGTH );
+
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Ignore return value, we have an existing assert for buffer size");
+ OACR_WARNING_SUPPRESS(USE_WIDE_API, "MS MPI uses ANSI character set.");
+ (void) StringCchPrintfA(
+ buffer,
+ cchBuffer,
+ "%8.8x-%4.4x-%4.4x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x",
+ guid.Data1, guid.Data2, guid.Data3,
+ guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
+ guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]
+ );
+}
+
+
+static inline void
+GuidToStr(
+ _In_ const GUID& guid,
+ _Out_writes_z_(cchBuffer) wchar_t* buffer,
+ _In_range_(>,GUID_STRING_LENGTH) size_t cchBuffer
+ )
+{
+ MPIU_Assert( cchBuffer > GUID_STRING_LENGTH );
+
+ OACR_WARNING_SUPPRESS(HRESULT_NOT_CHECKED, "Ignore return value, we have an existing assert for buffer size");
+ (void) StringCchPrintfW(
+ buffer,
+ cchBuffer,
+ L"%8.8x-%4.4x-%4.4x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x",
+ guid.Data1, guid.Data2, guid.Data3,
+ guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
+ guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]
+ );
+}
+
+
+//
+// Summary: Given a starting port, find an open TCP port that can be
+// used for listening
+//
+int
+FindNextOpenPort(
+ int startPort
+ );
+
+#endif /* !defined(MPIUTIL_H_INCLUDED) */
diff --git a/src/mpi/common/msgprint.cpp b/src/mpi/common/msgprint.cpp
new file mode 100644
index 0000000..1091ba6
--- /dev/null
+++ b/src/mpi/common/msgprint.cpp
@@ -0,0 +1,50 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "precomp.h"
+#include <stdio.h>
+#include <stdarg.h>
+
+_Success_(return>=0)
+int
+MPIU_Error_printf(
+ _Printf_format_string_ const char *str,
+ ...
+ )
+{
+ int n;
+ va_list list;
+
+ va_start(list, str);
+ n = vfprintf(stderr, str, list);
+ va_end(list);
+
+ fflush(stderr);
+
+ return n;
+}
+
+_Success_(return>=0)
+int
+MPIU_Internal_error_printf(
+ _Printf_format_string_ const char *str,
+ ...
+ )
+{
+ int n;
+ va_list list;
+
+ va_start(list, str);
+ n = vfprintf(stderr, str, list);
+ va_end(list);
+
+ fflush(stderr);
+
+ return n;
+}
+
diff --git a/src/mpi/common/parse.sub b/src/mpi/common/parse.sub
new file mode 100644
index 0000000..2a2494e
--- /dev/null
+++ b/src/mpi/common/parse.sub
@@ -0,0 +1,228 @@
+#! /usr/bin/perl
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#
+# File of useful routines use to process the MPI source files. This is
+# included by routines that process parameters and error messages, along with
+# tools to check for proper usage (such as system routines and preprocessor
+# tests).
+
+#
+# GetBalancedParen( FD, curline )
+# Returns a balanced parenthesis string, starting at curline. Reads from FD
+# if necessary. Skips any comments.
+# Returns the pair (leading, result, remainder)
+# Leading is anything before the opening paren. If no opening paren in the
+# line, returns the current line as "leading"
+# Newlines are removed.
+sub GetBalancedParen {
+ my $paren_count = 1;
+ my $result = "";
+ my $count = 0;
+ my $leading = "";
+ my $maxcount = 200;
+ $FD = $_[0];
+ $curline = $_[1];
+ # Remove escaped newlines
+ $curline =~ s/\\$//;
+
+ if ($curline =~ /^([^\(]*)\((.*)$/) {
+ $leading = $1;
+ $curline = $2;
+ $result = "(";
+ print "Found open paren\n" if $debug;
+ }
+ else {
+ $leading = $curline;
+ return ($leading, "", "" );
+ }
+
+ while ($count < $maxcount && $paren_count > 0) {
+ if ($curline =~ /^([^\(\)]*\()(.*$)/) {
+ # Found an opening paren
+ $result .= $1;
+ $curline = $2;
+ $paren_count++;
+ print "Found open paren\n" if $debug;
+ }
+ elsif ($curline =~ /^([^\(\)]*\))(.*$)/) {
+ # Found a closing paren
+ $result .= $1;
+ $curline = $2;
+ $paren_count--;
+ print "Found close paren\n" if $debug;
+ }
+ else {
+ # Need to read a new line
+ $result .= $curline;
+ $curline = <$FD>;
+ $curline =~ s/[\r]*\n//;
+ # Remove escaped newlines
+ $curline =~ s/\\$//;
+ }
+ $count ++;
+ }
+ return ($leading, $result, $curline);
+}
+
+# Like get balanced paren, but for a string. Simpler because it does not need
+# to handle balanced text.
+sub GetString {
+ my $result = "";
+ my $count = 0;
+ my $leading = "";
+ my $maxcount = 200;
+ $FD = $_[0];
+ $curline = $_[1];
+
+ if ($curline =~ /^([^\"]*)\"(.*)$/) {
+ $leading = $1;
+ $curline = $2;
+ $result = "\"";
+ print "Found quote\n" if $debug;
+ }
+ else {
+ $leading = $curline;
+ return ($leading, "", "" );
+ }
+
+ while ($count < $maxcount) {
+ if ($curline =~ /^([^\"]*\\\")(.*$)/) {
+ # Found an escaped quote
+ $result .= $1;
+ $curline = $2;
+ print "Found escaped quote\n" if $debug;
+ }
+ elsif ($curline =~ /^([^\"]*\")(.*$)/) {
+ # Found the closing quote
+ $result .= $1;
+ $curline = $2;
+ print "Found closing quote\n" if $debug;
+ last;
+ }
+ else {
+ # Need to read a new line
+ $result .= $curline;
+ $curline = <$FD>;
+ $curline =~ s/[\r]*\n//;
+ }
+ $count ++;
+ }
+ return ($leading, $result, $curline);
+}
+#
+# GetSubArgs( FD, curline ) returns an array of the arguments of a routine.
+# Specifically, it converts (a,b,c) into an array containing "a", "b", and "c".
+# The special feature of this is that any commas that are within balanced
+# parenthesis are included within their argument.
+# Actually returns
+# (leader, remainder, (@args) )
+# in this order so the last values are always all of the args
+# so you don't need to know
+sub GetSubArgs {
+ my @args = ();
+ my $curline;
+ my ($outer, $leader, $remainder, $arg);
+
+ $FD = $_[0];
+ $curline = $_[1];
+ # Remove any embedded newlines
+ $curline =~ s/[\r\n]//g;
+
+ $curline =~ /^\(/ || die "No initial paren";
+ ($leader, $outer, $remainder ) = &GetBalancedParen( $FD, $curline );
+
+
+ # Strip off the first and last parens
+ # Because of the greedy algorithm, the \s before the closing paren
+ # won't be used. To avoid problems with empty arguments, we remove
+ # those blanks separately
+ $outer =~ /^\s*\(\s*(.*)\s*\)\s*$/;
+ $outer = $1;
+ if ($outer =~ /(.*)\s+$/) { $outer = $1; }
+ print "Line to tokenize is $outer\n" if $debug;
+ $arg = "";
+ while ($outer ne "") {
+ if ($outer =~ /^([^,\(\"]*)\s*,\s*(.*$)/) {
+ # simple arg
+ $arg .= $1;
+ $args[$#args+1] = $arg;
+ print "Found simple arg $arg (remainder $2)\n" if $debug;
+ $outer = $2;
+ $arg = "";
+ }
+ elsif ($outer =~ /^([^,\"]*)\((.*$)/) {
+ # arg with ()
+ ($startarg,$bal,$outer) = &GetBalancedParen( $FD, $outer );
+ $arg = $arg . $startarg . $bal;
+ # Rest of code will catch the rest
+ }
+ elsif ($outer =~ /^([^,\(]*)\"(.*$)/) {
+ # arg with ""
+ ($startarg,$string,$outer) = &GetString( $FD, $outer );
+ print "string is $string\n" if $debug;
+ $arg = $arg . $startarg . $string;
+ # Rest of code will catch the rest
+ }
+ else {
+ # no comma
+ print "Adding |$outer| to arg $arg\n" if $debug;
+ $arg .= $outer;
+ $outer = "";
+ }
+ }
+ if ($arg ne "") {
+ $args[$#args+1] = $arg;
+ }
+ print "Number of args is 1+$#args\n" if $debug;
+ return ($leader, $remainder, @args );
+}
+
+# remainder = StripComments( FD, inputline )
+# removes comments from a line and returns the line. Read more if necessary
+# Places the comment into $comment_line;
+# The external "cxx_header" adds // to the comments stripped
+
+# Set a default value for cxx_header
+if (!defined($cxx_header)) {
+ $cxx_header = 1;
+}
+
+sub StripComments {
+ my $FD = $_[0];
+ my $curline = $_[1];
+ my $remainder = "";
+ $comment_line = "";
+ if ($cxx_header == 1 && $curline =~ /(\/\/.*)/) {
+ $comment_line = $1;
+ $curline =~ s/\/\/.*//;
+ print "Removed C++ comment, now is $curline\n" if $debug;
+ return $curline;
+ }
+ while ($curline =~ /\/\*/) {
+ print "Curline = $curline\n" if $debug;
+ if ($curline =~ /(\/\*.*?\*\/)/s) {
+ $comment_line = $1;
+ $curline =~ s/\/\*.*?\*\///s;
+ print "Removed comment, now is $curline\n" if $debug;
+ # Keep looking for comments incase the line has multiple
+ # comments
+ }
+ else {
+ # Keep collecting until we find the end of the comment
+ if (eof($FD)) {
+ print STDOUT "Unterminated comment found$errsrc!\n";
+ my $line = $curline;
+ if ($line =~ /(.*)\n/) { $line = "$1"; }
+ print STDOUT "Comment begins with $line\n";
+ return $curline;
+ }
+ $curline .= <$FD>;
+ }
+ }
+ return $curline;
+}
+
+# Since this is a required package, indicate that we are successful.
+return 1;
diff --git a/src/mpi/common/parsing.cpp b/src/mpi/common/parsing.cpp
new file mode 100644
index 0000000..d24766f
--- /dev/null
+++ b/src/mpi/common/parsing.cpp
@@ -0,0 +1,159 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+_Ret_z_
+static const char*
+find_digits(
+ _In_z_ const char* p
+ )
+{
+ while(!isdigit(static_cast<unsigned char>(*p)))
+ {
+ if(*p == '\0')
+ return p;
+
+ p++;
+ }
+
+ return p;
+}
+
+
+_Ret_z_
+static const char*
+skip_digits(
+ _In_z_ const char* p
+ )
+{
+ while(isdigit(static_cast<unsigned char>(*p)))
+ {
+ p++;
+ }
+
+ return p;
+}
+
+//
+// Given the range of ranks a,b,d-f,x-z in which separators are any character
+// except '-', this function checks whether the given rank belongs to this range
+//
+// Input:
+// rank - the rank to check for inclusion
+// range - the string containing the range of ranks
+// world_size - the size of MPI_COMM_WORLD
+//
+// Output
+// isWithinRange - true if the rank belongs to this range
+// total_unique_ranks - the number of unique ranks in the list
+//
+// Return: MPI_SUCCESS if the call succeeded, error otherwise.
+//
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIU_Parse_rank_range(
+ _In_ unsigned int rank,
+ _In_z_ const char* range,
+ _In_ unsigned int world_size,
+ _Out_ bool* isWithinRange,
+ _Out_writes_(world_size) unsigned int* total_unique_ranks
+ )
+{
+ MPIU_Assert( range != nullptr );
+
+ if( CompareStringA( LOCALE_INVARIANT,
+ 0,
+ range,
+ -1,
+ "all",
+ -1 ) == CSTR_EQUAL ||
+ CompareStringA( LOCALE_INVARIANT,
+ 0,
+ range,
+ -1,
+ "*",
+ -1 ) == CSTR_EQUAL )
+ {
+ *isWithinRange = true;
+ *total_unique_ranks = world_size;
+ return MPI_SUCCESS;
+ }
+
+ //
+ // The first character has to be a digit
+ //
+ if( !isdigit( static_cast<unsigned char>( range[0] ) ) )
+ {
+ return MPIU_ERR_CREATE( MPI_ERR_OTHER, "**invalidrange %s", range );
+ }
+
+ bool *ranks = new bool[world_size]();
+
+ if (ranks == nullptr)
+ {
+ return MPIU_ERR_NOMEM();
+ }
+
+ for (unsigned int i=0; i < world_size; i++ )
+ {
+ ranks[i] = false;
+ }
+
+ bool found = false;
+ unsigned int total = 0;
+ for( const char* curPos = find_digits( range );
+ *curPos != '\0';
+ curPos = find_digits( curPos ) )
+ {
+ unsigned int low = atoi( curPos );
+ unsigned int high = low;
+
+ curPos = skip_digits( curPos );
+
+ if( *curPos == '-' )
+ {
+ //
+ // Anything not a digit and not '-' is a valid separator
+ //
+ curPos ++;
+ if ( isdigit( static_cast<unsigned char>( *curPos ) ) )
+ {
+ high = atoi( curPos );
+ }
+ }
+
+ for ( unsigned int i = low; i <= high; i++ )
+ {
+ if ( i >= world_size )
+ {
+ delete [] ranks;
+ return MPIU_ERR_CREATE(MPI_ERR_OTHER, "**rank %d %d", i, world_size );
+ }
+ if ( ranks[i] == true )
+ {
+ delete [] ranks;
+ return MPIU_ERR_CREATE(MPI_ERR_OTHER, "**rangedup %s %d", range, i);
+ }
+ ranks[i] = true;
+ total++;
+ }
+
+ if ( rank >= low && rank <= high )
+ {
+ found = true;
+ }
+ curPos = skip_digits( curPos );
+ }
+
+ *total_unique_ranks = total;
+ *isWithinRange = found;
+
+ delete [] ranks;
+ return MPI_SUCCESS;
+}
diff --git a/src/mpi/common/precomp.h b/src/mpi/common/precomp.h
new file mode 100644
index 0000000..493fa46
--- /dev/null
+++ b/src/mpi/common/precomp.h
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "assertutil.h"
+#include "mpidef.h"
+#include "mpierror.h"
+#include "mpiutil.h"
+#include "mpimem.h"
+#include "mpistr.h"
+#include
\ No newline at end of file
diff --git a/src/mpi/common/rpcutil.cpp b/src/mpi/common/rpcutil.cpp
new file mode 100644
index 0000000..5eb1e84
--- /dev/null
+++ b/src/mpi/common/rpcutil.cpp
@@ -0,0 +1,426 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "precomp.h"
+#include "rpc.h"
+#include "rpcutil.h"
+#include "ntdsapi.h"
+
+#define SECURITY_WIN32
+#include "security.h"
+
+extern "C"
+void __RPC_FAR *
+__RPC_USER MIDL_user_allocate(
+ size_t cBytes
+ )
+{
+ return malloc( cBytes );
+}
+
+
+extern "C"
+void __RPC_USER
+MIDL_user_free(
+ void* pBuffer
+ )
+{
+ free( pBuffer );
+}
+
+
+//
+// Summary:
+// Start the RPC server
+//
+// In:
+// pProtSeq : The protocol sequence
+// pEndpoint : The endpoint (port). If null, we use dynamic port
+// rpcInterface : The RPC interface handle
+// pSecurityCallbackFn: The security callback function
+// maxConcurrentCalls : Number of concurrent client calls that the server will accept
+// localOnly : If true, use only LRPC for the server
+//
+// Out:
+// pPort : The TCP port that the RPC server is accepting calls on
+// pLrpcEndpoint : The LRPC port that the RPC server is accepting calls on
+//
+// Return:
+// NOERROR on success, other errors otherwise
+//
+_Success_(return == NOERROR)
+RPC_STATUS
+StartRpcServer(
+ _In_z_ PCWSTR pProtSeq,
+ _In_opt_z_ PCWSTR pEndpoint,
+ _In_ RPC_IF_HANDLE rpcInterface,
+ _In_opt_ RPC_IF_CALLBACK_FN* pSecurityCallbackFn,
+ _Out_opt_ UINT16* pPort,
+ _Out_opt_ GUID* pLrpcEndpoint,
+ _In_ UINT maxConcurrentCalls,
+ _In_ bool localOnly
+ )
+{
+ RPC_STATUS status = RPC_S_OK;
+
+ GUID lrpcEp = {0};
+ if( pLrpcEndpoint != nullptr )
+ {
+ //
+ // Enable listening on LRPC.
+ // We generate a GUID to use as endpoint for LRPC.
+ //
+ status = UuidCreate( &lrpcEp );
+ if( status != RPC_S_OK && status != RPC_S_UUID_LOCAL_ONLY )
+ {
+ return status;
+ }
+
+ wchar_t guidStr[37];
+ GuidToStr( lrpcEp, guidStr, _countof(guidStr) );
+
+ wchar_t protSeq[] = L"ncalrpc";
+ status = RpcServerUseProtseqEpW(
+ protSeq,
+ RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
+ guidStr,
+ nullptr
+ );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+ }
+
+ if( localOnly == false )
+ {
+ //
+ // Dynamic and static endpoints are setup through different RPC APIs
+ //
+ if( pEndpoint == nullptr )
+ {
+ //
+ // Specify that we will be using TCP socket with dynamic endpoint
+ //
+ status = RpcServerUseProtseqW(
+ const_cast<RPC_WSTR>( pProtSeq ),
+ RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
+ nullptr
+ );
+ }
+ else
+ {
+ //
+ // Specify that we will be using TCP socket with static endpoint
+ //
+ status = RpcServerUseProtseqEpW(
+ const_cast<RPC_WSTR>( pProtSeq ),
+ RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
+ const_cast<RPC_WSTR>( pEndpoint ),
+ nullptr
+ );
+ }
+
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ wchar_t computerName[MAX_COMPUTERNAME_LENGTH + 1];
+ DWORD len = _countof(computerName);
+ if( GetComputerNameW( computerName, &len ) == 0 )
+ {
+ return GetLastError();
+ }
+
+ wchar_t spn[MAX_PATH+1];
+ len = _countof(spn);
+
+ status = DsMakeSpnW(
+ MSMPI_SPN_SERVICE_NAME,
+ computerName,
+ nullptr,
+ 0,
+ nullptr,
+ &len,
+ spn );
+ if( status != ERROR_SUCCESS )
+ {
+ return status;
+ }
+
+#if !defined(MSMPI_NO_SEC)
+ status = RpcServerRegisterAuthInfoW( spn,
+ RPC_C_AUTHN_GSS_NEGOTIATE,
+ nullptr,
+ nullptr );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+#else
+ pSecurityCallbackFn = nullptr;
+#endif
+ }
+ else
+ {
+ ASSERT( pLrpcEndpoint != nullptr && pPort == nullptr );
+ pSecurityCallbackFn = nullptr;
+ }
+
+ //
+ // Register the interface and start the server
+ //
+ status = RpcServerRegisterIfEx( rpcInterface,
+ nullptr,
+ nullptr,
+ RPC_IF_AUTOLISTEN,
+ maxConcurrentCalls,
+ pSecurityCallbackFn );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ if( pPort == nullptr )
+ {
+ if( pLrpcEndpoint != nullptr )
+ {
+ *pLrpcEndpoint = lrpcEp;
+ }
+
+ return NOERROR;
+ }
+
+ //
+ // Extract the dynamic port that the server is using
+ //
+ RPC_BINDING_VECTOR* pBindingVector;
+
+ //
+ // Get the server binding handle vector. This vector has information
+ // about the server bindings (which includes the port)
+ //
+ status = RpcServerInqBindings( &pBindingVector );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ wchar_t* bindingStr;
+ wchar_t* endpointStr;
+ wchar_t* protSeqStr;
+ bool found = false;
+ for( unsigned i = 0; i < pBindingVector->Count; ++i )
+ {
+ status = RpcBindingToStringBindingW(
+ pBindingVector->BindingH[i],
+ reinterpret_cast<RPC_WSTR*>( &bindingStr ) );
+
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ //
+ // Get the port
+ //
+ status = RpcStringBindingParseW(
+ bindingStr,
+ nullptr,
+ &protSeqStr,
+ nullptr,
+ &endpointStr,
+ nullptr
+ );
+ RpcStringFreeW( &bindingStr );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ if( CompareStringW( LOCALE_INVARIANT,
+ 0,
+ protSeqStr,
+ -1,
+ L"ncacn_ip_tcp",
+ -1 ) == CSTR_EQUAL )
+ {
+ *pPort = static_cast<UINT16>(_wtoi( endpointStr ));
+ found = true;
+
+ if( env_is_on(L"MPIEXEC_USE_NP", FALSE) )
+ {
+ wchar_t npEndpoint[64];
+ MPIU_Snprintf(
+ npEndpoint,
+ _countof( npEndpoint ),
+ L"\\pipe\\msmpi\\smpd\\%s",
+ endpointStr );
+
+ status = RpcServerUseProtseqEpW(
+ reinterpret_cast<RPC_WSTR>( L"ncacn_np" ),
+ 0,
+ reinterpret_cast<RPC_WSTR>( npEndpoint ),
+ NULL );
+ }
+ RpcStringFreeW( &endpointStr );
+ RpcStringFreeW( &protSeqStr );
+ break;
+ }
+
+ RpcStringFreeW( &endpointStr );
+ RpcStringFreeW( &protSeqStr );
+ }
+
+ RpcBindingVectorFree( &pBindingVector );
+
+ ASSERT( found == true );
+ if( pLrpcEndpoint != nullptr )
+ {
+ *pLrpcEndpoint = lrpcEp;
+ }
+
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+ return NOERROR;
+}
+
+
+//
+// Summary:
+// Stop this RPC server
+//
+// In:
+// rpcInterface : The RPC interface handle
+//
+RPC_STATUS
+StopRpcServer(
+ _In_ RPC_IF_HANDLE rpcInterface
+ )
+{
+ return RpcServerUnregisterIf( rpcInterface, nullptr, FALSE );
+}
+
+
+_Success_(return == NOERROR)
+RPC_STATUS
+CreateRpcBinding(
+ _In_ PCWSTR pProtSeq,
+ _In_opt_z_ PCWSTR pHostName,
+ _In_ PCWSTR pEndpoint,
+ _In_ UINT AuthnLevel,
+ _In_ UINT AuthnSvc,
+ _In_opt_ RPC_AUTH_IDENTITY_HANDLE pAuthIdentity,
+ _Out_ handle_t* phBinding
+ )
+{
+ PWSTR bindingStr;
+ RPC_STATUS status = RpcStringBindingComposeW(
+ nullptr,
+ const_cast<RPC_WSTR>(pProtSeq),
+ const_cast<RPC_WSTR>(pHostName),
+ const_cast<RPC_WSTR>(pEndpoint),
+ nullptr,
+ &bindingStr
+ );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+ handle_t hBinding;
+ status = RpcBindingFromStringBindingW(
+ bindingStr,
+ &hBinding
+ );
+ RpcStringFreeW( &bindingStr );
+ if( status != RPC_S_OK )
+ {
+ return status;
+ }
+
+#if !defined(MSMPI_NO_SEC)
+ wchar_t* pSpn = nullptr;
+ wchar_t spn[MAX_PATH+1];
+
+ SEC_WINNT_AUTH_IDENTITY_EXW secAuth;
+ if( AuthnSvc == RPC_C_AUTHN_GSS_NEGOTIATE &&
+ pAuthIdentity == nullptr )
+ {
+ DWORD len = _countof(spn);
+ status = DsMakeSpnW(
+ MSMPI_SPN_SERVICE_NAME,
+ pHostName,
+ nullptr,
+ 0,
+ nullptr,
+ &len,
+ spn );
+ if( status != ERROR_SUCCESS )
+ {
+ if( status == ERROR_INVALID_PARAMETER )
+ {
+ //
+ // This should only happen because the host is an IP
+ // address and not a proper name. Kerberos requires
+ // names which means we will have to disable Kerberos
+ // to authenticate.
+ //
+ InitializeAuthIdentity(
+ disableKerbStr,
+ _countof(DISABLE_KERB_STR) - 1,
+ &secAuth );
+ pAuthIdentity = &secAuth;
+ }
+ else
+ {
+ RpcBindingFree( &hBinding );
+ return status;
+ }
+ }
+ else
+ {
+ pSpn = spn;
+ }
+ }
+
+ status = RpcBindingSetAuthInfoW(
+ hBinding,
+ pSpn,
+ AuthnLevel,
+ AuthnSvc,
+ pAuthIdentity,
+ 0 );
+ if( status != RPC_S_OK )
+ {
+ RpcBindingFree( &hBinding );
+ return status;
+ }
+#else
+ UNREFERENCED_PARAMETER(AuthnLevel);
+ UNREFERENCED_PARAMETER(AuthnSvc);
+ UNREFERENCED_PARAMETER(pAuthIdentity);
+#endif
+
+ *phBinding = hBinding;
+ return NOERROR;
+}
+
+
+void
+InitializeAuthIdentity(
+ _In_ PWSTR packageStr,
+ _In_ DWORD packageLen,
+ _Out_ SEC_WINNT_AUTH_IDENTITY_EXW* pSecAuth
+ )
+{
+ ZeroMemory( pSecAuth, sizeof(*pSecAuth) );
+ pSecAuth->Version = SEC_WINNT_AUTH_IDENTITY_VERSION;
+ pSecAuth->Length = sizeof(*pSecAuth);
+ pSecAuth->PackageList = packageStr;
+ pSecAuth->PackageListLength = packageLen;
+ pSecAuth->Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
+}
diff --git a/src/mpi/common/rpcutil.h b/src/mpi/common/rpcutil.h
new file mode 100644
index 0000000..9ec04b6
--- /dev/null
+++ b/src/mpi/common/rpcutil.h
@@ -0,0 +1,54 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+#include "rpc.h"
+
+// SECURITY_WIN32 selects the user-mode SSPI interface for the security
+// header included below.
+#ifndef SECURITY_WIN32
+#define SECURITY_WIN32
+#endif
+
+// NOTE(review): the header name after this #include appears to have been
+// stripped during extraction (angle-bracket content lost); presumably
+// <security.h> for SEC_WINNT_AUTH_IDENTITY_EXW - confirm before use.
+#include
+
+// Security package list that excludes Kerberos ("!" negates a package).
+#define DISABLE_KERB_STR L"!Kerberos"
+// selectany: all translation units share one definition of this writable
+// copy of the package list string.
+__declspec(selectany) extern wchar_t disableKerbStr[] = DISABLE_KERB_STR;
+
+// Register and start listening on an RPC interface over the given protocol
+// sequence/endpoint; optionally returns the bound port / LRPC endpoint GUID.
+_Success_(return == NOERROR)
+RPC_STATUS
+StartRpcServer(
+ _In_z_ PCWSTR pProtSeq,
+ _In_opt_z_ PCWSTR pEndpoint,
+ _In_ RPC_IF_HANDLE rpcInterface,
+ _In_opt_ RPC_IF_CALLBACK_FN* pSecurityCallbackFn,
+ _Out_opt_ UINT16* pPort,
+ _Out_opt_ GUID* pLrpcEndpoint,
+ _In_ UINT maxConcurrentCalls = RPC_C_LISTEN_MAX_CALLS_DEFAULT,
+ _In_ bool localOnly = false
+ );
+
+
+// Unregister the RPC interface started with StartRpcServer.
+RPC_STATUS
+StopRpcServer(
+ _In_ RPC_IF_HANDLE rpcInterface
+ );
+
+
+// Create an authenticated client binding handle to the given host/endpoint.
+_Success_(return == NOERROR)
+RPC_STATUS
+CreateRpcBinding(
+ _In_z_ PCWSTR pProtSeq,
+ _In_opt_z_ PCWSTR pHostName,
+ _In_z_ PCWSTR pEndpoint,
+ _In_ UINT AuthnLevel,
+ _In_ UINT AuthnSvc,
+ _In_opt_ RPC_AUTH_IDENTITY_HANDLE pAuthIdentity,
+ _Out_ handle_t* phBinding
+ );
+
+
+// Fill a SEC_WINNT_AUTH_IDENTITY_EXW with the given security package list.
+void
+InitializeAuthIdentity(
+ _In_ PWSTR packageStr,
+ _In_ DWORD packageLen,
+ _Out_ SEC_WINNT_AUTH_IDENTITY_EXW* pSecAuth
+ );
diff --git a/src/mpi/common/sock.cpp b/src/mpi/common/sock.cpp
new file mode 100644
index 0000000..a57b481
--- /dev/null
+++ b/src/mpi/common/sock.cpp
@@ -0,0 +1,1918 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "precomp.h"
+#include "mpisock.h"
+#include // acceptex
+#include // keep-alive definitions
+#include "mpitrace.h"
+#include
+#include
+
+#define SOCKI_TCP_BUFFER_SIZE 32*1024
+
+//
+// Shorthand macros
+//
+#define MPIU_E_FAIL_GLE2(gle_) \
+ MPIU_ERR_CREATE(MPI_ERR_OTHER, "**fail %s %d", get_error_string(gle_), gle_)
+
+
+void sock_hard_close_socket(
+ _Inout_ sock_state_t* sock,
+ _Inout_ sock_overlapped_t* pov
+ );
+
+void sock_graceful_close_socket(
+ _Inout_ sock_state_t *sock,
+ _Inout_ sock_overlapped_t* pov
+ );
+
+static int g_socket_rbuffer_size = SOCKI_TCP_BUFFER_SIZE;
+static int g_socket_sbuffer_size = SOCKI_TCP_BUFFER_SIZE;
+static int g_init_called = 0;
+static int g_min_port = 0;
+static int g_max_port = 0;
+static struct in_addr g_netmask = { 0 };
+static struct in_addr g_netaddr = { 0 };
+static int g_connect_retries = MSMPI_DEFAULT_CONNECT_RETRIES;
+
+
+#ifndef STATUS_CANCELLED
+#define STATUS_CANCELLED ((DWORD)0xC0000120L)
+#endif
+
+
+//
+// Utility function to return the IPv4 address string of the remote host
+// connect to this socket.
+//
+_Success_(*pPort != 0)
+void
+get_sock_peer_address(
+ _In_ _Pre_satisfies_(0 != pSock->sock) sock_state_t* pSock,
+ _Out_writes_bytes_(addrLen) char* pAddr,
+ _In_ int addrLen,
+ _Out_ int* pPort
+ )
+{
+ struct sockaddr_in info;
+ int len = sizeof(struct sockaddr_in);
+
+ MPIU_Assert(nullptr != pAddr);
+ MPIU_Assert(nullptr != pSock);
+ MPIU_Assert(0 != pSock->sock);
+ MPIU_Assert(nullptr != pPort);
+
+ int status = getpeername(
+ pSock->sock,
+ reinterpret_cast(&info),
+ &len
+ );
+
+ if(0 != status)
+ {
+ *pPort = 0;
+ }
+ else
+ {
+ MPIU_Strcpy( pAddr, addrLen, inet_ntoa( info.sin_addr ) );
+ *pPort = info.sin_port;
+ }
+}
+
+
+//
+// Retrieve the winsock error code for a completed overlapped operation on
+// pov->sock. Returns NO_ERROR on success. STATUS_CANCELLED is translated
+// directly to WSA_OPERATION_ABORTED because the socket may already be
+// closed and cannot be queried.
+//
+_Success_(return == NOERROR)
+static int
+sock_get_overlapped_result(
+ _In_ sock_overlapped_t* pov
+ )
+{
+ int gle = NO_ERROR;
+ DWORD Flags;
+ DWORD BytesTransferred;
+
+ //
+ // Special check for error codes returned when the socket get closed.
+ // Cannot retrive the error with an invalid socket; 'manually' translate
+ // to winsock error code.
+ //
+ NTSTATUS status = ExGetStatus(&pov->exov);
+ if(status == STATUS_CANCELLED)
+ {
+ Trace_SOCKETS_Error_sock_get_overlapped_result();
+ return WSA_OPERATION_ABORTED;
+ }
+
+ MPIU_Assert(pov->sock->sock != INVALID_SOCKET);
+ if(!WSAGetOverlappedResult(pov->sock->sock, &pov->exov.ov, &BytesTransferred, FALSE, &Flags))
+ {
+ gle = WSAGetLastError();
+ Trace_SOCKETS_Error_sock_get_overlapped_result_Failed(gle, get_error_string(gle));
+ }
+ MPIU_Assert(gle != WSAENOTSOCK);
+ return gle;
+}
+
+
+//
+// Advance the scatter/gather array past num_bytes of already-transferred
+// data, updating *piov / *piovlen in place. Fully-consumed entries are
+// skipped; a partially-consumed first entry has its base pointer and
+// length adjusted.
+//
+static void
+TrimIOV(
+ _Inout_ _Outptr_ WSABUF** piov,
+ _Inout_ int* piovlen,
+ _In_ MPIU_Bsize_t num_bytes
+ )
+{
+ WSABUF* iov = *piov;
+ int iovlen = *piovlen;
+
+ while (num_bytes != 0)
+ {
+ MPIU_Assert(num_bytes > 0);
+ MPIU_Assert(iovlen > 0);
+
+ if(iov[0].len <= num_bytes)
+ {
+ num_bytes -= iov[0].len;
+ iov++;
+ iovlen--;
+ }
+ else
+ {
+ iov[0].len -= num_bytes;
+ iov[0].buf += num_bytes;
+ num_bytes = 0;
+ }
+ }
+
+ *piov = iov;
+ *piovlen = iovlen;
+}
+
+
+//
+// Issue WSASend on the iov array, retrying while winsock reports
+// WSAENOBUFS. On WSAENOBUFS the send falls back to a single copy of the
+// first buffer (tmp, captured before the loop) with a halving length so
+// the retry eventually succeeds or fails with a different error.
+// Returns NO_ERROR for synchronous success or pending overlapped I/O,
+// otherwise the winsock error code.
+//
+_Success_(return==NO_ERROR)
+static int
+sock_safe_send(
+ _In_ SOCKET sock,
+ _Inout_ WSABUF* iov,
+ _In_ int iovlen,
+ _Out_ DWORD* pBytesSent,
+ _Inout_opt_ OVERLAPPED* ov)
+{
+ int rc;
+ WSABUF tmp = iov[0];
+
+ for(;;)
+ {
+ MPIU_Assert(iov[0].len > 0);
+ if(WSASend(sock, iov, iovlen, pBytesSent, 0, ov, nullptr) != SOCKET_ERROR)
+ return NO_ERROR;
+
+ rc = WSAGetLastError();
+ if(rc == WSA_IO_PENDING)
+ return NO_ERROR;
+
+ //
+ // An error is returned for nonblocking sockets (w/o) overlapped.
+ // The caller should handle that error.
+ //
+ MPIU_Assert((rc != WSAEWOULDBLOCK) || (ov == nullptr));
+ MPIU_Assert(rc != WSAESHUTDOWN);
+
+ if(rc != WSAENOBUFS)
+ {
+ Trace_SOCKETS_Error_sock_safe_send(rc, get_error_string(rc));
+ return rc;
+ }
+
+ //
+ // No buffers for send, use the temporary overlapped to send
+ // the first buffer only.
+ //
+ iov = &tmp;
+ iovlen = 1;
+
+ //
+ // Reduce the buffer size, don't let the size go down to zero. Assume
+ // that eventually winsock will return success or a different error than
+ // WSAENOBUFFS.
+ //
+ tmp.len = tmp.len / 2 + 1;
+ Sleep(0);
+ }
+}
+
+
+//
+// Issue WSARecv for the first buffer of the iov array (iovlen is ignored;
+// only iov[0] is posted), retrying with a halving length while winsock
+// reports WSAENOBUFS. The receive size is capped at 1MB - see the note
+// below. Returns NO_ERROR for synchronous success or pending overlapped
+// I/O, otherwise the winsock error code.
+//
+_Success_(return==NO_ERROR)
+static int
+sock_safe_receive(
+ _In_ SOCKET sock,
+ _In_ const WSABUF* iov,
+ _In_opt_ int /*iovlen*/,
+ _Inout_opt_ OVERLAPPED* ov
+ )
+{
+ int rc;
+ WSABUF tmp = iov[0];
+ DWORD Flags;
+
+ //
+ // Cap the size to receive to avoid excessive probe and lock in the kernel.
+ // N.B. The excessive probe and lock happens because of partial receives of large buffers.
+ // The buffer is repeatedly re-posted for receive and being probed and locked again.
+ // It's better to use MSG_WAITALL, but unfortonatly it can not be mixed with
+ // non-blocking buffers
+ //
+ tmp.len = min(tmp.len, 1024 * 1024);
+
+ for(;;)
+ {
+ Flags = 0;
+ if(WSARecv(sock, &tmp, 1, nullptr, &Flags, ov, nullptr) != SOCKET_ERROR)
+ return NO_ERROR;
+
+ rc = WSAGetLastError();
+ if(rc == WSA_IO_PENDING)
+ return NO_ERROR;
+
+ //
+ // An error is returned for nonblocking sockets (w/o) overlapped.
+ // The caller should handle that error.
+ //
+ MPIU_Assert((rc != WSAEWOULDBLOCK) || (ov == nullptr));
+ MPIU_Assert(rc != WSAESHUTDOWN);
+
+ if(rc != WSAENOBUFS)
+ {
+ Trace_SOCKETS_Error_sock_safe_receive(rc, get_error_string(rc));
+ return rc;
+ }
+
+ //
+ // Reduce the buffer size, don't let the size go down to zero. Assume
+ // that eventually winsock will return success or a different error than
+ // WSAENOBUFFS.
+ //
+ tmp.len = tmp.len / 2 + 1;
+ Sleep(0);
+ }
+}
+
+
+/* sock functions */
+//
+// Allocate a zeroed sock_state_t. The socket and completion set start out
+// invalid; pfnClose selects the close behavior (hard vs. graceful).
+// Returns nullptr on allocation failure.
+//
+_Success_(return!=nullptr)
+static sock_state_t*
+sock_create_state(
+ _In_ sock_close_routine pfnClose
+ )
+{
+ sock_state_t *p = MPIU_Malloc_obj(sock_state_t);
+ if(p == nullptr)
+ return nullptr;
+
+ memset(p, 0, sizeof(*p));
+ p->pfnClose = pfnClose;
+ p->sock = INVALID_SOCKET;
+ p->set = EX_INVALID_SET;
+ return p;
+}
+
+
+// Release a sock_state_t allocated by sock_create_state. Frees memory
+// only; the caller is responsible for closing the socket first.
+static inline void
+sock_free_state(
+ _In_ _Post_ptr_invalid_ sock_state_t *p
+ )
+{
+ MPIU_Free(p);
+}
+
+
+// Initialize the executive overlapped with its success/failure completion
+// routines and attach the owning sock state to the overlapped.
+static
+inline
+void
+sock_init_overlapped(
+ _Inout_ sock_overlapped_t* p,
+ _In_ sock_state_t *sock,
+ _In_ ExCompletionRoutine pfnSuccess,
+ _In_ ExCompletionRoutine pfnFailure
+ )
+{
+ ExInitOverlapped(&p->exov, pfnSuccess, pfnFailure);
+ p->sock = sock;
+}
+
+
+// Recover the enclosing sock_overlapped_t from its embedded EXOVERLAPPED.
+static inline sock_overlapped_t*
+sock_ov_from_exov(
+ _In_ EXOVERLAPPED* pexov
+ )
+{
+ return CONTAINING_RECORD(pexov, sock_overlapped_t, exov);
+}
+
+
+// Invoke the user's failure completion routine for the overlapped that
+// embeds pov (the sov member of MPIDU_Sock_context_t), passing error and
+// zero bytes transferred.
+_Success_(return==MPI_SUCCESS)
+static int
+CallbackFailure(
+ _In_ sock_overlapped_t* pov,
+ _In_ int error)
+{
+ MPIDU_Sock_context_t* psc = CONTAINING_RECORD(pov, MPIDU_Sock_context_t, sov);
+ return ExCallFailure(&psc->uov, error, 0 /*BytesTransferred*/);
+}
+
+
+// Invoke the user's success completion routine for the overlapped that
+// embeds pov, reporting num_bytes transferred.
+_Success_(return==MPI_SUCCESS)
+static int
+CallbackSuccess(
+ _In_ sock_overlapped_t* pov,
+ _In_ MPIU_Bsize_t num_bytes)
+{
+ MPIDU_Sock_context_t* psc = CONTAINING_RECORD(pov, MPIDU_Sock_context_t, sov);
+ return ExCallSuccess(&psc->uov, MPI_SUCCESS, num_bytes);
+}
+
+
+// Read the MSMPI_PORT_RANGE (fallback MPICH_PORT_RANGE) environment
+// variable into g_min_port/g_max_port; both are reset to 0 (no range) if
+// the variable is absent or malformed.
+static void set_port_range(void)
+{
+ if( env_to_range_ex(
+ L"MSMPI_PORT_RANGE",
+ L"MPICH_PORT_RANGE",
+ 0,
+ 65535,
+ FALSE,
+ &g_min_port,
+ &g_max_port ) == FALSE )
+ {
+ g_min_port = g_max_port = 0;
+ }
+}
+
+
+//
+// Parse the environment variable szNetmaskEnv as "addr/mask" where mask is
+// either dotted-quad or a CIDR prefix length (1-31). On success *pAddr
+// holds the network address already masked by *pMask (both network byte
+// order). Returns S_OK, or a failure HRESULT if the variable is missing
+// or malformed.
+//
+// NOTE(review): a prefix length of 32 is rejected (bits > 31) although
+// dotted-quad 255.255.255.255 is accepted - confirm that is intentional.
+//
+HRESULT ParseNetmask( _In_z_ PCWSTR szNetmaskEnv, _Out_ IN_ADDR* pAddr, _Out_ IN_ADDR* pMask )
+{
+ //
+ // Max of "xxx.xxx.xxx.xxx/xxx.xxx.xxx.xxx" plus null is 32
+ //
+ WCHAR env[32];
+ ULONG ret = MPIU_Getenv( szNetmaskEnv,
+ env,
+ _countof(env) );
+ if( ret != NOERROR )
+ {
+ return HRESULT_FROM_WIN32(ret);
+ }
+
+ PCWSTR delim;
+ ret = RtlIpv4StringToAddressW( env, TRUE, &delim, pAddr );
+ if( FAILED( ret ) )
+ {
+ return ret;
+ }
+
+ if( *delim != L'/' )
+ {
+ return E_INVALIDARG;
+ }
+
+ PCWSTR szMask = delim + 1;
+ ret = RtlIpv4StringToAddressW( szMask, TRUE, &delim, pMask );
+ if( ret == STATUS_INVALID_PARAMETER )
+ {
+ // Not dotted-quad; try to interpret the mask as a CIDR bit count.
+ int bits = _wtol( szMask );
+ if( bits <= 0 || bits > 31 )
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // The addresses are stored in network byte order, so we must form the mask
+ // and swap it. Simply creating a mask in the lower bits doesn't work, for
+ // example a 12 bit mask shoudl result in s_addr == 0x0000F0FF (0xFFF00000 in
+ // host order).
+ //
+ // We perform a left shift to clear the lower bits, leaving only the desired
+ // number of most-significant bits set, then swap that to network order.
+ //
+ pMask->s_addr = _byteswap_ulong( ~0UL << (32 - bits) );
+ }
+ else if( FAILED( ret ) )
+ {
+ return ret;
+ }
+
+ // Store the network address pre-masked so matching needs only one AND.
+ pAddr->s_addr &= pMask->s_addr;
+ return S_OK;
+}
+
+
+// Load the MPICH_NETMASK environment variable into g_netaddr/g_netmask;
+// both are cleared (match everything disabled) on any parse failure.
+static void set_netmask()
+{
+ if( FAILED( ParseNetmask( L"MPICH_NETMASK", &g_netaddr, &g_netmask ) ) )
+ {
+ g_netaddr.s_addr = 0;
+ g_netmask.s_addr = 0;
+ }
+}
+
+
+//
+// Reference-counted initialization of the sockets layer: starts Winsock
+// (2.0) and reads buffer-size, connect-retry, port-range and netmask
+// configuration from the environment. Subsequent calls only bump the
+// reference count; MPIDU_Sock_finalize decrements it.
+//
+_Success_(return==MPI_SUCCESS)
+int MPIDU_Sock_init()
+{
+ int v;
+ WSADATA wsaData;
+
+ if (g_init_called)
+ {
+ g_init_called++;
+ return MPI_SUCCESS;
+ }
+
+ /* Start the Winsock dll */
+ if ((v = WSAStartup(MAKEWORD(2, 0), &wsaData)) != 0)
+ {
+ Trace_SOCKETS_Error_MPIDU_Sock_init(v, get_error_string(v));
+ return MPIU_E_FAIL_GLE2(v);
+ }
+
+ /* get the socket buffers size */
+ v = env_to_int(L"MPICH_SOCKET_BUFFER_SIZE", SOCKI_TCP_BUFFER_SIZE, 0);
+ g_socket_rbuffer_size = env_to_int(L"MPICH_SOCKET_RBUFFER_SIZE", v, 0);
+ g_socket_sbuffer_size = env_to_int(L"MPICH_SOCKET_SBUFFER_SIZE", v, 0);
+
+ /* get the connect max retry count */
+ g_connect_retries = env_to_int_ex(
+ L"MSMPI_CONNECT_RETRIES",
+ L"MPICH_CONNECT_RETRIES",
+ MSMPI_DEFAULT_CONNECT_RETRIES,
+ 0
+ );
+
+ /* check to see if a port range was specified */
+ set_port_range();
+
+ /* check to see if a subnet was specified through the environment */
+ set_netmask();
+
+ g_init_called = 1;
+
+ return MPI_SUCCESS;
+}
+
+
+// Decrement the init reference count; the last caller tears down Winsock.
+_Success_(return==MPI_SUCCESS)
+int MPIDU_Sock_finalize()
+{
+ MPIU_Assert(g_init_called);
+
+ g_init_called--;
+ if (g_init_called == 0)
+ {
+ WSACleanup();
+ }
+ return MPI_SUCCESS;
+}
+
+
+// Append one host string to the description buffer, advancing
+// *phost_description / *plen. Fails with **desc_len when the buffer is
+// exhausted.
+_Success_(return==MPI_SUCCESS)
+static int
+add_host_description(
+ _In_z_ const char* host,
+ _Inout_ _Outptr_result_buffer_(*plen) PSTR* phost_description,
+ _Inout_ int* plen
+ )
+{
+ int str_errno = MPIU_Str_add_string(phost_description, plen, host);
+ if (str_errno != MPIU_STR_SUCCESS)
+ return MPIU_ERR_CREATE(MPI_ERR_OTHER, "**desc_len");
+
+ return MPI_SUCCESS;
+}
+
+
+//
+// Build the host description string for hostname: every IPv4 address
+// returned by gethostbyname, followed by the hostname itself.
+//
+// NOTE(review): gethostbyname is IPv4-only and legacy; presumably adequate
+// for this layer, which deals exclusively in sockaddr_in.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+socki_get_host_list(
+ _In_z_ const char* hostname,
+ _Out_writes_z_(len) char* host_description,
+ _In_ int len
+ )
+{
+ int mpi_errno;
+ char** p;
+ struct hostent* res;
+
+ MPIU_Assert(len > 0);
+
+ *host_description = '\0';
+
+ res = gethostbyname(hostname);
+ if((res == nullptr) || (res->h_addr_list == nullptr))
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_socki_get_host_list(gle, get_error_string(gle), hostname);
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+
+ /* add the ip addresses */
+ for(p = res->h_addr_list; *p != nullptr; p++)
+ {
+ mpi_errno = add_host_description(inet_ntoa(*(struct in_addr*)*p), &host_description, &len);
+ if (mpi_errno != MPI_SUCCESS)
+ {
+ Trace_SOCKETS_Error_socki_get_host_list_AddIp(mpi_errno);
+ return mpi_errno;
+ }
+ }
+
+ /* add the hostname to the end of the list */
+ mpi_errno = add_host_description(hostname, &host_description, &len);
+ if (mpi_errno != MPI_SUCCESS)
+ {
+ Trace_SOCKETS_Error_socki_get_host_list_AddHostname(mpi_errno);
+ return mpi_errno;
+ }
+
+ return MPI_SUCCESS;
+}
+
+// Public wrapper over socki_get_host_list: translate a hostname into the
+// host description string (IP list plus hostname).
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_hostname_to_host_description(
+ _In_z_ const char* hostname,
+ _Out_writes_z_(len) char* host_description,
+ _In_ int len
+ )
+{
+ int mpi_errno;
+
+ MPIU_Assert(g_init_called);
+
+ mpi_errno = socki_get_host_list(hostname, host_description, len);
+ if (mpi_errno != MPI_SUCCESS)
+ return MPIU_ERR_FAIL(mpi_errno);
+
+ return MPI_SUCCESS;
+}
+
+
+// Build the host description for the local machine: gethostname followed
+// by MPIDU_Sock_hostname_to_host_description.
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_get_host_description(
+ _Out_writes_z_(len) char* host_description,
+ _In_ int len
+ )
+{
+ int mpi_errno;
+ char hostname[100];
+
+ MPIU_Assert(g_init_called);
+
+ if (gethostname(hostname, _countof(hostname)) == SOCKET_ERROR)
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_MPIDU_Sock_get_host_description(gle, get_error_string(gle));
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+
+ mpi_errno = MPIDU_Sock_hostname_to_host_description(hostname, host_description, len);
+ if (mpi_errno != MPI_SUCCESS)
+ {
+ return MPIU_ERR_FAIL(mpi_errno);
+ }
+ return MPI_SUCCESS;
+}
+
+
+//
+// Wrap an existing native socket in a sock_state_t and associate it with
+// the completion set. The wrapper takes ownership of fd and will close it
+// gracefully (sock_graceful_close_socket).
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_native_to_sock(
+ _In_ ExSetHandle_t set,
+ _In_ MPIDU_SOCK_NATIVE_FD fd,
+ _Outptr_ sock_state_t **ppSock
+ )
+{
+ sock_state_t *sock_state;
+
+ MPIU_Assert(g_init_called);
+
+ /* setup the structures */
+ sock_state = sock_create_state(sock_graceful_close_socket);
+ if (sock_state == nullptr)
+ return MPIU_ERR_NOMEM();
+
+ sock_state->sock = fd;
+ sock_state->set = set;
+
+ /* associate the socket with the completion port */
+ ExAttachHandle(set, (HANDLE)sock_state->sock);
+
+ *ppSock = sock_state;
+
+ return MPI_SUCCESS;
+}
+
+
+// Apply the standard tuning to a socket: configured send/receive buffer
+// sizes and TCP_NODELAY. Best-effort - setsockopt return values are
+// deliberately ignored.
+static void
+set_socket_options(
+ _In_ SOCKET sock
+ )
+{
+ BOOL nodelay = TRUE;
+
+ /* set the socket buffers */
+ setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (const char*)&g_socket_rbuffer_size, sizeof(g_socket_rbuffer_size));
+ setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (const char*)&g_socket_sbuffer_size, sizeof(g_socket_sbuffer_size));
+
+ /* disable nagling */
+ setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (const char*)&nodelay, sizeof(nodelay));
+}
+
+
+//
+// Create a tuned, non-inheritable, overlapped TCP socket and return it in
+// *fd. Non-inheritance keeps the handle out of spawned child processes.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_create_native_fd(
+ _Out_ MPIDU_SOCK_NATIVE_FD* fd
+ )
+{
+ SOCKET s = WSASocketW(PF_INET, SOCK_STREAM, 0, nullptr, 0, WSA_FLAG_OVERLAPPED);
+ if (s == INVALID_SOCKET)
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_MPIDU_Sock_create_native_fd(gle, get_error_string(gle));
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+
+ set_socket_options(s);
+
+ SetHandleInformation((HANDLE)s, HANDLE_FLAG_INHERIT, 0);
+
+ *fd = s;
+ return MPI_SUCCESS;
+}
+
+
+//
+// Create an overlapped TCP socket and bind it to the given address/port.
+// If port == 0 and a port range was configured (MSMPI/MPICH_PORT_RANGE),
+// probe successive ports in [g_min_port, g_max_port] until a bind
+// succeeds.
+//
+// Returns MPI_SUCCESS with the bound socket in *sock, or an MPI error
+// code. On failure the temporary socket is closed (previously it leaked).
+//
+_Success_(return==MPI_SUCCESS)
+static int
+easy_create_ranged(
+ _Out_ SOCKET *sock,
+ _In_ int port,
+ _In_ unsigned long addr
+ )
+{
+ int mpi_errno;
+ SOCKET temp_sock;
+ SOCKADDR_IN sockAddr;
+ int use_range = 0;
+
+ /* create the socket */
+ mpi_errno = MPIDU_Sock_create_native_fd(&temp_sock);
+ if (mpi_errno != MPI_SUCCESS)
+ return mpi_errno;
+
+ if (port == 0 && g_min_port != 0 && g_max_port != 0)
+ {
+ use_range = 1;
+ port = g_min_port;
+ }
+
+ memset(&sockAddr,0,sizeof(sockAddr));
+
+ sockAddr.sin_family = AF_INET;
+ sockAddr.sin_addr.s_addr = addr;
+ sockAddr.sin_port = _byteswap_ushort(static_cast(port));
+
+ for (;;)
+ {
+ if (bind(temp_sock, (const SOCKADDR*)&sockAddr, sizeof(sockAddr)) == SOCKET_ERROR)
+ {
+ if (use_range)
+ {
+ port++;
+ if (port > g_max_port)
+ {
+ Trace_SOCKETS_Error_easy_create_ranged_Port(port, g_max_port);
+ /* fix: close the socket instead of leaking it on failure */
+ closesocket(temp_sock);
+ return MPIU_ERR_CREATE(MPI_ERR_OTHER, "**sock|getport");
+ }
+
+ sockAddr.sin_port = _byteswap_ushort(static_cast(port));
+ }
+ else
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_easy_create_ranged(gle, get_error_string(gle));
+ /* fix: close the socket instead of leaking it on failure */
+ closesocket(temp_sock);
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ *sock = temp_sock;
+ return MPI_SUCCESS;
+}
+
+
+// Return the local port a bound socket ended up on, in host byte order.
+// NOTE(review): the getsockname return value is ignored; on failure addr
+// is uninitialized - callers appear to use this only right after a
+// successful bind.
+static inline int
+get_socket_port(
+ _In_ SOCKET sock
+ )
+{
+ struct sockaddr_in addr;
+ int name_len = sizeof(addr);
+
+ getsockname(sock, (struct sockaddr*)&addr, &name_len);
+ return ntohs(addr.sin_port);
+}
+
+
+//
+// Create a listening socket bound to addr/*port (honoring the configured
+// port range when *port is 0), attach it to the completion set, and
+// return the wrapper in *ppSock with the actual port in *port.
+//
+// On failure the partially constructed state is released (previously the
+// sock_state_t - and on the listen() path its socket - leaked).
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_listen(
+ _In_ ExSetHandle_t set,
+ _In_ unsigned long addr,
+ _Inout_ int *port,
+ _Outptr_ sock_state_t **ppSock
+ )
+{
+ int mpi_errno;
+ sock_state_t *listen_state;
+
+ MPIU_Assert(g_init_called);
+
+ listen_state = sock_create_state(sock_hard_close_socket);
+ if(listen_state == nullptr)
+ return MPIU_ERR_NOMEM();
+
+ //
+ // Get a bound socket to a port in the range range specified by the
+ // MPICH_PORT_RANGE env var.
+ //
+ mpi_errno = easy_create_ranged(&listen_state->sock, *port, addr);
+ if (mpi_errno != MPI_SUCCESS)
+ {
+ /* fix: release the state instead of leaking it on failure */
+ sock_free_state(listen_state);
+ return MPIU_ERR_FAIL(mpi_errno);
+ }
+
+ if (listen(listen_state->sock, SOMAXCONN) == SOCKET_ERROR)
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_MPIDU_Sock_listen(gle, get_error_string(gle));
+ /* fix: close the socket and release the state instead of leaking */
+ closesocket(listen_state->sock);
+ sock_free_state(listen_state);
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+
+ ExAttachHandle(set, (HANDLE)listen_state->sock);
+
+ *port = get_socket_port(listen_state->sock);
+ listen_state->set = set;
+
+ *ppSock = listen_state;
+ return MPI_SUCCESS;
+}
+
+
+//
+// Create a fresh accept socket and post an asynchronous AcceptEx on the
+// listener. On success *pAcceptSock holds the pending accept socket; on
+// failure the socket is closed and *pAcceptSock is INVALID_SOCKET.
+// The zero receive length means AcceptEx completes on connection without
+// waiting for data; the buffer only holds the local/remote addresses.
+//
+_Success_(return==MPI_SUCCESS)
+static inline int
+post_next_accept(
+ _In_ SOCKET listen_sock,
+ _Out_ SOCKET* pAcceptSock,
+ _Inout_ sock_overlapped_t* pov
+ )
+{
+ int mpi_errno;
+ DWORD nBytesReceived;
+ SOCKET sock;
+ mpi_errno = MPIDU_Sock_create_native_fd(&sock);
+ if (mpi_errno != MPI_SUCCESS)
+ return mpi_errno;
+
+ *pAcceptSock = sock;
+
+ if (!AcceptEx(
+ listen_sock,
+ sock,
+ pov->accept.accept_buffer,
+ 0,
+ sizeof(pov->accept.accept_buffer)/2,
+ sizeof(pov->accept.accept_buffer)/2,
+ &nBytesReceived,
+ &pov->exov.ov))
+ {
+ int gle = WSAGetLastError();
+ if (gle == ERROR_IO_PENDING)
+ return MPI_SUCCESS;
+
+ Trace_SOCKETS_Error_post_next_accept(gle, get_error_string(gle));
+
+ MPIU_Assert( sock != INVALID_SOCKET);
+ closesocket(sock);
+ *pAcceptSock = INVALID_SOCKET;
+ return MPIU_E_FAIL_GLE2(gle);
+ }
+ return MPI_SUCCESS;
+}
+
+
+//
+// Failure completion routine for a posted AcceptEx. The accept socket is
+// always closed. For WSAECONNRESET (the incoming connection died before
+// acceptance) a new accept is reposted on the same overlapped; any other
+// error is reported to the user via CallbackFailure and the accept state
+// is freed.
+//
+_Success_(return==MPI_SUCCESS)
+static int AcceptFailed(
+ _Inout_ EXOVERLAPPED* pexov
+ )
+{
+
+ int mpi_errno;
+ sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+ //
+ // The overlapped structure might get freed in the callback, so save
+ // any context we need so we don't deref it after the callback.
+ //
+ sock_state_t* accept_state = pov->accept.accept_state;
+
+ MPIU_Assert( accept_state->sock != INVALID_SOCKET );
+ closesocket( accept_state->sock );
+ accept_state->sock = INVALID_SOCKET;
+
+ int gle = sock_get_overlapped_result(pov);
+
+ if( gle == WSAECONNRESET )
+ {
+ // Repost; on success the accept state stays alive for the new attempt.
+ mpi_errno = post_next_accept(pov->sock->sock, &pov->accept.accept_state->sock, pov);
+ if(mpi_errno == MPI_SUCCESS)
+ {
+ Trace_SOCKETS_Info_AcceptFailed_ResetPosted();
+ return MPI_SUCCESS;
+ }
+ Trace_SOCKETS_Error_AcceptFailed_ResetPostFailed(mpi_errno);
+ }
+ else
+ {
+ Trace_SOCKETS_Error_AcceptFailed(gle, get_error_string(gle));
+ mpi_errno = CallbackFailure(pov, gle);
+ }
+
+ sock_free_state( accept_state );
+ return mpi_errno;
+}
+
+
+//
+// Complete an accepted connection: inherit listener context
+// (SO_UPDATE_ACCEPT_CONTEXT), switch to non-blocking mode, apply standard
+// socket tuning, and attach the socket to the listener's completion set.
+// From here on the socket is closed gracefully.
+//
+static void
+sock_finish_accept(
+ _Inout_ sock_state_t *accept_state,
+ _Inout_ sock_state_t *listener_sock
+ )
+{
+ u_long optval;
+
+ Trace_SOCKETS_Info_sock_finish_accept();
+
+ /* finish the accept */
+ setsockopt(accept_state->sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, (const char *)&listener_sock->sock, sizeof(listener_sock->sock));
+
+ /* set the socket to non-blocking */
+ optval = TRUE;
+ ioctlsocket(accept_state->sock, FIONBIO, &optval);
+
+ /* set the socket buffers */
+ set_socket_options(accept_state->sock);
+
+ /* associate the socket with the completion port */
+ ExAttachHandle(listener_sock->set, (HANDLE)accept_state->sock);
+
+ accept_state->set = listener_sock->set;
+ accept_state->pfnClose = sock_graceful_close_socket;
+}
+
+
+// Success completion routine for AcceptEx: finish initializing the
+// accepted socket, then invoke the user's success callback.
+_Success_(return==MPI_SUCCESS)
+static int
+AcceptSucceeded(
+ _Inout_ EXOVERLAPPED* pexov
+ )
+{
+ sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+ sock_finish_accept(pov->accept.accept_state, pov->sock);
+ return CallbackSuccess(pov, 0);
+}
+
+
+//
+// Post an asynchronous accept on the listener. *ppSock receives the
+// not-yet-connected accept state immediately; completion is delivered via
+// AcceptSucceeded/AcceptFailed through psc's user overlapped.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_accept(
+ _In_ sock_state_t *listener_sock,
+ _Outptr_ sock_state_t **ppSock,
+ _In_ MPIDU_Sock_context_t* psc
+ )
+{
+ int mpi_errno;
+ sock_state_t *accept_state;
+ sock_overlapped_t* pov = &psc->sov;
+
+ MPIU_Assert(g_init_called);
+
+ accept_state = sock_create_state(sock_hard_close_socket);
+ if (accept_state == nullptr)
+ return MPIU_ERR_NOMEM();
+
+ sock_init_overlapped(pov, listener_sock, AcceptSucceeded, AcceptFailed);
+ pov->accept.accept_state = accept_state;
+ *ppSock = accept_state;
+ mpi_errno = post_next_accept(listener_sock->sock, &accept_state->sock, pov);
+ if (mpi_errno != MPI_SUCCESS)
+ {
+ *ppSock = nullptr;
+ sock_free_state(accept_state);
+ return MPIU_ERR_FAIL(mpi_errno);
+ }
+
+ return MPI_SUCCESS;
+}
+
+
+static const GUID xGuidConnectEx = WSAID_CONNECTEX;
+
+
+//
+// Issue an asynchronous ConnectEx to the given address. The ConnectEx
+// entry point is looked up via WSAIoctl each call (see comment below).
+// Returns NO_ERROR for synchronous success or pending I/O, otherwise the
+// winsock error code.
+//
+// NOTE(review): ConnectEx requires an already-bound socket; callers are
+// presumed to have bound it (see gle_bind_any) - confirm at call sites.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+gle_connect_ex(
+ _In_ SOCKET Socket,
+ _In_reads_bytes_(namelen) const struct sockaddr* name,
+ _In_ int namelen,
+ _Inout_opt_ OVERLAPPED* ov)
+{
+ int rc;
+ BOOL fSucc;
+ DWORD BytesReturned;
+ LPFN_CONNECTEX pfnConnectEx;
+
+ //
+ // Query the entry point every time since different providers
+ // have different entry points
+ //
+ rc = WSAIoctl(
+ Socket,
+ SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (LPVOID)&xGuidConnectEx,
+ sizeof(xGuidConnectEx),
+ (LPVOID)&pfnConnectEx,
+ sizeof(pfnConnectEx),
+ &BytesReturned,
+ nullptr,
+ nullptr
+ );
+
+ if(rc == SOCKET_ERROR)
+ {
+ int gle = WSAGetLastError();
+ Trace_SOCKETS_Error_gle_connect_ex_WSAIoctlSocketError(gle, get_error_string(gle));
+ return gle;
+ }
+
+ fSucc = pfnConnectEx(
+ Socket,
+ name,
+ namelen,
+ nullptr,
+ 0,
+ &BytesReturned,
+ ov
+ );
+
+ if(fSucc)
+ {
+ struct sockaddr_in* pSockName = reinterpret_cast(const_cast(name));
+OACR_WARNING_SUPPRESS(26500, "Suppress false anvil warning.")
+ Trace_SOCKETS_Info_gle_connect_ex_Succeeded(inet_ntoa(pSockName->sin_addr), pSockName->sin_port);
+ return NO_ERROR;
+ }
+
+ rc = WSAGetLastError();
+ if(rc != WSA_IO_PENDING)
+ {
+ Trace_SOCKETS_Error_gle_connect_ex_pfnConnectEx(rc, get_error_string(rc));
+ return rc;
+ }
+
+ return NO_ERROR;
+}
+
+
+// Begin an async connect to the current host (pov->connect.cur_host, a
+// dotted-quad string) on the configured port, using the overlapped
+// embedded in pov.
+_Success_(return==MPI_SUCCESS)
+static int
+gle_connect_host(
+ _Inout_ sock_overlapped_t* pov
+ )
+{
+ const sock_connect_context* scc = &pov->connect;
+
+ struct sockaddr_in sockAddr;
+ memset(&sockAddr,0,sizeof(sockAddr));
+ sockAddr.sin_family = AF_INET;
+ sockAddr.sin_addr.s_addr = inet_addr(scc->cur_host);
+ sockAddr.sin_port = _byteswap_ushort(static_cast(scc->port));
+ return gle_connect_ex(pov->sock->sock, (const SOCKADDR*)&sockAddr, sizeof(sockAddr), &pov->exov.ov);
+}
+
+
+#ifndef STATUS_TIMER_EXPIRED
+#define STATUS_TIMER_EXPIRED ((DWORD)0xC0000001L)
+#endif
+
+
+// Timer-queue callback used for connect retry back-off. Runs on the timer
+// thread; it only reposts the overlapped (with STATUS_TIMER_EXPIRED) so
+// the retry logic executes on the Executive thread in ConnectFailed.
+static void CALLBACK
+ConnectTimerCallback(
+ _In_ void* p,
+ _In_opt_ BOOLEAN /*TimerFired*/
+ )
+{
+ //
+ // *** This function is called on the timer thread. ***
+ //
+
+ //
+ // Queue the timer expiration callback back to the Executive thread.
+ // Post the overlapped with an error status value to invoke the ConnectFailed routine
+ // on the Executive thread.
+ //
+ sock_overlapped_t* pov = (sock_overlapped_t*)p;
+ ExPostOverlappedResult(pov->sock->set, &pov->exov, STATUS_TIMER_EXPIRED, 0);
+}
+
+
+//
+// Arm a one-shot timer-queue timer that fires ConnectTimerCallback after
+// DueTime milliseconds, storing the timer handle in
+// pov->connect.retry_timer. Returns NO_ERROR or the Win32 error.
+//
+_Success_(return==NO_ERROR)
+static int
+gle_postpone_retry_connect(
+ _Inout_ sock_overlapped_t* pov,
+ _In_ DWORD DueTime
+ )
+{
+ BOOL fSucc;
+
+ fSucc = CreateTimerQueueTimer(
+ &pov->connect.retry_timer,
+ nullptr,
+ ConnectTimerCallback,
+ pov,
+ DueTime,
+ 0,
+ WT_EXECUTEDEFAULT
+ );
+
+ if(!fSucc)
+ {
+ int gle = GetLastError();
+ Trace_SOCKETS_Error_gle_postpone_retry_connect(gle, get_error_string(gle));
+ return gle;
+ }
+
+ return NO_ERROR;
+}
+
+
+//
+// sock_cancel_inprogress_connect
+//
+// Helper function to cancel an in progress connect with an outstanding timer.
+//
+static void
+sock_cancel_inprogress_connect(
+ _Inout_ sock_state_t *sock
+ )
+{
+ HANDLE Timer;
+ sock_overlapped_t* pov = sock->connect.pov;
+
+ struct sockaddr_in addr;
+ int name_len = sizeof(addr);
+
+ getsockname(sock->sock, (struct sockaddr*)&addr, &name_len);
+ Trace_SOCKETS_Info_sock_cancel_inprogress_connect(inet_ntoa(addr.sin_addr), addr.sin_port);
+
+ if(pov == nullptr)
+ return;
+
+ //
+ // Set the timer nullptr to help ConnectFailed to identify that close was called
+ // and the timer has already been deleted.
+ //
+ Timer = pov->connect.retry_timer;
+ MPIU_Assert(Timer != nullptr);
+ pov->connect.retry_timer = nullptr;
+ sock->connect.pov = nullptr;
+
+ //
+ // Delete the timer and wait for the callback to complete its execution.
+ // N.B. The callback function execution is guaranteed, either the timer expires
+ // or the delete timer function runs it down.
+ //
+ OACR_WARNING_SUPPRESS(RETVAL_IGNORED_FUNC_COULD_FAIL, "Call blocks until timer is deleted.");
+ DeleteTimerQueueTimer(nullptr, Timer, INVALID_HANDLE_VALUE);
+}
+
+
+//
+// Failure completion routine for an async connect. Handles three cases:
+// 1. STATUS_TIMER_EXPIRED - retry back-off timer fired: tear down the
+// timer and retry the current host (unless close raced and cleared
+// retry_timer, in which case abort).
+// 2. Retriable failure (WSAECONNREFUSED/WSAETIMEDOUT under the retry
+// budget) - arm a back-off timer and return; the timer re-enters this
+// routine via ConnectTimerCallback.
+// 3. Hard failure - record the error, advance to the next host in the
+// description list, or report failure when the list is exhausted.
+//
+// The logic is intentionally left byte-identical: the ordering around
+// retry_timer / connect.pov is a deliberate race-detection protocol with
+// sock_cancel_inprogress_connect.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+ConnectFailed(
+ _Inout_ EXOVERLAPPED* pexov
+ )
+{
+ int gle;
+ int mpi_errno;
+ sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+ sock_connect_context* scc = &pov->connect;
+ NTSTATUS status = ExGetStatus(&pov->exov);
+
+ if(status == STATUS_TIMER_EXPIRED)
+ {
+ //
+ // The timer has expired; verify race condition with close (the timer set to nullptr).
+ //
+ if(scc->retry_timer == nullptr)
+ {
+ //
+ // The socket was closed while the timer was armed; return with 'abort' error code.
+ // Don't use sock_get_overlapped_result to get the error code, the socket is invalid.
+ //
+ gle = WSA_OPERATION_ABORTED;
+ Trace_SOCKETS_Error_ConnectFailed(ConnectFailedEnumAbortedBeforeTimeout, status, pexov, scc->cur_host, scc->port);
+ goto fn_fail_gle;
+ }
+
+ //
+ // Delete the timer (no wait) and go try connecting again. No need to update the
+ // retry count, it was incremented when the timer was armed down below.
+ //
+ Trace_SOCKETS_Info_ConnectFailed(ConnectFailedEnumTimeout, status, pexov, scc->cur_host, scc->port);
+ BOOL timer_marked_for_deletion = DeleteTimerQueueTimer(nullptr, pov->connect.retry_timer, nullptr);
+ while(!timer_marked_for_deletion)
+ {
+ DWORD last_error = GetLastError();
+ if(last_error == ERROR_IO_PENDING)
+ {
+ break;
+ }
+ else
+ {
+ timer_marked_for_deletion = DeleteTimerQueueTimer(nullptr, pov->connect.retry_timer, nullptr);
+ }
+ }
+ scc->retry_timer = nullptr;
+ pov->sock->connect.pov = nullptr;
+ goto fn_connect;
+ }
+
+ //
+ // Check if this sock is closed or is being closed.
+ //
+ // N.B. Accessing the sock object here is safe only with asynchronous close which guarantee
+ // that all async completion routines are executed before the the sock object is deleted.
+ // Using sync close while connect is in progress will result in AV or memory corruption.
+ //
+ if(pov->sock->closing)
+ {
+ //
+ // The sock is closing do not retry to connect again.
+ //
+ gle = WSA_OPERATION_ABORTED;
+ Trace_SOCKETS_Error_ConnectFailed(ConnectFailedEnumAbortedClosing, status, pexov, scc->cur_host, scc->port);
+ goto fn_fail_gle;
+ }
+
+ scc->retry_count++;
+ gle = sock_get_overlapped_result(pov);
+ MPIU_Assert(gle != WSA_OPERATION_ABORTED);
+ if((gle == WSAECONNREFUSED || gle == WSAETIMEDOUT) && scc->retry_count <= g_connect_retries)
+ {
+ //
+ // Connection was refused, wait and retry.
+ //
+ // Randomized, linearly growing back-off: retry_count * (16..271) ms.
+ DWORD t = scc->retry_count * (rand() % 256 + 16);
+ gle = gle_postpone_retry_connect(pov, t);
+ if(gle != NO_ERROR)
+ goto fn_fail_gle;
+
+ //
+ // Save the overlapped to be used by close while the timer has not expired.
+ //
+ Trace_SOCKETS_Info_ConnectFailed(ConnectFailedEnumRefused, gle, pexov, scc->cur_host, scc->port);
+ pov->sock->connect.pov = pov;
+ return MPI_SUCCESS;
+ }
+
+ //
+ // Capture the connect error
+ //
+ Trace_SOCKETS_Info_ConnectFailed(ConnectFailedEnumError, gle, pexov, scc->cur_host, scc->port);
+ scc->error = MPIU_ERR_GET(scc->error, "**sock_connect %s %d %s %d", scc->cur_host, scc->port, get_error_string(gle), gle);
+
+ //
+ // Move to the next host in the list
+ //
+ // Hosts are stored back to back, each null-terminated; an empty string
+ // (double null) terminates the list.
+ scc->retry_count = 0;
+ scc->cur_host = scc->cur_host + MPIU_Strlen( scc->cur_host ) + 1;
+ if(*scc->cur_host == '\0')
+ {
+ Trace_SOCKETS_Error_ConnectFailed(ConnectFailedEnumExhausted, gle, pexov, scc->cur_host, scc->port);
+ mpi_errno = MPIU_ERR_GET(scc->error, "**sock_connect %s %d %s", scc->host_description, scc->port, "exhausted all endpoints");
+ return CallbackFailure(pov, mpi_errno);
+ }
+
+fn_connect:
+ gle = gle_connect_host(pov);
+ if(gle != NO_ERROR)
+ goto fn_fail_gle;
+
+ return MPI_SUCCESS;
+
+fn_fail_gle:
+ Trace_SOCKETS_Error_ConnectFailed(ConnectFailedEnumFail, gle, pexov, scc->cur_host, scc->port);
+ mpi_errno = MPIU_E_FAIL_GLE2(gle);
+ return CallbackFailure(pov, mpi_errno);
+}
+
+
+//
+// Complete a ConnectEx'd socket: update the winsock connect context
+// (SO_UPDATE_CONNECT_CONTEXT), switch to non-blocking mode, and mark the
+// socket for graceful close.
+//
+static void
+sock_finish_connect(
+ _In_ sock_state_t *connect_state
+ )
+{
+ DWORD sockoptval;
+ u_long optval;
+
+ struct sockaddr_in addr;
+ int name_len = sizeof(addr);
+
+ getsockname(connect_state->sock, (struct sockaddr*)&addr, &name_len);
+ Trace_SOCKETS_Info_sock_finish_connect(inet_ntoa(addr.sin_addr), addr.sin_port);
+
+ /* update winsock connect context */
+ sockoptval = 1;
+ setsockopt(connect_state->sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, (const char*)&sockoptval, sizeof(DWORD));
+
+ /* set the socket to non-blocking */
+ optval = TRUE;
+ ioctlsocket(connect_state->sock, FIONBIO, &optval);
+
+ connect_state->pfnClose = sock_graceful_close_socket;
+}
+
+
+// Success completion routine for ConnectEx: finish connect setup (unless
+// the socket is already closing), then invoke the user's success callback.
+_Success_(return==MPI_SUCCESS)
+static int
+ConnectSucceeded(
+ _Inout_ EXOVERLAPPED* pexov
+ )
+{
+ sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+ //
+ // Check to see if the socket has been closed.
+ //
+ if(!pov->sock->closing)
+ {
+ sock_finish_connect(pov->sock);
+ }
+ return CallbackSuccess(pov, 0);
+}
+
+
+// Bind the socket to INADDR_ANY with an ephemeral port (port 0), as
+// required before ConnectEx can be issued. Returns NO_ERROR or the
+// winsock error.
+_Success_(return==NO_ERROR)
+static int
+gle_bind_any(
+ _In_ SOCKET Socket
+ )
+{
+ int rc;
+
+ struct sockaddr_in sockAddr;
+ memset(&sockAddr,0,sizeof(sockAddr));
+ sockAddr.sin_family = AF_INET;
+ sockAddr.sin_addr.s_addr = INADDR_ANY;
+ sockAddr.sin_port = 0;
+
+ rc = bind(Socket, (const SOCKADDR*)&sockAddr, sizeof(sockAddr));
+ if(rc == SOCKET_ERROR)
+ return WSAGetLastError();
+
+ return NO_ERROR;
+}
+
+
+//
+// This function saves the valid hosts lists from the host_description into a save
+// hosts string. Each name ends with a '\0' the last entry ends with a double '\0'.
+// N.B. that if the host_description is empty the output would be a single '\0'.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+save_valid_endpoints(
+ _In_z_ const char* host_description,
+ _Out_writes_z_(len) char* hosts,
+ _In_ size_t len,
+ _In_ int port,
+ _In_ int usemask
+ )
+{
+ size_t n;
+ int str_errno;
+ int address_saved = 0;
+ int address_valid = 0;
+ struct hostent* lphost;
+ struct in_addr addr;
+ int mpi_errno = MPI_SUCCESS;
+
+ const char* p = host_description;
+
+ for(;;)
+ {
+ *hosts = '\0';
+
+ str_errno = MPIU_Str_get_string(&p, hosts, len);
+ if (str_errno != MPIU_STR_SUCCESS)
+ return MPIU_ERR_GET(mpi_errno, "**fail %d", str_errno);
+
+ n = MPIU_Strlen( hosts, len );
+ if(n == 0)
+ {
+ char no_endpoint[128];
+ const char* msg = no_endpoint;
+
+ if(address_saved > 0)
+ return MPI_SUCCESS;
+
+ if(address_valid == 0)
+ {
+ msg = "no endpoints";
+ }
+ else
+ {
+ MPIU_Szncpy(no_endpoint, "no endpoint matches the netmask ");
+ MPIU_Sznapp(no_endpoint, inet_ntoa(g_netaddr));
+ MPIU_Sznapp(no_endpoint, "/");
+ MPIU_Sznapp(no_endpoint, inet_ntoa(g_netmask));
+ msg = no_endpoint;
+ }
+
+ return MPIU_ERR_GET(mpi_errno, "**sock_connect %s %d %s", host_description, port, msg);
+ }
+
+
+ addr.s_addr = inet_addr(hosts);
+
+ if (addr.s_addr == INADDR_NONE || addr.s_addr == 0)
+ {
+ lphost = gethostbyname(hosts);
+ if (lphost != nullptr)
+ {
+ const char* s = inet_ntoa( *((struct in_addr*)lphost->h_addr) );
+ n = MPIU_Strlen( s, _countof("xxx.xxx.xxx.xxx") );
+ MPIU_Strncpy(hosts, s, len);
+ }
+ else
+ {
+ //
+ // Because the detailed error message calls get_error_string, the last error
+ // value could be overwritten, so we save it here so that it is preserved
+ // properly.
+ //
+ int gle = WSAGetLastError();
+ mpi_errno = MPIU_ERR_GET(mpi_errno, "**gethostbyname %s %d", get_error_string(gle), gle);
+ continue;
+ }
+ }
+
+ address_valid++;
+
+ /* if a subnet was specified, make sure the currently extracted ip falls in the subnet */
+ if (usemask)
+ {
+ if ((addr.s_addr & g_netmask.s_addr) != g_netaddr.s_addr)
+ {
+ /* this ip does not match, move to the next */
+ continue;
+ }
+ }
+
+ address_saved++;
+
+ //
+ // Adjust hosts buffer and the len left in that buffer
+ //
+ hosts += n + 1;
+ len -= n + 1;
+ }
+}
+
+
+//
+// Post an asynchronous connect to one of the endpoints listed in
+// host_description.  On success *ppSock receives the new sock state; the
+// outcome is delivered through psc's overlapped callbacks (ConnectSucceeded /
+// ConnectFailed).  'usemask' filters candidate addresses through the global
+// netmask (see save_valid_endpoints).  Returns MPI_SUCCESS or an MPI error.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_connect(
+    _In_ ExSetHandle_t set,
+    _In_z_ const char* host_description,
+    _In_ int port,
+    _Outptr_ sock_state_t** ppSock,
+    _In_ int usemask,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    int mpi_errno;
+    DWORD gle;
+    sock_state_t *connect_state;
+    sock_overlapped_t* pov = &psc->sov;
+
+    MPIU_Assert(ppSock);
+    MPIU_Assert(g_init_called);
+
+    *ppSock = nullptr;
+
+    // Reject descriptions longer than the supported maximum.
+    if( MPIU_Strlen( host_description, SOCKI_DESCRIPTION_LENGTH + 1 ) == SIZE_MAX )
+    {
+        return MPIU_ERR_NOMEM();
+    }
+
+    /* setup the structures */
+    // Until the connect completes, a close must use the hard-close path.
+    connect_state = sock_create_state(sock_hard_close_socket);
+    if (connect_state == nullptr)
+    {
+        return MPIU_ERR_NOMEM();
+    }
+
+    connect_state->set = set;
+
+    sock_init_overlapped(pov, connect_state, ConnectSucceeded, ConnectFailed);
+
+    // Expand/filter the description into a double-NUL-terminated list of
+    // candidate endpoints stored in the overlapped context.
+    mpi_errno = save_valid_endpoints(host_description, pov->connect.host_description, _countof(pov->connect.host_description), port, usemask);
+    if (mpi_errno != MPI_SUCCESS)
+    {
+        Trace_SOCKETS_Error_MPIDU_Sock_post_connect_endpoints(mpi_errno, host_description, port);
+        sock_free_state(connect_state);
+        return MPIU_ERR_FAIL(mpi_errno);
+    }
+
+    /* create a socket */
+    mpi_errno = MPIDU_Sock_create_native_fd(&connect_state->sock);
+    if (mpi_errno != MPI_SUCCESS)
+    {
+        sock_free_state(connect_state);
+        return MPIU_ERR_FAIL(mpi_errno);
+    }
+
+    //
+    // ConnectEx requires the socket to be bound
+    //
+    gle = gle_bind_any(connect_state->sock);
+    if(gle != NO_ERROR)
+    {
+        *ppSock = nullptr;
+        Trace_SOCKETS_Error_MPIDU_Sock_post_connect_gle_bind_any(gle, get_error_string(gle));
+        sock_free_state(connect_state);
+        return MPIU_E_FAIL_GLE2(gle);
+    }
+    MPIU_Assert(connect_state->sock != INVALID_SOCKET);
+    /* associate the socket with the completion port */
+    ExAttachHandle(set, (HANDLE)connect_state->sock);
+
+    // Initialize per-connect bookkeeping: retry state and the cursor into
+    // the endpoint list built above.
+    pov->connect.error = MPI_SUCCESS;
+    pov->connect.port = port;
+    pov->connect.retry_count = 0;
+    pov->connect.retry_timer = nullptr;
+    pov->connect.cur_host = pov->connect.host_description;
+
+    gle = gle_connect_host(pov);
+    if(gle != NO_ERROR)
+    {
+        sock_free_state(connect_state);
+        return MPIU_E_FAIL_GLE2(gle);
+    }
+
+    *ppSock = connect_state;
+
+    return MPI_SUCCESS;
+}
+
+
+//
+// Completion callback for a hard close.  The socket handle was already
+// closed by sock_hard_close_socket; free the sock state and signal
+// completion to the upper layer.
+//
+_Success_(return==MPI_SUCCESS)
+static int HardCloseComplete(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+    sock_free_state(pov->sock);
+    return CallbackSuccess(pov, 0);
+}
+
+
+//
+// Close the socket immediately with no graceful shutdown: cancel any
+// in-progress connect, close the handle, then post a completion so that
+// HardCloseComplete frees the state on the completion thread.
+//
+void
+sock_hard_close_socket(
+    _Inout_ sock_state_t *sock,
+    _Inout_ sock_overlapped_t* pov
+    )
+{
+    sock_cancel_inprogress_connect(sock);
+    MPIU_Assert( sock->sock != INVALID_SOCKET );
+    closesocket(sock->sock);
+    sock->sock = INVALID_SOCKET;
+
+    // Defer freeing the state to the completion callback.
+    sock_init_overlapped(pov, sock, HardCloseComplete, HardCloseComplete);
+    ExPostOverlappedResult(sock->set, &pov->exov, 0, 0);
+}
+
+
+//
+// Completion callback invoked when the graceful-close dummy receive fails:
+// the FD_CLOSE signal will not arrive, so close the socket the hard way and
+// free the state.  On closesocket failure the error is traced and reported
+// through CallbackFailure.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+GracefulCloseFailed(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+    sock_state_t *sock = pov->sock;
+
+    //
+    // Nothing to do here, receive failed to read the FD_CLOSE signal (zero bytes on receive)
+    // close the socket the hard way.
+    //
+    MPIU_Assert( sock->sock != INVALID_SOCKET );
+    if (closesocket(sock->sock) == SOCKET_ERROR)
+    {
+        int gle = WSAGetLastError();
+        struct sockaddr_in addr;
+        int len = sizeof(addr);
+        //
+        // Best-effort: fetch the local address for the trace message.
+        // NOTE(review): the handle was just passed to closesocket, so this
+        // query may itself fail; the trace then reports whatever is in addr.
+        //
+        getsockname(sock->sock, reinterpret_cast<struct sockaddr*>(&addr), &len);
+        Trace_SOCKETS_Error_GracefulCloseFailed(gle, get_error_string(gle), inet_ntoa(addr.sin_addr), addr.sin_port);
+        int mpi_errno = MPIU_E_FAIL_GLE2(gle);
+        return CallbackFailure(pov, mpi_errno);
+    }
+    sock->sock = INVALID_SOCKET;
+    sock_free_state(sock);
+    return CallbackSuccess(pov, 0);
+}
+
+
+//
+// Post a small dummy overlapped receive so the completion port reports when
+// the peer closes its side (a zero-byte completion).  The received data is
+// discarded, so sharing one static buffer between sockets is harmless.
+// Returns the winsock error code from the receive (NO_ERROR on success).
+//
+_Success_(return==MPI_SUCCESS)
+static int
+GracefulDummyRecv(
+    _In_ const sock_state_t *sock,
+    _Inout_ sock_overlapped_t* pov)
+{
+    static char dummy_read_buffer[16];
+    static WSABUF dummy_iov = { sizeof(dummy_read_buffer), dummy_read_buffer };
+
+    return sock_safe_receive(sock->sock, &dummy_iov, 1, &pov->exov.ov);
+}
+
+
+//
+// Completion callback for the graceful-close dummy receive.  A zero-byte
+// completion is the FD_CLOSE indication: the peer finished sending, so the
+// socket can be closed and freed.  Any other completion means the peer is
+// still sending; repost the dummy receive and keep waiting.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+GracefulCloseSucceeded(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+    DWORD num_bytes = ExGetBytesTransferred(pexov);
+    sock_state_t *sock = pov->sock;
+
+    if(num_bytes == 0)
+    {
+        //
+        // This is a graceful shutdown, FD_CLOSE received. Free the socket
+        //
+        Trace_SOCKETS_Info_GracefulCloseSucceeded();
+
+        closesocket(sock->sock);
+        sock->sock = INVALID_SOCKET;
+        sock_free_state(sock);
+        return CallbackSuccess(pov, 0);
+    }
+
+    //
+    // The other side is still sending data; repost the dummy receive to
+    // identify the FD_CLOSE signal.
+    //
+    if(GracefulDummyRecv(sock, pov) == NO_ERROR)
+        return MPI_SUCCESS;
+
+    // Repost failed: fall back to the hard-close error path.
+    return GracefulCloseFailed(&pov->exov);
+}
+
+
+//
+// Begin a graceful close: stop sending (shutdown SD_SEND) and post a dummy
+// receive that will complete with zero bytes when the peer closes its side.
+// If either step fails, fall back to an immediate hard close.
+//
+void
+sock_graceful_close_socket(
+    _Inout_ sock_state_t *sock,
+    _Inout_ sock_overlapped_t* pov
+    )
+{
+    /* Mark the socket as non-writable */
+    if (shutdown(sock->sock, SD_SEND) == SOCKET_ERROR)
+    {
+        sock_hard_close_socket(sock, pov);
+        return;
+    }
+
+    //
+    // Post a dummy receive to identify the FD_CLOSE signal
+    //
+    sock_init_overlapped(pov, sock, GracefulCloseSucceeded, GracefulCloseFailed);
+    if(GracefulDummyRecv(sock, pov) == NO_ERROR)
+        return;
+
+    sock_hard_close_socket(sock, pov);
+}
+
+
+//
+// Post an asynchronous close of the socket.  Marks the sock as closing and
+// dispatches to its close handler (hard or graceful, as selected when the
+// socket was set up); completion is reported through psc's callbacks.
+// Must not be called twice on the same socket.
+//
+void
+MPIDU_Sock_post_close(
+    _Inout_ sock_state_t *sock,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    sock_overlapped_t* pov = &psc->sov;
+
+    MPIU_Assert(g_init_called);
+    MPIU_Assert(!sock->closing);
+    sock->closing = TRUE;
+
+    sock->pfnClose(sock, pov);
+}
+
+
+//
+// Completion callback for a failed overlapped receive: convert the
+// overlapped result into an MPI error and report it to the upper layer.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+ReadFailed(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+    int mpi_errno = MPIU_E_FAIL_GLE2(sock_get_overlapped_result(pov));
+    return CallbackFailure(pov, mpi_errno);
+}
+
+
+//
+// Completion callback for a successful overlapped receive.  A zero-byte
+// completion means the peer closed the connection.  Otherwise account for
+// the received bytes and repost the receive until either the iov is drained
+// or the caller's minimum byte count is satisfied.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+ReadSucceeded(
+    _Inout_ EXOVERLAPPED* pexov)
+{
+    int gle;
+    int mpi_errno;
+    DWORD num_bytes = ExGetBytesTransferred(pexov);
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+
+    if(num_bytes == 0)
+    {
+        //
+        // Use MPI_SUCCESS as the error *class* to indicate graceful close
+        //
+        Trace_SOCKETS_Error_ReadSucceeded_ConnectionClosed();
+        mpi_errno = MPIU_ERR_CREATE(MPI_SUCCESS, "**sock|connclosed");
+        return CallbackFailure(pov, mpi_errno);
+    }
+
+    // Account for the data and drop completed buffers from the iov.
+    pov->read.total += num_bytes;
+    TrimIOV(&pov->read.iov, &pov->read.iovlen, num_bytes);
+
+    // Done when the iov is exhausted or the minimum receive size is reached.
+    if((pov->read.iovlen == 0) || (pov->read.total >= pov->read.min_recv))
+        return CallbackSuccess(pov, pov->read.total);
+
+    /* post a read of the remaining data */
+    gle = sock_safe_receive(pov->sock->sock, pov->read.iov, pov->read.iovlen, &pov->exov.ov);
+    if(gle == NO_ERROR)
+        return MPI_SUCCESS;
+
+    Trace_SOCKETS_Error_ReadSucceeded_Error(gle, get_error_string(gle));
+
+    mpi_errno = MPIU_E_FAIL_GLE2(gle);
+    return CallbackFailure(pov, mpi_errno);
+}
+
+
+//
+// Post an asynchronous read of up to 'len' bytes into 'buf', completing once
+// at least 'minbr' bytes have arrived.  Convenience wrapper: wraps the single
+// buffer in the context's scratch WSABUF and posts a one-element readv.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_read(
+    _Inout_ sock_state_t *sock,
+    _Out_writes_bytes_(len) void * buf,
+    _In_ MPIU_Bsize_t len,
+    _In_ MPIU_Bsize_t minbr,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    sock_overlapped_t* pov = &psc->sov;
+    pov->read.tmpiov.len = len;
+    // WSABUF::buf is CHAR*, hence the cast from void*.
+    pov->read.tmpiov.buf = static_cast<char*>( buf );
+    return MPIDU_Sock_post_readv(sock, &pov->read.tmpiov, 1, minbr, psc);
+}
+
+
+//
+// Post an asynchronous scatter read into 'iov'.  The operation completes
+// (via psc's callbacks) when the iov is filled or at least 'minbr' bytes
+// have been received; ReadSucceeded reposts partial receives as needed.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_readv(
+    _Inout_ sock_state_t *sock,
+    _In_reads_(iov_n)WSABUF * iov,
+    _In_ int iov_n,
+    _In_ MPIU_Bsize_t minbr,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    int gle;
+    sock_overlapped_t* pov = &psc->sov;
+
+    MPIU_Assert(g_init_called);
+
+    sock_init_overlapped(pov, sock, ReadSucceeded, ReadFailed);
+
+    /* strip any trailing empty buffers */
+    while (iov_n && iov[iov_n-1].len == 0)
+    {
+        iov_n--;
+    }
+
+    // Record the request so the completion callback can track progress.
+    pov->read.iov = iov;
+    pov->read.iovlen = iov_n;
+    pov->read.min_recv = minbr;
+    pov->read.total = 0;
+
+    gle = sock_safe_receive(sock->sock, pov->read.iov, iov_n, &pov->exov.ov);
+    if(gle != NO_ERROR)
+        return MPIU_E_FAIL_GLE2(gle);
+
+    return MPI_SUCCESS;
+}
+
+
+//
+// Completion callback for a failed overlapped send: convert the overlapped
+// result into an MPI error and report it to the upper layer.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+WriteFailed(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+    int mpi_errno = MPIU_E_FAIL_GLE2(sock_get_overlapped_result(pov));
+    return CallbackFailure(pov, mpi_errno);
+}
+
+
+//
+// Completion callback for a successful overlapped send.  Accounts for the
+// bytes sent and reposts the send until the entire iov has been written,
+// then reports the total to the upper layer.
+//
+_Success_(return==MPI_SUCCESS)
+static int
+WriteSucceeded(
+    _Inout_ EXOVERLAPPED* pexov
+    )
+{
+    int gle;
+    int mpi_errno;
+    DWORD nBytesSent;
+    DWORD num_bytes = ExGetBytesTransferred(pexov);
+    sock_overlapped_t* pov = sock_ov_from_exov(pexov);
+
+    pov->write.total += num_bytes;
+    TrimIOV(&pov->write.iov, &pov->write.iovlen, num_bytes);
+
+    if (pov->write.iovlen == 0)
+        return CallbackSuccess(pov, pov->write.total);
+
+    /* post a write of the remaining data */
+    // nBytesSent is required by the API but unused here: completion of an
+    // overlapped send is reported through the completion port instead.
+    gle = sock_safe_send(pov->sock->sock, pov->write.iov, pov->write.iovlen, &nBytesSent, &pov->exov.ov);
+    if(gle == NO_ERROR)
+        return MPI_SUCCESS;
+
+    mpi_errno = MPIU_E_FAIL_GLE2(gle);
+    return CallbackFailure(pov, mpi_errno);
+}
+
+
+//
+// Post an asynchronous write of 'min' bytes from 'buf'.  Convenience
+// wrapper: wraps the single buffer in the context's scratch WSABUF and
+// posts a one-element writev.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_write(
+    _Inout_ sock_state_t *sock,
+    _In_reads_bytes_(min) const void* buf,
+    _In_ MPIU_Bsize_t min,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    sock_overlapped_t* pov = &psc->sov;
+    pov->write.tmpiov.len = min;
+    //
+    // Fix: the buffer pointer was stored into read.tmpiov while
+    // write.tmpiov was passed to post_writev; both fields of the write
+    // scratch iov must be set together.  WSABUF::buf is CHAR*, hence the
+    // const_cast/static_cast pair.
+    //
+    pov->write.tmpiov.buf = static_cast<char*>( const_cast<void*>( buf ) );
+    return MPIDU_Sock_post_writev(sock, &pov->write.tmpiov, 1, psc);
+}
+
+
+//
+// Post an asynchronous gather write of 'iov'.  The operation completes (via
+// psc's callbacks) when the entire iov has been sent; WriteSucceeded reposts
+// partial sends as needed.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_post_writev(
+    _Inout_ sock_state_t *sock,
+    _In_reads_(iov_n) WSABUF* iov,
+    _In_ int iov_n,
+    _Inout_ MPIDU_Sock_context_t* psc
+    )
+{
+    int gle;
+    DWORD nBytesSent;
+    sock_overlapped_t* pov = &psc->sov;
+
+    // Consistency with MPIDU_Sock_post_readv: the module must be initialized.
+    MPIU_Assert(g_init_called);
+
+    sock_init_overlapped(pov, sock, WriteSucceeded, WriteFailed);
+
+    /* strip any trailing empty buffers */
+    while (iov_n && iov[iov_n-1].len == 0)
+    {
+        iov_n--;
+    }
+
+    // Record the request so the completion callback can track progress.
+    pov->write.iov = iov;
+    pov->write.iovlen = iov_n;
+    pov->write.total = 0;
+
+    gle = sock_safe_send(sock->sock, pov->write.iov, iov_n, &nBytesSent, &pov->exov.ov);
+    if(gle != NO_ERROR)
+        return MPIU_E_FAIL_GLE2(gle);
+
+    return MPI_SUCCESS;
+}
+
+
+//
+// Synchronously close the socket and free its state.  Unlike
+// MPIDU_Sock_post_close no callback is invoked; outstanding I/O is
+// cancelled first.
+//
+void
+MPIDU_Sock_close(
+    _In_ _Post_invalid_ sock_state_t *sock
+    )
+{
+    MPIU_Assert(g_init_called);
+    MPIU_Assert(!sock->closing);
+
+    //
+    // Synchronous close can not be used while connect is in progress because the sock state
+    // is being freed here. Doing so will result in Access Violation or Memory Corruption. (see
+    // comment in ConnectFailed). The assertion below is only a partial check to see that no
+    // connect timer is active. There is no validation when a connect is actually posted.
+    //
+    MPIU_Assert(sock->connect.pov == nullptr);
+
+    MPIU_Assert( sock->sock != INVALID_SOCKET );
+    // A SOCKET is a kernel handle; cancel any pending overlapped I/O on it.
+    CancelIoEx( reinterpret_cast<HANDLE>(sock->sock), nullptr );
+    closesocket(sock->sock);
+    sock->sock = INVALID_SOCKET;
+    sock_free_state(sock);
+}
+
+
+//
+// Synchronous (non-overlapped) gather write.  On success *num_written holds
+// the byte count actually sent.  WSAEWOULDBLOCK is treated as success with
+// zero bytes written, letting the caller retry later; any other winsock
+// error is converted to an MPI error.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_writev(
+    _In_ const sock_state_t * const sock,
+    _In_reads_(iov_n) WSABUF * iov,
+    _In_ int iov_n,
+    _Out_ MPIU_Bsize_t * num_written
+    )
+{
+    DWORD num_written_local;
+    int gle = sock_safe_send(sock->sock, iov, iov_n, &num_written_local, nullptr /*overlapped*/);
+    if(gle == NO_ERROR)
+    {
+        *num_written = num_written_local;
+        return MPI_SUCCESS;
+    }
+
+    *num_written = 0;
+
+    if(gle == WSAEWOULDBLOCK)
+        return MPI_SUCCESS;
+
+    return MPIU_E_FAIL_GLE2(gle);
+}
+
+
+//
+// Return the native socket handle as an int (used as an identifier, e.g.
+// for logging), or -1 for the invalid sock sentinel.
+// NOTE(review): on 64-bit Windows SOCKET is 64 bits wide, so this cast
+// truncates the handle value — acceptable only as an identifier.
+//
+_Success_(return >=0)
+int
+MPIDU_Sock_get_sock_id(
+    _In_ const sock_state_t * const sock
+    )
+{
+    if (sock == MPIDU_SOCK_INVALID_SOCK)
+        return -1;
+
+    return (int)sock->sock;
+}
+
+
+//
+// Enable TCP keep-alive on the socket via SIO_KEEPALIVE_VALS so that a dead
+// peer is detected even on an otherwise idle connection.  Returns
+// MPI_SUCCESS or an MPI error wrapping the winsock failure code.
+//
+_Success_(return==MPI_SUCCESS)
+int
+MPIDU_Sock_keepalive(
+    _In_ const sock_state_t * const sock
+    )
+{
+    int rc;
+    DWORD nbytes;
+
+    //
+    // After 5 minutes of no network activity send up to ten keep-alive packets,
+    // while waiting 10 seconds between successive unacknowledged packets.
+    //
+    struct tcp_keepalive ka;
+    ka.onoff = 1;
+    ka.keepalivetime = 5*60*1000;        // idle time before probing, ms
+    ka.keepaliveinterval = 10*1000;      // interval between probes, ms
+
+    rc = WSAIoctl(sock->sock, SIO_KEEPALIVE_VALS, &ka, sizeof(ka), nullptr, 0, &nbytes, nullptr, nullptr);
+    if(rc == NO_ERROR)
+        return MPI_SUCCESS;
+
+    rc = WSAGetLastError();
+    Trace_SOCKETS_Error_MPIDU_Sock_keepalive(rc, get_error_string(rc));
+    return MPIU_E_FAIL_GLE2(rc);
+}
diff --git a/src/mpi/common/traceManifest.vcxproj b/src/mpi/common/traceManifest.vcxproj
new file mode 100644
index 0000000..f80e80b
--- /dev/null
+++ b/src/mpi/common/traceManifest.vcxproj
@@ -0,0 +1,30 @@
+
+
+
+ {A697D69E-7F67-457F-9194-267C73C5B8CF}
+
+
+
+
+ false
+ None
+ WindowsUserModeDriver10.0
+ None
+
+
+
+
+
+
+ true
+ $(MPI_SRC_ROOT)\common\$(O)
+ true
+ $(MPI_SRC_ROOT)\common\$(O)
+ MpiTraceEvents
+ true
+ Trace
+ EVENT_
+
+
+
+
\ No newline at end of file
diff --git a/src/mpi/common/util.h b/src/mpi/common/util.h
new file mode 100644
index 0000000..be20394
--- /dev/null
+++ b/src/mpi/common/util.h
@@ -0,0 +1,171 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+//
+// Summary:
+// Ensure that the OS version is greater than or equal to the specified version.
+//
+// Parameters:
+// major - Windows major version
+// minor - Windows minor version
+//
+_Success_(return!=FALSE)
+BOOL
+CheckOSVersion(
+ _In_ DWORD major,
+ _In_ DWORD minor
+ );
+
+
+//
+// Summary:
+// Check if the smpd instance is running on azure and if so,
+// return the logical name of the node
+//
+// Input:
+// szBuffer: the size of the name buffer
+//
+// Output:
+// buffer : store the logical name. If null, name is not returned
+//
+// Return:
+// true if the node is on azure
+// false if the node is not on azure, or if the size of the buffer is
+// too small
+//
+//
+bool get_azure_node_logical_name(
+ _Out_opt_z_cap_(szBuffer) wchar_t* buffer,
+ _In_ DWORD szBuffer
+ );
+
+
+//
+// We use a looping count means because the 99% case, the
+// bits will all be sequential in the low 32bits of the
+// value. Most uses will be to count 2 to 8 bits, so the
+// loop will be less overall overhead for the 99% case.
+//
+template
+inline UINT8 CountBits( T value )
+{
+ UINT8 c = 0;
+ while( value != 0 )
+ {
+ c += static_cast(value & 1);
+ value >>= 1;
+ }
+ return c;
+}
+
+
+//
+// Return the largest power of two that is <= value (value must be nonzero).
+//
+inline ULONG PowerOf2Floor( _In_range_(>, 0) ULONG value )
+{
+    MPIU_Assert( value != 0 );
+
+    ULONG msb;
+    _BitScanReverse( &msb, value );
+
+    // Shift an unsigned 1: for msb == 31 a signed '1 << 31' would overflow.
+    return 1ul << msb;
+}
+
+
+//
+// Return true when 'value' is an exact power of two (value must be nonzero).
+// A power of two has exactly one bit set, so clearing the lowest set bit
+// with (value & (value - 1)) yields zero.
+//
+inline bool IsPowerOf2( _In_range_(>, 0) ULONG value )
+{
+    MPIU_Assert( value != 0 );
+
+    return ((value & (value - 1)) == 0);
+}
+
+
+// Largest input for which the ceiling still fits in a ULONG.
+#define POWER_OF_2_CEILING_LIMIT 0x80000000
+//
+// Return the smallest power of two that is >= value.  Exact powers of two
+// are returned unchanged; otherwise the next bit above the MSB is used
+// (safe: non-powers below the limit have msb <= 30, so 2 << msb fits).
+//
+inline ULONG PowerOf2Ceiling( _In_range_(1, POWER_OF_2_CEILING_LIMIT) ULONG value )
+{
+    MPIU_Assert( value != 0 && value <= POWER_OF_2_CEILING_LIMIT );
+
+    if( IsPowerOf2( value ) )
+    {
+        return value;
+    }
+
+    ULONG msb;
+    _BitScanReverse( &msb, value );
+
+    return 2 << msb;
+}
+
+
+//
+// Returns the size of the binomial subtree with root 'rank' in a tree of 'size'
+// total nodes.
+//
+inline
+unsigned
+TreeSize(
+ _In_range_(0, size - 1) unsigned rank,
+ _In_range_(>, 0) unsigned size
+ )
+{
+ MPIU_Assert( size > 0 );
+ MPIU_Assert( rank < size );
+
+ ULONG k;
+ if( _BitScanForward( &k, rank ) == 0 )
+ {
+ MPIU_Assert( rank == 0 );
+ return size;
+ }
+
+ k = 1 << k;
+ return k < size - rank ? k : size - rank;
+}
+
+
+//
+// Returns the number of children in the biniomial subtree with root 'rank' in
+// a tree of 'size' total nodes.
+//
+inline
+unsigned
+ChildCount(
+ _In_range_(0, size - 1) unsigned rank,
+ _In_range_(>, 0) unsigned size
+ )
+{
+ MPIU_Assert( size > 0 );
+ MPIU_Assert( rank < size );
+
+ unsigned treeSize = TreeSize( rank, size );
+ MPIU_Assert(treeSize > 0);
+
+ ULONG msb;
+ _BitScanReverse( &msb, treeSize );
+
+ return IsPowerOf2( treeSize ) ? msb : msb + 1;
+}
+
+
+//
+// Returns max value in an array
+//
+inline
+int
+MaxElement(
+ _In_reads_(size) const int cnts[],
+ _In_range_(>, 0) unsigned size
+ )
+{
+ MPIU_Assert(size > 0);
+
+ int maxElement = cnts[0];
+ for (unsigned i = 1; i < size; ++i)
+ {
+ if (cnts[i] > maxElement)
+ {
+ maxElement = cnts[i];
+ }
+ }
+ return maxElement;
+}
\ No newline at end of file
diff --git a/src/mpi/dirs.proj b/src/mpi/dirs.proj
new file mode 100644
index 0000000..d8e9358
--- /dev/null
+++ b/src/mpi/dirs.proj
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/mpi/mpiexec/MpiexecPmiDbg.cpp b/src/mpi/mpiexec/MpiexecPmiDbg.cpp
new file mode 100644
index 0000000..bbefe66
--- /dev/null
+++ b/src/mpi/mpiexec/MpiexecPmiDbg.cpp
@@ -0,0 +1,160 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "mpiexec.h"
+#include "PmiDbgImpl.h"
+
+//
+// Globals exported with C linkage (unmangled names).  The MPIR_* names
+// follow the debugger process-acquisition naming convention — presumably
+// read/written by an attached parallel debugger; verify against the PmiDbg
+// extension contract.
+//
+extern "C"
+{
+__declspec(dllexport) MPIR_PROCDESC* MPIR_Proctable;
+__declspec(dllexport) int MPIR_Proctable_size;
+__declspec(dllexport) volatile int MPIR_debug_state = 0;
+__declspec(dllexport) volatile int MPIR_being_debugged = 0;
+}
+
+//
+// FW define the callback functions.
+//
+static FN_PmiDbgControl MpiexecPmiDbgControlBeforeCreateProcesses;
+
+
+//
+// Define the notification events in Mpiexec
+//
+const PMIDBG_NOTIFICATION MpiexecNotifyInitialize =
+{
+ PMIDBG_NOTIFY_INITIALIZE,
+ NULL
+};
+
+const PMIDBG_NOTIFICATION MpiexecNotifyFinalize =
+{
+ PMIDBG_NOTIFY_FINALIZE,
+ NULL
+};
+
+const PMIDBG_NOTIFICATION MpiexecNotifyBeforeCreateProcesses =
+{
+ PMIDBG_NOTIFY_BEFORE_CREATE_PROCESSES,
+ MpiexecPmiDbgControlBeforeCreateProcesses
+};
+
+
+const PMIDBG_NOTIFICATION MpiexecNotifyAfterCreateProcesses =
+{
+ PMIDBG_NOTIFY_AFTER_CREATE_PROCESSES,
+ NULL
+};
+
+
+//
+// Control callback for the BEFORE_CREATE_PROCESSES notification.  pData
+// carries a va_list of (smpd_global_t*, smpd_host_t*); pBuffer/cbBuffer is
+// the caller-supplied output area whose interpretation depends on 'type'.
+//
+// NOTE(review): several casts below lost their template arguments during
+// extraction (e.g. "reinterpret_cast(pData)", "static_cast( pBuffer )");
+// the intended target types must be restored from the original source
+// before this compiles.
+//
+static HRESULT __stdcall
+MpiexecPmiDbgControlBeforeCreateProcesses(
+    __in PMIDBG_OPCODE_TYPE type,
+    __in void* pData,
+    __inout_bcount(cbBuffer) void* pBuffer,
+    __in SIZE_T cbBuffer
+    )
+{
+    va_list args = reinterpret_cast(pData);
+    smpd_global_t* pSmpdProcess = va_arg(args,smpd_global_t*);
+    smpd_host_t* pHosts = va_arg(args,smpd_host_t*);
+    switch(type)
+    {
+    case PMIDBG_OPCODE_GET_WORLD_SIZE:
+        {
+            // Output: the world size (process count) as an int.
+            if( cbBuffer < sizeof(pSmpdProcess->nproc) )
+            {
+                return HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+            }
+            *(reinterpret_cast(pBuffer)) = static_cast( pSmpdProcess->nproc );
+        }
+        break;
+    case PMIDBG_OPCODE_ENUM_WORLD_NODES:
+        {
+            // Cursor-style enumeration over the host list: Context carries
+            // the current smpd_host_t* between calls.
+            PMIDBG_ENUM_WORLD_NODES* pEnum;
+            smpd_host_t* pEntry;
+
+            if( cbBuffer < sizeof(*pEnum) )
+            {
+                return HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+            }
+
+            pEnum = static_cast( pBuffer );
+
+            //
+            // If they pass in a list that is already at the end, we error
+            //
+            if( pEnum->Context == PMIDBG_ENUM_END )
+            {
+                return E_INVALIDARG;
+            }
+
+            if( pEnum->Context == PMIDBG_ENUM_BEGIN )
+            {
+                //
+                // If they are requesting the begin
+                //
+                pEntry = pHosts;
+            }
+            else
+            {
+                //
+                // Else use the context value passed in
+                // NOTE: PMIDBG_ENUM_BEGIN == 0, so null is not possible
+                //
+                pEntry = static_cast(
+                    reinterpret_cast( pEnum->Context )->Next);
+            }
+
+            if( NULL == pEntry )
+            {
+                pEnum->Context = PMIDBG_ENUM_END;
+            }
+            else
+            {
+                pEnum->Context = reinterpret_cast( pEntry );
+                // Convert the wide host name to ANSI for the consumer.
+                MPIU_WideCharToMultiByte( pEntry->name, &pEntry->nameA );
+
+                pEnum->Hostname = pEntry->nameA;
+            }
+        }
+        break;
+    case PMIDBG_OPCODE_GET_PROCSIZE_ADDR:
+        {
+            // Output: the address of MPIR_Proctable_size.
+            if( cbBuffer < sizeof(&MPIR_Proctable_size) )
+            {
+                return HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+            }
+            *(reinterpret_cast(pBuffer)) = &MPIR_Proctable_size;
+        }
+        break;
+    case PMIDBG_OPCODE_GET_PROCTABLE_ADDR:
+        {
+            // Output: the address of the MPIR_Proctable pointer.
+            if( cbBuffer < sizeof(MPIR_Proctable) )
+            {
+                return HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+            }
+            *(reinterpret_cast(pBuffer)) = &MPIR_Proctable;
+        }
+        break;
+    case PMIDBG_OPCODE_GET_DEBUG_MODE:
+        {
+            // Output: launch vs. attach mode, derived from
+            // MPIR_being_debugged.  NOTE(review): mapping being_debugged==1
+            // to LAUNCH (else ATTACH) looks inverted relative to the usual
+            // MPIR convention — confirm against the original source.
+            if( cbBuffer < sizeof(MPIDBG_DBG_MODE) )
+            {
+                return HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+            }
+            if( MPIR_being_debugged == 1 )
+            {
+                *(reinterpret_cast(pBuffer)) = MPIDBG_DBG_LAUNCH;
+            }
+            else
+            {
+                *(reinterpret_cast(pBuffer)) = MPIDBG_DBG_ATTACH;
+            }
+        }
+        break;
+    default:
+        return E_INVALIDARG;
+    }
+    return S_OK;
+}
diff --git a/src/mpi/mpiexec/configfile.cpp b/src/mpi/mpiexec/configfile.cpp
new file mode 100644
index 0000000..232cde9
--- /dev/null
+++ b/src/mpi/mpiexec/configfile.cpp
@@ -0,0 +1,209 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "smpd.h"
+#include
+
+
+//
+// configfile_size
+//
+// Calculate the configfile size to allocate by adding 3 chars for each line len for the block
+// seperator " : ".
+// Zero is returned in indicate an error and errno is set to the error value.
+//
+static size_t configfile_size(FILE* fin)
+{
+ size_t size = 0;
+ wchar_t buffer[128];
+
+ while(fgetws(buffer, _countof(buffer), fin))
+ {
+ //
+ // fgetws skips over NULL characters: it is unsafe to use strlen to get the number
+ // of bytes read, since the user may erroneously supply a binary file.
+ //
+ size += _countof(buffer) + 3;
+ }
+
+ if(ferror(fin))
+ return 0;
+
+ if(fseek(fin, 0, SEEK_SET) != 0)
+ return 0;
+
+ return size + 3;
+}
+
+
+//
+// read_configfile
+//
+// Read the complete configfile into a buffer ( ':' seperated) and return the buffer.
+// NULL is returned in indicate an error and errno is set to the error value.
+//
+static wchar_t* read_configfile(FILE* fin)
+{
+ size_t size = configfile_size(fin);
+ if(size == 0)
+ return NULL;
+
+ if(size > INT_MAX)
+ {
+ _set_errno(E2BIG);
+ return NULL;
+ }
+
+ wchar_t* cmdline = static_cast( malloc( size * sizeof(wchar_t) ) );
+ if(cmdline == NULL)
+ {
+ _set_errno(ENOMEM);
+ return NULL;
+ }
+
+ wchar_t* line = cmdline;
+ int concat = 0;
+ while(fgetws(line, (int)(size), fin))
+ {
+ ASSERT(size > 1);
+
+ wchar_t* p = const_cast(skip_ws(line));
+
+ //
+ // On comment, read the next line to the same buffer location.
+ // Comment lines do not terminate concatenation, allowing commenting out parts
+ // of a long block
+ //
+ if(*p == L'#')
+ continue;
+
+ //
+ // On whitespace lines, read the next line to the same buffer location.
+ // Note that whitespace lines terminate line concatenation, and append the block
+ // end sequence " : ".
+ //
+ if(*p == L'\0')
+ {
+ if(!concat)
+ continue;
+
+ p = line - 1;
+ }
+ else
+ {
+
+ //
+ // Trim whitespace at the the end of the line (remove CR or LF characters).
+ // N.B. The line contain at least one non whitespace character; thus this code
+ // will not underflow the line.
+ //
+ p += MPIU_Strlen( p ) - 1;
+ while( iswspace(*p) )
+ {
+ p--;
+ }
+
+ size -= p - line;
+ line = p;
+
+ //
+ // Line break marker; read the next line into the same location
+ //
+ if(*p == L'\\')
+ {
+ concat = 1;
+ continue;
+ }
+ }
+
+ concat = 0;
+ *++p = L' ';
+ *++p = L':';
+ *++p = L' ';
+ ++p;
+
+ size -= p - line;
+ line = p;
+ }
+
+ if(ferror(fin))
+ {
+ free(cmdline);
+ return NULL;
+ }
+
+ if((line - cmdline > 3) && *(line - 2) == ':')
+ {
+ line -= 3;
+ }
+
+ *line = L'\0';
+
+ return cmdline;
+}
+
+
+//
+// smpd_get_argv_from_file
+//
+// Read the entire config file and set argv.
+// NULL is returned to indicate success; Error string is returned to indicate error.
+//
+_Success_( return == NULL )
+_Ret_maybenull_
+PCWSTR
+smpd_get_argv_from_file(
+ _In_ PCWSTR filename,
+ _Outptr_ wchar_t ***argvp
+ )
+{
+ FILE* fin = _wfopen(filename, L"r");
+ if(fin == NULL)
+ {
+ const wchar_t* res = _wcserror(errno);
+ _Analysis_assume_( res != nullptr );
+ return res;
+ }
+
+ wchar_t* cmdline = read_configfile(fin);
+
+ fclose(fin);
+
+ if (cmdline == nullptr)
+ {
+ const wchar_t* res = _wcserror(errno);
+ _Analysis_assume_( res != nullptr );
+ return res;
+ }
+
+ int numargs;
+ int numchars;
+ smpd_unpack_cmdline(cmdline, NULL, NULL, &numargs, &numchars);
+
+ if(numargs <= 1)
+ {
+ free(cmdline);
+ return L"no commands in file";
+ }
+
+ wchar_t** argv = (wchar_t**)malloc(numargs * sizeof(wchar_t*) +
+ numchars * sizeof(wchar_t));
+ if(argv == NULL)
+ {
+ free(cmdline);
+ const wchar_t* res = _wcserror(ENOMEM);
+ _Analysis_assume_( res != nullptr );
+ return res;
+ }
+
+ smpd_unpack_cmdline(cmdline, argv, (wchar_t*)(argv + numargs), &numargs, &numchars);
+
+ free(cmdline);
+ *argvp = argv;
+ return NULL;
+}
diff --git a/src/mpi/mpiexec/machinefile.cpp b/src/mpi/mpiexec/machinefile.cpp
new file mode 100644
index 0000000..0ef6d71
--- /dev/null
+++ b/src/mpi/mpiexec/machinefile.cpp
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "smpd.h"
+#include
+#include
+
+//
+// machinefile_size
+//
+// Calculate the machinefile buffer size to allocate by adding 3 to the size of the file.
+// Zero is returned in indicate an error and errno is set to the error value.
+//
+static size_t machinefile_size(FILE* fin)
+{
+ size_t size = 0;
+ wchar_t buffer[128];
+
+
+ while(fgetws(buffer, _countof(buffer), fin))
+ {
+ //
+ // fgets skips over NULL characters: it is unsafe to use strlen to get the number
+ // of bytes read, since the user may erroneously supply a binary file.
+ //
+ size += _countof(buffer);
+ }
+
+ if(ferror(fin))
+ return 0;
+
+ if(fseek(fin, 0, SEEK_SET) != 0)
+ return 0;
+
+ return size + 3;
+}
+
+
+//
+// read_machinefile
+//
+// Read the complete machinefile into a buffer (space seperated), with the the number of
+// machines (lines) prepended to the buffer.
+// NULL is returned to indicate success; Error string is returned to indicate error.
+//
+_Success_( return == NULL )
+_Ret_maybenull_
+static PCWSTR
+read_machinefile(
+ _In_ FILE* fin,
+ _Outptr_result_z_ wchar_t** phosts
+ )
+{
+ const int x_space = 8;
+ size_t size = machinefile_size(fin);
+ if(size == 0)
+ return NULL;
+
+ if(size > INT_MAX)
+ return _wcserror(E2BIG);
+
+ size += x_space;
+
+ wchar_t* hosts = static_cast( malloc( size * sizeof(wchar_t) ) );
+ if(hosts == NULL)
+ {
+ return _wcserror(ENOMEM);
+ }
+
+ size_t hosts_len = size;
+ wchar_t* line = hosts;
+
+ //
+ // Prepend spaces
+ //
+ wmemset(line, L' ', x_space);
+ size -= x_space;
+ line += x_space;
+
+ int nhosts = 0;
+ while(fgetws(line, (int)(size), fin))
+ {
+ ASSERT(size > 1);
+
+ wchar_t* p = const_cast(skip_ws(line));
+
+ //
+ // On whitespace or comment, read the next line to the same buffer location.
+ //
+ if(*p == L'\0' || *p == L'#')
+ continue;
+
+ //
+ // Skip the machine name (must exist) and the optional whitespace and processors count.
+ //
+ p = const_cast(skip_graph(p));
+ p = const_cast(skip_ws(p));
+ p = const_cast(skip_digits(p));
+
+ //
+ // If we found explicit affinity masks, skip them so they get added to the string
+ // Explicit masks have the following format:
+ // hostname nproc,mask0[:group],...,maskN[:group]
+ //
+ while( *p == L',' )
+ {
+ p++;
+ if( *p != L'\0' )
+ {
+ p = const_cast(skip_hex(p));
+ if( *p == L':' )
+ {
+ p++;
+ if( *p != L'\0' )
+ {
+ p = const_cast(skip_digits(p));
+ }
+ };
+ }
+ }
+
+ p = const_cast(skip_ws(p));
+
+ if(*p != L'\0' && *p != L'#')
+ {
+ free(hosts);
+ return L"expecting a positive number of cores following the host name";
+ }
+
+ //
+ // Trim whitespace at the the end of the line (remove CR or LF characters).
+ //
+ --p;
+ while( iswspace(*p) )
+ {
+ p--;
+ }
+
+ *++p = L' ';
+ ++p;
+ size -= p - line;
+ line = p;
+ nhosts++;
+ }
+
+ if(ferror(fin))
+ {
+ free(hosts);
+ const wchar_t* res = _wcserror(errno);
+ _Analysis_assume_( res != nullptr );
+ return res;
+ }
+
+ if(nhosts == 0)
+ {
+ free(hosts);
+ return L"expecting host names in file";
+ }
+
+ *--line = L'\0';
+ _itow_s(nhosts, hosts, hosts_len, 10);
+ line = hosts + MPIU_Strlen( hosts, hosts_len );
+ *line = L' ';
+
+ *phosts = hosts;
+ return NULL;
+}
+
+
+//
+// smpd_get_hosts_from_file
+//
+// Read the entire machinefile into a string.
+// NULL is returned to indicate success; Error string is returned to indicate error.
+//
+_Success_( return == NULL )
+_Ret_maybenull_
+PCWSTR
+smpd_get_hosts_from_file(
+ _In_ PCWSTR filename,
+ _Outptr_result_z_ wchar_t** phosts
+ )
+{
+ FILE* fin = _wfopen(filename, L"r");
+ if(fin == NULL)
+ {
+ const wchar_t* res = _wcserror(errno);
+ _Analysis_assume_( res != nullptr );
+ return res;
+ }
+
+ const wchar_t* error = read_machinefile(fin, phosts);
+ fclose(fin);
+ return error;
+}
diff --git a/src/mpi/mpiexec/mp_parse_command_line.cpp b/src/mpi/mpiexec/mp_parse_command_line.cpp
new file mode 100644
index 0000000..1fdf190
--- /dev/null
+++ b/src/mpi/mpiexec/mp_parse_command_line.cpp
@@ -0,0 +1,3404 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpiexec.h"
+#include
+#include
+
+#define ENV_MPIEXEC_AFFINITY_TABLE L"MPIEXEC_AFFINITY_TABLE"
+#define ENV_MPIEXEC_HWTREE_TABLE L"MPIEXEC_HWTREE_TABLE"
+
+//
+// mp_print_options
+//
+// Print the short mpiexec usage banner (common options and examples) to
+// stdout, stamped with the MS-MPI version and build numbers.
+//
+void mp_print_options(void)
+{
+ wprintf(
+ L"Microsoft MPI Startup Program [Version %d.%d.%d.%d]%s\n"
+ L"\n"
+ L"Launches an application on multiple hosts.\n"
+ L"\n"
+ L"Usage:\n"
+ L"\n"
+ L" mpiexec [options] executable [args] [ : [options] exe [args] : ... ]\n"
+ L" mpiexec -configfile \n"
+ L"\n"
+ L"Common options:\n"
+ L"\n"
+ L"-n \n"
+ L"-env \n"
+ L"-wdir \n"
+ L"-hosts n host1 [m1] host2 [m2] ... hostn [mn]\n"
+ L"-cores \n"
+ L"-lines\n"
+ L"-debug [0-3]\n"
+ L"-logfile \n"
+ L"\n"
+ L"Examples:\n"
+ L"\n"
+ L" mpiexec -n 4 pi.exe\n"
+ L" mpiexec -hosts 1 server1 master : -n 8 worker\n"
+ L"\n"
+ L"For a complete list of options, run mpiexec -help2\n"
+ L"For a list of environment variables, run mpiexec -help3\n"
+ L"\n"
+ L"You can reach the Microsoft MPI team via email at askmpi@microsoft.com\n",
+ MSMPI_VER_MAJOR(MSMPI_VER_EX),
+ MSMPI_VER_MINOR(MSMPI_VER_EX),
+ _BLDNUMMAJOR,
+ _BLDNUMMINOR,
+ MSMPI_BUILD_LABEL
+ );
+}
+
+
+//
+// mp_print_extra_options
+//
+// Print the full option reference for mpiexec (-help2 / -??) to stdout.
+// Static: only reachable through the help-option dispatch in this file.
+//
+static void mp_print_extra_options(void)
+{
+ wprintf(
+ L"Launches an application on multiple hosts.\n"
+ L"\n"
+ L"Usage:\n"
+ L"\n"
+ L" mpiexec [options] executable [args] [ : [options] exe [args] : ... ]\n"
+ L" mpiexec -configfile \n"
+ L"\n"
+ L"All options:\n"
+ L"\n"
+ L"-configfile \n"
+ L" Read mpiexec command line from .\n"
+ L" The lines of filename are command line sections of the form:\n"
+ L" [options] executable [arguments]\n"
+ L" The command may span multiple lines by terminating a line with '\\'.\n"
+ L" Comment lines begin with '#', and empty lines are ignored.\n"
+ L"\n"
+ L"-optionfile \n"
+ L" Read mpiexec command options from .\n"
+ L" The lines of are command options of the form:\n"
+ L" [option 1] [option 2] ... [option n]\n"
+ L" The options may span multiple lines by terminating a line with '\\'.\n"
+ L" Comment lines begin with '#' and empty lines are ignored.\n"
+ L" Additional options may be used as part of the command line.\n"
+ L"\n"
+ L"-n \n"
+ L"-np \n"
+ L" Launch the specified number of processes.\n"
+ L"\n"
+ L"-n *\n"
+ L"-np *\n"
+ L" Launch one process on each available core. The absence of the -n option is\n"
+ L" equivalent to -n *.\n"
+ L"\n"
+ L"-machinefile \n"
+ L" Read the list of hosts from on which to run the application.\n"
+ L" The format is one host per line, optionally followed by the number of cores.\n"
+ L" Comments are from '#' to end of line, and empty lines are ignored.\n"
+ L" The -n * option uses the sum of cores in the file.\n"
+ L"\n"
+ L"-host \n"
+ L" Launch the application on .\n"
+ L" The -n * option uses 1 core.\n"
+ L"\n"
+ L"-hosts n host1 [m1][,mask[:group]] host2 [m2] ... hostn [mn]\n"
+ L" Launch the application on n hosts with m(i) processes on host(i).\n"
+ L" The number of processes on each host is optional, and defaults to 1.\n"
+ L" The total number of processes is the sum of m1 + ... + mn.\n"
+ L" For each process count, an optional list of affinity masks can be specified\n"
+ L" that will assign the process to run on specific cores. The group can be \n"
+ L" specified for systems that support > 64 logical cores.\n"
+ L"\n"
+ L"-c \n"
+ L"-cores \n"
+ L" Set hosts to cores each. This option overrides the cores count\n"
+ L" specified for each host by the -hosts or the -machinefile options in all sections.\n"
+ L"\n"
+ L"-a\n"
+ L"-affinity\n"
+ L" Set the affinity mask to a single core for each of the launched processes,\n"
+ L" spreading them as far as possible throughout the available cores. This is the\n"
+ L" equivalent of using the \"-affinity_layout spr:L\" option.\n"
+ L"\n"
+ L"-al [:]\n"
+ L"-al [::]\n"
+ L"-affinity_layout [:] \n"
+ L"-affinity_layout [::]\n"
+ L" Set the algorithm used to distribute launched processes to the compute cores,\n"
+ L" and optionally specify the stride and the affinity target. The process\n"
+ L" affinity is set to the specified target. Setting the stride in addition to\n"
+ L" the target provides finer granularity (for example, \"spr:P:L\" will spread\n"
+ L" processes across physical cores, and will bind each process to a single\n"
+ L" logical core). If no stride is specified, it is assumed to be the same as the\n"
+ L" target (for example, \"seq:p\" is the same as \"seq:p:p\"). Specifying affinity\n"
+ L" with this option overrides any setting for the MPIEXEC_AFFINITY environment\n"
+ L" variable.\n"
+ L"\n"
+ L" The following table lists the values for the parameter:\n"
+ L" Value Description\n"
+ L" -------- -----------------------------------------------------------------\n"
+ L" 0 No affinity (overrides any setting for MPIEXEC_AFFINITY).\n"
+ L" 1 or spr Spread: Distribute the processes as far as possible. (Default)\n"
+ L" 2 or seq Sequential: Distribute the processes sequentially.\n"
+ L" 3 or bal Balanced: Distribute the processes over the available NUMA nodes.\n"
+ L"\n"
+ L" The following table lists the values for the and parameters:\n"
+ L" Value Description\n"
+ L" -------- -----------------------------------------------------------------\n"
+ L" l or L Assign each process to a logical core. (Default)\n"
+ L" p or P Assign each process to a physical core.\n"
+ L" n or N Assign each process to a NUMA node.\n"
+ L"\n"
+ L"-aa\n"
+ L"-affinity_auto\n"
+ L" affinity_auto complements affinity_layout's behavior. It targets the\n"
+ L" case where multiple jobs are running on the same node. When it is set, mpiexec\n"
+ L" - Reads in the affinity settings which are published by other jobs running\n"
+ L" on the same node, and determines the cores that are in use.\n"
+ L" - Runs the specified MPIEXEC_AFFINITY algorithm avoiding the cores that are\n"
+ L" in use, and calculates the affinity setting for this job.\n"
+ L" - Publishes the calculated affinity settings so that upcoming jobs can avoid\n"
+ L" the cores that are in use.\n"
+ L" This way, multiple jobs on the same node can use the cores in a mutually exclusive\n"
+ L" manner.\n"
+ L"\n"
+ L"-dir \n"
+ L"-wdir \n"
+ L" Set the working directory for the launched application. The directory may be\n"
+ L" a local or remote path and may include environment variables to be expanded at\n"
+ L" the target host. The maximum accepted length of the path is 260 characters.\n"
+ L"\n"
+ L"-env \n"
+ L" Set an environment variable for the launched application.\n"
+ L"\n"
+ L"-genvlist [,env2,env3,...]\n"
+ L" Pass the values of the specified environment variables to the launched\n"
+ L" application. The list is a comma separated list of environment variables.\n"
+ L"\n"
+ L"-exitcodes\n"
+ L" Print the processes exit codes at the end of the run.\n"
+ L"\n"
+ L"-priority {0-4}\n"
+ L" Set the process startup priority class.\n"
+ L" The priority values are: 0=idle, 1=below, 2=normal, 3=above, 4=high.\n"
+ L" the default is -priority normal.\n"
+ L"\n"
+ L"-p \n"
+ L"-port \n"
+ L" Specify the port that smpd is listening on.\n"
+ L"\n"
+ L"-path [;...]\n"
+ L" Search for the application on the specified path on the target host.\n"
+ L" To specify multiple paths, separate paths with a semicolon ';'.\n"
+ L" Does not replace or append the PATH environment variable.\n"
+ L"\n"
+ L"-timeout \n"
+ L" Set the timeout for the job.\n"
+ L"\n"
+ L"-job \n"
+ L" Associate the application with a job created by the Windows HPC Server.\n"
+ L"\n"
+ L"-l\n"
+ L"-lines\n"
+ L" Prefix the output with the process rank.\n"
+ L"\n"
+ L"-d [level]\n"
+ L"-debug [level]\n"
+ L" Print debug output to stderr. Level is: 0=none, 1=error, 2=debug 3=both.\n"
+ L" When level is not specified '2=debug' is used.\n"
+ L"\n"
+ L"-logFile \n"
+ L" Redirect logs to the given file.\n"
+ L"\n"
+ L"-genv, -gpath, -gdir, -gwdir, -ghost, -gmachinefile\n"
+ L" These options are the global version of the corresponding option affecting all\n"
+ L" sections of the command line.\n"
+ L"\n"
+ L"-pwd \n"
+ L"Authenticate the user with the provided password. This option is only valid when\n"
+ L"MS-MPI Launch Service is being used.\n"
+ L"\n"
+ L"-saveCreds\n"
+ L"Notify the launch service to save credentials. This option is only valid when -pwd\n"
+ L"is provided.\n"
+ L"After a successful invocation of saveCreds, it is not necessary to provide\n"
+ L"the password with -pwd unless the password is changed.\n"
+ L"\n"
+ L"-unicode\n"
+ L"Switch mpiexec output to unicode stream. This only affects the output of mpiexec.\n"
+ L"Unicode path and executables are supported with or without this option.\n"
+ L"\n"
+ L"-?\n"
+ L"-help\n"
+ L" Display a list of common options for mpiexec command line.\n"
+ L"\n"
+ L"-??\n"
+ L"-help2\n"
+ L" Display this help message.\n"
+ L"\n"
+ L"-???\n"
+ L"-help3\n"
+ L" List environment variables.\n"
+ L"\n"
+ L"\n"
+ L"Examples:\n"
+ L"\n"
+ L" Run four pi.exe processes on the local host with four cores:\n"
+ L" mpiexec pi.exe\n"
+ L" mpiexec -n * pi\n"
+ L"\n"
+ L" Run one master process and three worker processes on the local host with four\n"
+ L" cores:\n"
+ L" mpiexec -n 1 master : worker\n"
+ L"\n"
+ L" Run one master process and 31 worker processes on the hosts listed in the\n"
+ L" hosts.txt file (which lists four hosts with 8 cores each):\n"
+ L" mpiexec -gmachinefile hosts.txt -n 1 master : worker\n"
+ );
+}
+
+
+static void mp_print_environment_variables(void)
+{
+ wprintf(
+ L"MPIEXEC environment variables:\n"
+ L"These environment variables are equivalent to the command line options and take\n"
+ L"effect only if the equivalent command line option is not specified. These\n"
+ L"environment variables should be set before mpiexec is launched.\n"
+ L"\n"
+ L"Environment variables with numerical values will use the closer of the\n"
+ L"specified maximum or minimum allowed value for that environment variable if the\n"
+ L"specified value is out of range.\n"
+ L"\n"
+ L"MPIEXEC_AFFINITY=[[:]] or [[::]]\n"
+ L" Set the algorithm used to distribute launched processes to the compute cores,\n"
+ L" and optionally specify the stride and the affinity target. The process\n"
+ L" affinity is set to the specified target. Setting the stride in addition to\n"
+ L" the target provides finer granularity (for example, \"spr:p:l\" will spread\n"
+ L" processes across physical cores, and will bind each process to a single\n"
+ L" logical core). If no stride is specified, it is assumed to be the same as the\n"
+ L" target (for example, \"seq:p\" is the same as \"seq:p:p\"). Specifying affinity\n"
+ L" with the -affinity or -affinity_layout options overrides any setting on this\n"
+ L" environment variable.\n"
+ L" \n"
+ L" The following table lists the values for the parameter:\n"
+ L" Value Description\n"
+ L" -------- -----------------------------------------------------------------\n"
+ L" 0 No affinity (overrides any setting for MPIEXEC_AFFINITY).\n"
+ L" 1 or spr Spread: Distribute the processes as far as possible.\n"
+ L" 2 or seq Sequential: Distribute the processes sequentially.\n"
+ L" 3 or bal Balanced: Distribute the processes over the available NUMA nodes.\n"
+ L"\n"
+ L" The following table lists the values for the and parameters:\n"
+ L" Value Description\n"
+ L" -------- -----------------------------------------------------------------\n"
+ L" l or L Assign each process to a logical core. (Default)\n"
+ L" p or P Assign each process to a physical core.\n"
+ L" n or N Assign each process to a NUMA node.\n"
+ L"\n"
+ L"MPIEXEC_AFFINITY_AUTO=[0|1]\n"
+ L" MPIEXEC_AFFINITY_AUTO complements MPIEXEC_AFFINITY's behavior. It targets the\n"
+ L" case where multiple jobs are running on the same node. When it is set, mpiexec\n"
+ L" - Reads in the affinity settings which are published by other jobs running\n"
+ L" on the same node, and determines the cores that are in use.\n"
+ L" - Runs the specified MPIEXEC_AFFINITY algorithm avoiding the cores that are\n"
+ L" in use, and calculates the affinity setting for this job.\n"
+ L" - Publishes the calculated affinity settings so that upcoming jobs can avoid\n"
+ L" the cores that are in use.\n"
+ L" This way, multiple jobs on the same node can use the cores in a mutually exclusive\n"
+ L" manner.\n"
+ L"\n"
+ L"MPIEXEC_TIMEOUT=seconds\n"
+ L" Set the timeout for the job.\n"
+ L"\n"
+ L"MPIEXEC_CONNECT_RETRIES\n"
+ L" Set the number of retries for connection failures for mpiexec and smpd.\n"
+ L" The default value is 12.\n"
+ L"\n"
+ L"MPIEXEC_CONNECT_RETRY_INTERVAL\n"
+ L" Set the number of seconds to wait before retrying a failed connection.\n"
+ L" The default value is 5.\n"
+ L"\n"
+ L"MPIEXEC_DISABLE_KERB=[0|1|2]\n"
+ L" When set to 1, MS-MPI process management (mpiexec and smpd) will not use Kerberos.\n"
+ L" When set to 2, MS-MPI process management will use NTLM.\n"
+ L"\n"
+ L"MPICH environment variables:\n"
+ L"The MPICH environment variables are set using the -env, -genv or -genvlist\n"
+ L"command line options. These variables are visible to the launched application\n"
+ L"and are affecting its execution.\n"
+ L"\n"
+ L"MPICH_NETMASK=address/subnet\n"
+ L" When set, limits the Sockets and Network Direct interconnects to use only\n"
+ L" connections that match the network mask. For example, the following value will\n"
+ L" use only networks that match 10.0.0.x.: \n"
+ L" -env MPICH_NETMASK 10.0.0.5/255.255.255.0\n"
+ L" or\n"
+ L" -env MPICH_NETMASK 10.0.0.5/24\n"
+ L"\n"
+ L"MPICH_SOCKET_BUFFER_SIZE=size (bytes)\n"
+ L" Set the Sockets send and receive buffer sizes in bytes (SO_SNDBUF and\n"
+ L" SO_RCVBUF). The default is 32768.\n"
+ L"\n"
+ L"MPICH_SOCKET_RBUFFER_SIZE=size (bytes)\n"
+ L" Set the Sockets receive buffer size in bytes (SO_RCVBUF).\n"
+ L" Overrides any values specified by MPICH_SOCKET_BUFFER_SIZE.\n"
+ L" The default is 32768.\n"
+ L"\n"
+ L"MPICH_SOCKET_SBUFFER_SIZE=size (bytes)\n"
+ L" Set the Sockets send buffer size in bytes (SO_SNDBUF).\n"
+ L" Overrides any value specified by MPICH_SOCKET_BUFFER_SIZE.\n"
+ L" The default is 32768.\n"
+ L"\n"
+ L"MPICH_PORT_RANGE=min,max\n"
+ L" **Deprecated** see MSMPI_PORT_RANGE.\n"
+ L"\n"
+ L"MPICH_DISABLE_ND\n"
+ L" **Deprecated** see MSMPI_DISABLE_ND.\n"
+ L"\n"
+ L"MPICH_DISABLE_SHM\n"
+ L" **Deprecated** See MSMPI_DISABLE_SHM.\n"
+ L"\n"
+ L"MPICH_DISABLE_SOCK=[0|1]\n"
+ L" **Deprecated** See MSMPI_DISABLE_SOCK.\n"
+ L"\n"
+ L"MPICH_PROGRESS_SPIN_LIMIT\n"
+ L" **Deprecated** See MSMPI_PROGRESS_SPIN_LIMIT.\n"
+ L"\n"
+ L"MPICH_SHM_EAGER_LIMIT\n"
+ L" **Deprecated** See MSMPI_SHM_EAGER_LIMIT.\n"
+ L"\n"
+ L"MPICH_SOCK_EAGER_LIMIT\n"
+ L" **Deprecated** See MSMPI_SOCK_EAGER_LIMIT.\n"
+ L"\n"
+ L"MPICH_ND_EAGER_LIMIT\n"
+ L" **Deprecated** See MSMPI_ND_EAGER_LIMIT.\n"
+ L"\n"
+ L"MPICH_ND_ENABLE_FALLBACK\n"
+ L" **Deprecated** See MSMPI_ND_ENABLE_FALLBACK.\n"
+ L"\n"
+ L"MPICH_ND_ZCOPY_THRESHOLD\n"
+ L" **Deprecated** See MSMPI_ND_ZCOPY_THRESHOLD.\n"
+ L"\n"
+ L"MPICH_ND_MR_CACHE_SIZE\n"
+ L" **Deprecated** See MSMPI_ND_MR_CACHE_SIZE.\n"
+ L"\n"
+ L"MPICH_CONNECT_RETRIES\n"
+ L" **Deprecated** See MSMPI_CONNECT_RETRIES.\n"
+ L"\n"
+ L"MPICH_INIT_BREAK\n"
+ L" **Deprecated** See MSMPI_INIT_BREAK.\n"
+ L"\n"
+ L"MPICH_CONNECTIVITY_TABLE\n"
+ L" **Deprecated** See MSMPI_CONNECTIVITY_TABLE.\n"
+ L"\n"
+ L"MSMPI environment variables:\n"
+ L"The MSMPI environment variables are set using the -env, -genv or -genvlist\n"
+ L"command line options. These variables are visible to the launched application\n"
+ L"and affect its execution.\n"
+ L"\n"
+ L"MSMPI_PORT_RANGE=min,max\n"
+ L" Set the Sockets listener port range.\n"
+ L"\n"
+ L"MSMPI_ND_PORT_RANGE=min,max\n"
+ L" Set the Network Direct listener port range.\n"
+ L"\n"
+ L"MSMPI_DISABLE_ND=[0|1]\n"
+ L" When set to 1, disables the use of the Network Direct interconnect.\n"
+ L"\n"
+ L"MSMPI_DISABLE_SHM=[0|1]\n"
+ L" When set to 1, disables the use of the Shared Memory interconnect\n"
+ L"\n"
+ L"MSMPI_DISABLE_SOCK=[0|1]\n"
+ L" When set to 1, disables the use of the Sockets interconnect.\n"
+ L"\n"
+ L"MSMPI_PROGRESS_SPIN_LIMIT=number\n"
+ L" Set the progress engine fixed spin count limit (1 - 2G).\n"
+ L" The default of 0 uses an adaptive spin count limit.\n"
+ L" For oversubscribed cores use a low value fixed spin limit (e.g., 16)\n"
+ L"\n"
+ L"MSMPI_SHM_EAGER_LIMIT=size (bytes)\n"
+ L" Set the message size above which to use the rendezvous protocol for shared\n"
+ L" memory communication. The default is 128000 (1500 - 2G).\n"
+ L"\n"
+ L"MSMPI_SOCK_EAGER_LIMIT=size (bytes)\n"
+ L" Set the message size above which to use the rendezvous protocol for sockets\n"
+ L" communication. The default is 128000 (1500 - 2G).\n"
+ L"\n"
+ L"MSMPI_ND_EAGER_LIMIT=size (bytes)\n"
+ L" Set the message size above which to use the rendezvous protocol for\n"
+ L" Network Direct communication. The default is 128000 (1500 - 2G).\n"
+ L"\n"
+ L"MSMPI_ND_ENABLE_FALLBACK=[0|1]\n"
+ L" When set to 1, enables the use of the sockets interconnect if the Network\n"
+ L" Direct interconnect is enabled but connection over Network Direct fails.\n"
+ L"\n"
+ L"MSMPI_ND_ZCOPY_THRESHOLD=size (bytes)\n"
+ L" Set the message size above which to perform zcopy transfers.\n"
+ L" The default value of -1 disables zcopy transfers.\n"
+ L" The value 0 uses the threshold indicated by the Network Direct provider.\n"
+ L"\n"
+ L"MSMPI_ND_MR_CACHE_SIZE=size (MB)\n"
+ L" Set the size in megabytes of the Network Direct memory registration cache.\n"
+ L" The default is half of physical memory divided by the number of cores.\n"
+ L"\n"
+ L"MSMPI_ND_SENDQ_DEPTH=number\n"
+ L" Set the maximum number of sends that can be outstanding on a Network\n"
+ L" Direct QueuePair, from 1 to 128, rounded up to the nearest power of two\n"
+ L" (default 16). Applies only to Network Direct v2.\n"
+ L"\n"
+ L"MSMPI_ND_RECVQ_DEPTH=number\n"
+ L" Set the maximum number of receives that can be outstanding on a Network\n"
+ L" Direct QueuePair, from 2 to 128, rounded up to the nearest power of two\n"
+ L" (default 128). Applies only to Network Direct v2.\n"
+ L"\n"
+ L"MSMPI_CONNECT_RETRIES=n\n"
+ L" Set the number of times to retry Network Direct or Socket connection.\n"
+ L" The default is 5.\n"
+ L"\n"
+ L"MSMPI_INIT_BREAK=[preinit|all|*|]\n"
+ L" When set, the application debug breaks at MPI initialization.\n"
+ L" preinit - break before MPI is initialized on all ranks.\n"
+ L" all - break after MPI is initialized on all ranks.\n"
+ L" * - break after MPI is initialized on all ranks.\n"
+ L" - break after MPI is initialized on ranks specified in .\n"
+ L" The rank range is in the form a,c-e; where a c and e are decimal integers.\n"
+ L"\n"
+ L"MSMPI_CONNECTIVITY_TABLE=[0|1]\n"
+ L" When set to 1, displays information about the communication channels used.\n"
+ L"\n"
+ L"MSMPI_SOCK_COMPRESSION_THRESHOLD=n\n"
+ L" When set, the MPI library attempts to compress all messages communicated using\n"
+ L" the sockets channel that are larger, in bytes, than the specified threshold\n"
+ L" (threshold values that are below the minimum will be rounded up to the minimum\n"
+ L" threshold of 512).\n"
+ L"\n"
+ L"MSMPI_HA_COLLECTIVE=[all|]\n"
+ L" Specifies which hierarchy-aware collective algorithms to use. These algorithms\n"
+ L" rely on the hierarchy of rank interconnects to achive better performance.\n"
+ L" The default is to enable all available HA algorithms.\n"
+ L" off - disable all hierarchy aware algorithms\n"
+ L" all - enable hierarchy awareness for Bcast, Barrier, Reduce,\n"
+ L" and Allreduce operations.\n"
+ L" - enable hierarchy awareness for one or more of Bcast,\n"
+ L" Barrier, Reduce, and Allreduce.\n"
+ L" The collectives are specified in the form a[,b]*; where a, b are one of Bcast,\n"
+ L" Barrier, Reduce, or Allreduce. Any combination of operations may be\n"
+ L" specified.\n"
+ L"\n"
+ L"MSMPI_TUNE_COLLECTIVE=[all|]\n"
+ L" When set, the MPI library runs a series of trials to determine what data size\n"
+ L" to use for various algorithms that make up a collective operation.\n"
+ L" all - tune all collective operations that have multiple algorithms.\n"
+ L" - tune specified collective operations to optimize performance.\n"
+ L" The collectives are specified in the form of a[,b]* where a, b are one of\n"
+ L" Bcast, Reduce, Allreduce, Gather, Allgather, Reducescatter, and Alltoall. Any\n"
+ L" combination of operations may be specified.\n"
+ L"\n"
+ L"MSMPI_TUNE_PRINT_SETTINGS=[optionfile|cluscfg|mpiexec]\n"
+ L" When set in concert with MSMPI_TUNE_COLLECTIVE, the MPI library produces\n"
+ L" the values that are determined to be optimal for selecting the available\n"
+ L" collective algorithms.\n"
+ L" optionfile - print the values determined for optimal performance in\n"
+ L" format, one on each line. The resulting file can be\n"
+ L" used with the -optionfile argument.\n"
+ L" cluscfg - print the values determined for optimal performance in a script\n"
+ L" format that will set the environment via cluscfg.\n"
+ L" mpiexec - print the values determined for optimal performance in a block of\n"
+ L" -env flags that can be passed to mpiexec.\n"
+ L"\n"
+ L"MSMPI_TUNE_SETTINGS_FILE=\n"
+ L" When used in concert with MSMPI_TUNE_COLLECTIVE writes the output of tuning to\n"
+ L" the specified file. The default is to write the output on the console. The\n"
+ L" output is always written by rank 0.\n"
+ L"\n"
+ L"MSMPI_TUNE_TIME_LIMIT=n\n"
+ L" When set in concert with MSMPI_TUNE_COLLECTIVE, changes the default limit\n"
+ L" used, in seconds, for running the trials to optimize the collective\n"
+ L" operations. This time limit is a suggestion to the MPI library and does not\n"
+ L" represent a hard limit. Every collective that is tuned is run a minimum of\n"
+ L" five times for each data size. The default time limit is 60 seconds.\n"
+ L"\n"
+ L"MSMPI_TUNE_ITERATION_LIMIT=n\n"
+ L" When set in concert with MSMPI_TUNE_COLLECTIVE, changes the default maximum\n"
+ L" number of trials for each data size and algorithm. The default iteration limit\n"
+ L" is 10000. The minimum value is five (5).\n"
+ L"\n"
+ L"MSMPI_TUNE_SIZE_LIMIT=n\n"
+ L" When set in concert with MSMPI_TUNE_COLLECTIVE, changes the default maximum\n"
+ L" data size in bytes, to attempt for time trials of collective algorithms. Every\n"
+ L" data size that is a power of two that is less than the size limit is tested.\n"
+ L" The default size limit is 16777216. The minimum value is one (1).\n"
+ L"\n"
+ L"MSMPI_TUNE_VERBOSE=[0|1|2]\n"
+ L" When set in concert with MSMPI_TUNE_COLLECTIVE, the MPI library produces\n"
+ L" verbose output while running the trials to optimize the collective operations.\n"
+ L" Verbose output is off by default. All output is written to the console by\n"
+ L" rank 0.\n"
+ L" 0 - verbose output is turned off.\n"
+ L" 1 - print data tables.\n"
+ L" 2 - debug output.\n"
+ L"\n"
+ L"MSMPI_PRECONNECT=[all|*|