Mirror of https://github.com/microsoft/git.git

Merge branch 'vfs-2.44.0' into for-each-repo-stop-on-error

Commit 86c0809ed8
@@ -6,6 +6,7 @@
*.pm text eol=lf diff=perl
*.py text eol=lf diff=python
*.bat text eol=crlf
*.png binary
CODE_OF_CONDUCT.md -whitespace
/Documentation/**/*.txt text eol=lf
/command-list.txt text eol=lf
@@ -0,0 +1,68 @@
- [ ] I was not able to find an [open](https://github.com/microsoft/git/issues?q=is%3Aopen)
  or [closed](https://github.com/microsoft/git/issues?q=is%3Aclosed) issue matching
  what I'm seeing, including in [the `git-for-windows/git` tracker](https://github.com/git-for-windows/git/issues).

### Setup

- Which version of `microsoft/git` are you using? Is it 32-bit or 64-bit?

```
$ git --version --build-options

** insert your machine's response here **
```

Are you using Scalar or VFS for Git?

** insert your answer here **

If VFS for Git, then what version?

```
$ gvfs version

** insert your machine's response here **
```

- Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?

```
$ cmd.exe /c ver

** insert your machine's response here **
```

- Any other interesting things about your environment that might be related
  to the issue you're seeing?

** insert your response here **

### Details

- Which terminal/shell are you running Git from? e.g. Bash/CMD/PowerShell/other

** insert your response here **

- What commands did you run to trigger this issue? If you can provide a
  [Minimal, Complete, and Verifiable example](http://stackoverflow.com/help/mcve)
  this will help us understand the issue.

```
** insert your commands here **
```

- What did you expect to occur after running these commands?

** insert here **

- What actually happened instead?

** insert here **

- If the problem was occurring with a specific repository, can you specify
  the repository?

  * [ ] Public repo: **insert URL here**
  * [ ] Windows monorepo
  * [ ] Office monorepo
  * [ ] Other Microsoft-internal repo: **insert name here**
  * [ ] Other internal repo.
@@ -1,10 +1,10 @@
-Thanks for taking the time to contribute to Git! Please be advised that the
-Git community does not use github.com for their contributions. Instead, we use
-a mailing list (git@vger.kernel.org) for code submissions, code reviews, and
-bug reports. Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
-to conveniently send your Pull Request's commits to our mailing list.
+Thanks for taking the time to contribute to Git!
 
-For a single-commit pull request, please *leave the pull request description
-empty*: your commit message itself should describe your changes.
+This fork contains changes specific to monorepo scenarios. If you are an
+external contributor, then please detail your reason for submitting to
+this fork:
 
-Please read the "guidelines for contributing" linked above!
+* [ ] This is an early version of work already under review upstream.
+* [ ] This change only applies to interactions with Azure DevOps and the
+  GVFS Protocol.
+* [ ] This change only applies to the virtualization hook and VFS for Git.
@@ -0,0 +1,10 @@
# Configuration for sentiment-bot - https://github.com/behaviorbot/sentiment-bot

# *Required* toxicity threshold between 0 and .99 with the higher numbers being
# the most toxic. Anything higher than this threshold will be marked as toxic
# and commented on
sentimentBotToxicityThreshold: .7

# *Required* Comment to reply with
sentimentBotReplyComment: >
  Please be sure to review the code of conduct and be respectful of other users. cc/ @git-for-windows/trusted-git-for-windows-developers
@@ -0,0 +1,13 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
# especially
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot#enabling-dependabot-version-updates-for-actions

version: 2
updates:
  - package-ecosystem: "github-actions" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"
@@ -0,0 +1,157 @@
SHELL := /bin/bash
SUDO := sudo
C_INCLUDE_PATH := /usr/include
CPLUS_INCLUDE_PATH := /usr/include
LD_LIBRARY_PATH := /usr/lib

OSX_VERSION := $(shell sw_vers -productVersion)
TARGET_FLAGS := -mmacosx-version-min=$(OSX_VERSION) -DMACOSX_DEPLOYMENT_TARGET=$(OSX_VERSION)

uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')

ARCH_UNIV := universal
ARCH_FLAGS := -arch x86_64 -arch arm64

CFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS)
LDFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS)

PREFIX := /usr/local
GIT_PREFIX := $(PREFIX)/git

BUILD_DIR := $(GITHUB_WORKSPACE)/payload
DESTDIR := $(PWD)/stage/git-$(ARCH_UNIV)-$(VERSION)
ARTIFACTDIR := build-artifacts
SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) GIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)" DESTDIR=$(DESTDIR)
CORES := $(shell bash -c "sysctl hw.ncpu | awk '{print \$$2}'")

# Guard against environment variables
APPLE_APP_IDENTITY =
APPLE_INSTALLER_IDENTITY =
APPLE_KEYCHAIN_PROFILE =

.PHONY: image pkg payload codesign notarize

.SECONDARY:

$(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(ARCH_UNIV):
	rm -f $(BUILD_DIR)/git-$(VERSION)/osx-installed*
	mkdir -p $(DESTDIR)$(GIT_PREFIX)
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-built-keychain:
	cd $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain; $(SUBMAKE) CFLAGS="$(CFLAGS) -g -O2 -Wall"
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-built:
	[ -d $(DESTDIR)$(GIT_PREFIX) ] && $(SUDO) rm -rf $(DESTDIR) || echo ok
	cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) -j $(CORES) all strip
	echo "================"
	echo "Dumping Linkage"
	cd $(BUILD_DIR)/git-$(VERSION); ./git version
	echo "===="
	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git
	echo "===="
	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-http-fetch
	echo "===="
	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-http-push
	echo "===="
	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-remote-http
	echo "===="
	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-gvfs-helper
	echo "================"
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-installed-bin: $(BUILD_DIR)/git-$(VERSION)/osx-built $(BUILD_DIR)/git-$(VERSION)/osx-built-keychain
	cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) install
	cp $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain $(DESTDIR)$(GIT_PREFIX)/bin/git-credential-osxkeychain
	mkdir -p $(DESTDIR)$(GIT_PREFIX)/contrib/completion
	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.bash $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.zsh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-prompt.sh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
	# This is needed for Git-Gui, GitK
	mkdir -p $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl
	[ ! -f $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm ] && cp $(BUILD_DIR)/git-$(VERSION)/perl/private-Error.pm $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm || echo done
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-installed-man: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
	mkdir -p $(DESTDIR)$(GIT_PREFIX)/share/man
	cp -R $(GITHUB_WORKSPACE)/manpages/ $(DESTDIR)$(GIT_PREFIX)/share/man
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-built-subtree:
	cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" all git-subtree.1
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree: $(BUILD_DIR)/git-$(VERSION)/osx-built-subtree
	mkdir -p $(DESTDIR)
	cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" install install-man
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-installed-assets: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
	mkdir -p $(DESTDIR)$(GIT_PREFIX)/etc
	cat assets/etc/gitconfig.osxkeychain >> $(DESTDIR)$(GIT_PREFIX)/etc/gitconfig
	cp assets/uninstall.sh $(DESTDIR)$(GIT_PREFIX)/uninstall.sh
	sh -c "echo .DS_Store >> $(DESTDIR)$(GIT_PREFIX)/share/git-core/templates/info/exclude"

symlinks:
	mkdir -p $(ARTIFACTDIR)$(PREFIX)/bin
	cd $(ARTIFACTDIR)$(PREFIX)/bin; find ../git/bin -type f -exec ln -sf {} \;
	for man in man1 man3 man5 man7; do mkdir -p $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; (cd $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; ln -sf ../../../git/share/man/$$man/* ./); done
	ruby ../scripts/symlink-git-hardlinks.rb $(ARTIFACTDIR)
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-installed: $(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(ARCH_UNIV) $(BUILD_DIR)/git-$(VERSION)/osx-installed-man $(BUILD_DIR)/git-$(VERSION)/osx-installed-assets $(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree
	find $(DESTDIR)$(GIT_PREFIX) -type d -exec chmod ugo+rx {} \;
	find $(DESTDIR)$(GIT_PREFIX) -type f -exec chmod ugo+r {} \;
	touch $@

$(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_UNIV): $(BUILD_DIR)/git-$(VERSION)/osx-built
	file $(BUILD_DIR)/git-$(VERSION)/git
	file $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain
	touch $@

disk-image/VERSION-$(VERSION)-$(ARCH_UNIV):
	rm -f disk-image/*.pkg disk-image/VERSION-* disk-image/.DS_Store
	mkdir -p disk-image
	touch "$@"

pkg_cmd := pkgbuild --identifier com.git.pkg --version $(VERSION) \
	--root $(ARTIFACTDIR)$(PREFIX) --scripts assets/scripts \
	--install-location $(PREFIX) --component-plist ./assets/git-components.plist

ifdef APPLE_INSTALLER_IDENTITY
pkg_cmd += --sign "$(APPLE_INSTALLER_IDENTITY)"
endif

pkg_cmd += disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg
disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg: disk-image/VERSION-$(VERSION)-$(ARCH_UNIV) symlinks
	$(pkg_cmd)

git-%-$(ARCH_UNIV).dmg:
	hdiutil create git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) $(ARCH_UNIV)" -ov 2>&1 | tee err || { \
		grep "Resource busy" err && \
		sleep 5 && \
		hdiutil create git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) $(ARCH_UNIV)" -ov; }
	hdiutil convert -format UDZO -o $@ git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg
	rm -f git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg

payload: $(BUILD_DIR)/git-$(VERSION)/osx-installed $(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_UNIV)

pkg: disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg

image: git-$(VERSION)-$(ARCH_UNIV).dmg

ifdef APPLE_APP_IDENTITY
codesign:
	@$(CURDIR)/../scripts/codesign.sh --payload="build-artifacts/usr/local/git" \
		--identity="$(APPLE_APP_IDENTITY)" \
		--entitlements="$(CURDIR)/entitlements.xml"
endif

# Notarization can only happen if the package is fully signed
ifdef APPLE_KEYCHAIN_PROFILE
notarize:
	@$(CURDIR)/../scripts/notarize.sh \
		--package="disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg" \
		--keychain-profile="$(APPLE_KEYCHAIN_PROFILE)"
endif
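As a usage sketch (not part of the commit itself): the release workflow later in this diff drives these targets roughly as follows, with `VERSION`, `GITHUB_WORKSPACE`, and the signing variables supplied by the CI environment. The version and identity strings below are illustrative placeholders:

```
$ make VERSION=2.44.0.vfs.0.0 payload
$ make VERSION=2.44.0.vfs.0.0 codesign APPLE_APP_IDENTITY="Developer ID Application: ..."
$ make VERSION=2.44.0.vfs.0.0 pkg APPLE_INSTALLER_IDENTITY="Developer ID Installer: ..."
$ make VERSION=2.44.0.vfs.0.0 notarize APPLE_KEYCHAIN_PROFILE="<keychain-profile>"
$ make VERSION=2.44.0.vfs.0.0 image
```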
@@ -0,0 +1,2 @@
[credential]
	helper = osxkeychain
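The `osx-installed-assets` target above appends this snippet to the installed `$(GIT_PREFIX)/etc/gitconfig`. After installation, one way to confirm which file supplies the helper (output shown is illustrative):

```
$ git config --show-origin --get credential.helper
file:/usr/local/git/etc/gitconfig	osxkeychain
```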
@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<array>
	<dict>
		<key>BundleHasStrictIdentifier</key>
		<true/>
		<key>BundleIsRelocatable</key>
		<false/>
		<key>BundleIsVersionChecked</key>
		<true/>
		<key>BundleOverwriteAction</key>
		<string>upgrade</string>
		<key>RootRelativeBundlePath</key>
		<string>git/share/git-gui/lib/Git Gui.app</string>
	</dict>
</array>
</plist>
@@ -0,0 +1,62 @@
#!/bin/bash
INSTALL_DST="$2"
SCALAR_C_CMD="$INSTALL_DST/git/bin/scalar"
SCALAR_DOTNET_CMD="/usr/local/scalar/scalar"
SCALAR_UNINSTALL_SCRIPT="/usr/local/scalar/uninstall_scalar.sh"

function cleanupScalar()
{
	echo "checking whether Scalar was installed"
	if [ ! -f "$SCALAR_C_CMD" ]; then
		echo "Scalar not installed; exiting..."
		return 0
	fi
	echo "Scalar is installed!"

	echo "looking for Scalar.NET"
	if [ ! -f "$SCALAR_DOTNET_CMD" ]; then
		echo "Scalar.NET not found; exiting..."
		return 0
	fi
	echo "Scalar.NET found!"

	currentUser=$(echo "show State:/Users/ConsoleUser" | scutil | awk '/Name :/ { print $3 }')

	# Re-register Scalar.NET repositories with the newly-installed Scalar
	for repo in $($SCALAR_DOTNET_CMD list); do
	(
		PATH="$INSTALL_DST/git/bin:$PATH"
		sudo -u "$currentUser" scalar register "$repo" || \
			echo "warning: skipping re-registration of $repo"
	)
	done

	# Uninstall Scalar.NET
	echo "removing Scalar.NET"

	# Add /usr/local/bin to path - default install location of Homebrew
	PATH="/usr/local/bin:$PATH"
	if (sudo -u "$currentUser" brew list --cask scalar); then
		# Remove from Homebrew
		sudo -u "$currentUser" brew remove --cask scalar || echo "warning: Scalar.NET uninstall via Homebrew completed with code $?"
		echo "Scalar.NET uninstalled via Homebrew!"
	elif (sudo -u "$currentUser" brew list --cask scalar-azrepos); then
		sudo -u "$currentUser" brew remove --cask scalar-azrepos || echo "warning: Scalar.NET with GVFS uninstall via Homebrew completed with code $?"
		echo "Scalar.NET with GVFS uninstalled via Homebrew!"
	elif [ -f "$SCALAR_UNINSTALL_SCRIPT" ]; then
		# If not installed with Homebrew, manually remove package
		sudo -S sh "$SCALAR_UNINSTALL_SCRIPT" || echo "warning: Scalar.NET uninstall completed with code $?"
		echo "Scalar.NET uninstalled!"
	else
		echo "warning: Scalar.NET uninstall script not found"
	fi

	# Re-create the Scalar symlink, in case it was removed by the Scalar.NET uninstall operation
	mkdir -p "$INSTALL_DST/bin"
	/bin/ln -Fs "$SCALAR_C_CMD" "$INSTALL_DST/bin/scalar"
}

# Run Scalar cleanup (will exit if not applicable)
cleanupScalar

exit 0
@@ -0,0 +1,34 @@
#!/bin/bash -e
if [ ! -r "/usr/local/git" ]; then
	echo "Git doesn't appear to be installed via this installer. Aborting"
	exit 1
fi

if [ "$1" != "--yes" ]; then
	echo "This will uninstall git by removing /usr/local/git/ and its symlinks"
	printf "Type 'yes' if you are sure you wish to continue: "
	read response
else
	response="yes"
fi

if [ "$response" == "yes" ]; then
	# remove all of the symlinks we've created
	pkgutil --files com.git.pkg | grep bin | while read f; do
		if [ -L "/usr/local/$f" ]; then
			sudo rm "/usr/local/$f"
		fi
	done

	# forget receipts.
	pkgutil --packages | grep com.git.pkg | xargs -I {} sudo pkgutil --forget {}
	echo "Uninstalled"

	# The guts all go here.
	sudo rm -rf /usr/local/git/
else
	echo "Aborted"
	exit 1
fi

exit 0
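Usage is interactive by default; passing `--yes`, as sketched below, skips the confirmation prompt (the install path follows the Makefile above):

```
$ sudo /usr/local/git/uninstall.sh --yes
Uninstalled
```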
@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
	<key>com.apple.security.cs.allow-jit</key>
	<true/>
	<key>com.apple.security.cs.allow-unsigned-executable-memory</key>
	<true/>
	<key>com.apple.security.cs.disable-library-validation</key>
	<true/>
</dict>
</plist>
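These entitlements are attached by `codesign --options=runtime --entitlements` in the signing script that follows. As a hedged check, macOS can print the entitlements back from a signed binary:

```
$ codesign -d --entitlements - /usr/local/git/bin/git
```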
@@ -0,0 +1,65 @@
#!/bin/bash

die () {
	echo "$*" >&2
	exit 1
}

sign_directory () {
	(
	cd "$1"
	for f in *
	do
		macho=$(file --mime "$f" | grep mach)
		# Runtime sign dylibs and Mach-O binaries
		if [[ $f == *.dylib ]] || [ -n "$macho" ];
		then
			echo "Runtime Signing $f"
			codesign -s "$IDENTITY" "$f" --timestamp --force --options=runtime --entitlements "$ENTITLEMENTS_FILE"
		elif [ -d "$f" ];
		then
			echo "Signing files in subdirectory $f"
			sign_directory "$f"
		else
			echo "Signing $f"
			codesign -s "$IDENTITY" "$f" --timestamp --force
		fi
	done
	)
}

for i in "$@"
do
case "$i" in
	--payload=*)
	SIGN_DIR="${i#*=}"
	shift # past argument=value
	;;
	--identity=*)
	IDENTITY="${i#*=}"
	shift # past argument=value
	;;
	--entitlements=*)
	ENTITLEMENTS_FILE="${i#*=}"
	shift # past argument=value
	;;
	*)
	die "unknown option '$i'"
	;;
esac
done

if [ -z "$SIGN_DIR" ]; then
	echo "error: missing directory argument"
	exit 1
elif [ -z "$IDENTITY" ]; then
	echo "error: missing signing identity argument"
	exit 1
elif [ -z "$ENTITLEMENTS_FILE" ]; then
	echo "error: missing entitlements file argument"
	exit 1
fi

echo "======== INPUTS ========"
echo "Directory: $SIGN_DIR"
echo "Signing identity: $IDENTITY"
echo "Entitlements: $ENTITLEMENTS_FILE"
echo "======== END INPUTS ========"

sign_directory "$SIGN_DIR"
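Invocation mirrors the `codesign` target in the Makefile above; the identity string below is a placeholder:

```
$ ./codesign.sh --payload="build-artifacts/usr/local/git" \
      --identity="Developer ID Application: ..." \
      --entitlements="$PWD/entitlements.xml"
```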
@@ -0,0 +1,35 @@
#!/bin/bash

die () {
	echo "$*" >&2
	exit 1
}

for i in "$@"
do
case "$i" in
	--package=*)
	PACKAGE="${i#*=}"
	shift # past argument=value
	;;
	--keychain-profile=*)
	KEYCHAIN_PROFILE="${i#*=}"
	shift # past argument=value
	;;
	*)
	die "unknown option '$i'"
	;;
esac
done

if [ -z "$PACKAGE" ]; then
	echo "error: missing package argument"
	exit 1
elif [ -z "$KEYCHAIN_PROFILE" ]; then
	echo "error: missing keychain profile argument"
	exit 1
fi

# Exit as soon as any line fails
set -e

# Send the notarization request
xcrun notarytool submit -v "$PACKAGE" -p "$KEYCHAIN_PROFILE" --wait

# Staple the notarization ticket (to allow offline installation)
xcrun stapler staple -v "$PACKAGE"
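Invocation mirrors the `notarize` target in the Makefile above; the package name and profile below are placeholders:

```
$ ./notarize.sh --package="disk-image/git-2.44.0.vfs.0.0-universal.pkg" \
      --keychain-profile="<keychain-profile>"
```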
@@ -0,0 +1,19 @@
#!/usr/bin/env ruby

install_prefix = ARGV[0]
puts install_prefix
git_binary = File.join(install_prefix, '/usr/local/git/bin/git')

[
  ['git', File.join(install_prefix, '/usr/local/git/bin')],
  ['../../bin/git', File.join(install_prefix, '/usr/local/git/libexec/git-core')]
].each do |link, path|
  Dir.glob(File.join(path, '*')).each do |file|
    next if file == git_binary
    puts "#{file} #{File.size(file)} == #{File.size(git_binary)}"
    next unless File.size(file) == File.size(git_binary)
    puts "Symlinking #{file}"
    puts `ln -sf #{link} #{file}`
    exit $?.exitstatus if $?.exitstatus != 0
  end
end
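The `symlinks` target in the Makefile above runs this script as `ruby ../scripts/symlink-git-hardlinks.rb $(ARTIFACTDIR)`, i.e. roughly:

```
$ ruby symlink-git-hardlinks.rb build-artifacts
```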
@@ -0,0 +1,763 @@
name: build-git-installers

on:
  push:
    tags:
      - 'v[0-9]*vfs*' # matches "v<number><any characters>vfs<any characters>"

jobs:
  # Check prerequisites for the workflow
  prereqs:
    runs-on: ubuntu-latest
    environment: release
    outputs:
      tag_name: ${{ steps.tag.outputs.name }}       # The full name of the tag, e.g. v2.32.0.vfs.0.0
      tag_version: ${{ steps.tag.outputs.version }} # The version number (without preceding "v"), e.g. 2.32.0.vfs.0.0
    steps:
      - name: Validate tag
        run: |
          echo "$GITHUB_REF" |
          grep -E '^refs/tags/v2\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.vfs\.0\.(0|[1-9][0-9]*)(\.rc[0-9])?$' || {
            echo "::error::${GITHUB_REF#refs/tags/} is not of the form v2.<X>.<Y>.vfs.0.<W>[.rc<N>]" >&2
            exit 1
          }
      - name: Determine tag to build
        run: |
          echo "name=${GITHUB_REF#refs/tags/}" >>$GITHUB_OUTPUT
          echo "version=${GITHUB_REF#refs/tags/v}" >>$GITHUB_OUTPUT
        id: tag
      - name: Clone git
        uses: actions/checkout@v3
      - name: Validate the tag identified with trigger
        run: |
          die () {
            echo "::error::$*" >&2
            exit 1
          }

          # `actions/checkout` only downloads the peeled tag (i.e. the commit)
          git fetch origin +$GITHUB_REF:$GITHUB_REF

          # Verify that the tag is annotated
          test $(git cat-file -t "$GITHUB_REF") == "tag" || die "Tag ${{ steps.tag.outputs.name }} is not annotated"

          # Verify tag follows rules in GIT-VERSION-GEN (i.e., matches the specified "DEF_VER" in
          # GIT-VERSION-FILE) and matches tag determined from trigger
          make GIT-VERSION-FILE
          test "${{ steps.tag.outputs.version }}" == "$(sed -n 's/^GIT_VERSION = //p'< GIT-VERSION-FILE)" || die "GIT-VERSION-FILE tag does not match ${{ steps.tag.outputs.name }}"
  # End check prerequisites for the workflow
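To illustrate the validation pattern above, a few hypothetical tag names checked against it:

```
# Accepted:  refs/tags/v2.44.0.vfs.0.0, refs/tags/v2.44.0.vfs.0.1.rc0
# Rejected:  refs/tags/v2.44.0          (missing the ".vfs.0.<W>" suffix)
#            refs/tags/v2.44.0.vfs.1.0  ("vfs" must be followed by ".0.")
```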
  # Build Windows installers (x86_64 installer & portable)
  windows_pkg:
    runs-on: windows-2019
    environment: release
    needs: prereqs
    env:
      GPG_OPTIONS: "--batch --yes --no-tty --list-options no-show-photos --verify-options no-show-photos --pinentry-mode loopback"
      HOME: "${{github.workspace}}\\home"
      USERPROFILE: "${{github.workspace}}\\home"
    steps:
      - name: Configure user
        shell: bash
        run: |
          USER_NAME="${{github.actor}}" &&
          USER_EMAIL="${{github.actor}}@users.noreply.github.com" &&
          mkdir -p "$HOME" &&
          git config --global user.name "$USER_NAME" &&
          git config --global user.email "$USER_EMAIL" &&
          echo "PACKAGER=$USER_NAME <$USER_EMAIL>" >>$GITHUB_ENV
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
        with:
          flavor: build-installers
      - name: Clone build-extra
        shell: bash
        run: |
          git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
      - name: Clone git
        shell: bash
        run: |
          # Since we cannot directly clone a specified tag (as we would a branch with `git clone -b <branch name>`),
          # this clone has to be done manually (via init->fetch->reset).

          tag_name="${{ needs.prereqs.outputs.tag_name }}" &&
          git -c init.defaultBranch=main init &&
          git remote add -f origin https://github.com/git-for-windows/git &&
          git fetch "https://github.com/${{github.repository}}" refs/tags/${tag_name}:refs/tags/${tag_name} &&
          git reset --hard ${tag_name}
      - name: Prepare home directory for code-signing
        env:
          CODESIGN_P12: ${{secrets.CODESIGN_P12}}
          CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
        shell: bash
        run: |
          cd home &&
          mkdir -p .sig &&
          echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >.sig/codesign.p12 &&
          echo -n "$CODESIGN_PASS" >.sig/codesign.pass
          git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
      - name: Prepare home directory for GPG signing
        if: env.GPGKEY != ''
        shell: bash
        run: |
          # This section ensures that the identity for the GPG key matches the git user identity,
          # otherwise signing will fail

          echo '${{secrets.PRIVGPGKEY}}' | tr % '\n' | gpg $GPG_OPTIONS --import &&
          info="$(gpg --list-keys --with-colons "${GPGKEY%% *}" | cut -d : -f 1,10 | sed -n '/^uid/{s|uid:||p;q}')" &&
          git config --global user.name "${info% <*}" &&
          git config --global user.email "<${info#*<}"
        env:
          GPGKEY: ${{secrets.GPGKEY}}
      - name: Build mingw-w64-x86_64-git
        env:
          GPGKEY: "${{secrets.GPGKEY}}"
        shell: bash
        run: |
          set -x

          # Make sure that there is a `/usr/bin/git` that can be used by `makepkg-mingw`
          printf '#!/bin/sh\n\nexec /mingw64/bin/git.exe "$@"\n' >/usr/bin/git &&

          # Restrict `PATH` to MSYS2 and to Visual Studio (to let `cv2pdb` find the relevant DLLs)
          PATH="/mingw64/bin:/usr/bin:/C/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64:/C/Windows/system32"

          type -p mspdb140.dll || exit 1

          sh -x /usr/src/build-extra/please.sh build-mingw-w64-git --only-64-bit --build-src-pkg -o artifacts HEAD &&
          if test -n "$GPGKEY"
          then
            for tar in artifacts/*.tar*
            do
              /usr/src/build-extra/gnupg-with-gpgkey.sh --detach-sign --no-armor $tar
            done
          fi &&

          b=$PWD/artifacts &&
          version=${{ needs.prereqs.outputs.tag_name }} &&
          (cd /usr/src/MINGW-packages/mingw-w64-git &&
           cp PKGBUILD.$version PKGBUILD &&
           git commit -s -m "mingw-w64-git: new version ($version)" PKGBUILD &&
           git bundle create "$b"/MINGW-packages.bundle origin/main..main)
      - name: Publish mingw-w64-x86_64-git
        uses: actions/upload-artifact@v3
        with:
          name: pkg-x86_64
          path: artifacts
  windows_artifacts:
    runs-on: windows-2019
    environment: release
    needs: [prereqs, windows_pkg]
    env:
      HOME: "${{github.workspace}}\\home"
    strategy:
      matrix:
        artifact:
          - name: installer
            fileprefix: Git
          - name: portable
            fileprefix: PortableGit
      fail-fast: false
    steps:
      - name: Download pkg-x86_64
        uses: actions/download-artifact@v3
        with:
          name: pkg-x86_64
          path: pkg-x86_64
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
        with:
          flavor: build-installers
      - name: Clone build-extra
        shell: bash
        run: |
          git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
      - name: Prepare home directory for code-signing
        env:
          CODESIGN_P12: ${{secrets.CODESIGN_P12}}
          CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
        shell: bash
        run: |
          mkdir -p home/.sig &&
          echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >home/.sig/codesign.p12 &&
          echo -n "$CODESIGN_PASS" >home/.sig/codesign.pass &&
          git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
      - name: Retarget auto-update to microsoft/git
        shell: bash
        run: |
          set -x

          b=/usr/src/build-extra &&

          filename=$b/git-update-git-for-windows.config
          tr % '\t' >$filename <<-\EOF &&
          [update]
          %fromFork = microsoft/git
          EOF

          sed -i -e '/^#include "file-list.iss"/a\
          Source: {#SourcePath}\\..\\git-update-git-for-windows.config; DestDir: {app}\\mingw64\\bin; Flags: replacesameversion; AfterInstall: DeleteFromVirtualStore' \
            -e '/^Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}$/i\
          Type: files; Name: {app}\\{#MINGW_BITNESS}\\bin\\git-update-git-for-windows.config\
          Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}\\bin' \
            $b/installer/install.iss
      - name: Set alerts to continue until upgrade is taken
        shell: bash
        run: |
          set -x

          b=/mingw64/bin &&

          sed -i -e '6 a use_recently_seen=no' \
            $b/git-update-git-for-windows
      - name: Set the installer Publisher to the Git Fundamentals team
        shell: bash
        run: |
          b=/usr/src/build-extra &&
          sed -i -e 's/^\(AppPublisher=\).*/\1The Git Fundamentals Team at GitHub/' $b/installer/install.iss
      - name: Let the installer configure Visual Studio to use the installed Git
        shell: bash
        run: |
          set -x

          b=/usr/src/build-extra &&

          sed -i -e '/^ *InstallAutoUpdater();$/a\
          CustomPostInstall();' \
            -e '/^ *UninstallAutoUpdater();$/a\
          CustomPostUninstall();' \
            $b/installer/install.iss &&

          cat >>$b/installer/helpers.inc.iss <<\EOF

          procedure CustomPostInstall();
          begin
            if not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
              not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
              not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
              not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
              not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
              not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) then
              LogError('Could not register TeamFoundation\GitSourceControl');
          end;

          procedure CustomPostUninstall();
          begin
            if not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath') or
              not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath') or
              not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath') or
              not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath') or
              not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath') or
              not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath') then
              LogError('Could not unregister TeamFoundation\GitSourceControl');
          end;
          EOF
      - name: Enable Scalar/C and the auto-updater in the installer by default
        shell: bash
        run: |
          set -x

          b=/usr/src/build-extra &&

          sed -i -e "/ChosenOptions:=''/a\\
          if (ExpandConstant('{param:components|/}')='/') then begin\n\
          WizardSelectComponents('autoupdate');\n\
          #ifdef WITH_SCALAR\n\
          WizardSelectComponents('scalar');\n\
          #endif\n\
          end;" $b/installer/install.iss
      - name: Build 64-bit ${{matrix.artifact.name}}
        shell: bash
        run: |
          set -x

          # Copy the PDB archive to the directory where `--include-pdbs` expects it
          b=/usr/src/build-extra &&
          mkdir -p $b/cached-source-packages &&
          cp pkg-x86_64/*-pdb* $b/cached-source-packages/ &&

          # Build the installer, embedding PDBs
          eval $b/please.sh make_installers_from_mingw_w64_git --include-pdbs \
            --version=${{ needs.prereqs.outputs.tag_version }} \
            -o artifacts --${{matrix.artifact.name}} \
            --pkg=pkg-x86_64/mingw-w64-x86_64-git-[0-9]*.tar.xz \
            --pkg=pkg-x86_64/mingw-w64-x86_64-git-doc-html-[0-9]*.tar.xz &&

          if test portable = '${{matrix.artifact.name}}' && test -n "$(git config alias.signtool)"
          then
            git signtool artifacts/PortableGit-*.exe
          fi &&
          openssl dgst -sha256 artifacts/${{matrix.artifact.fileprefix}}-*.exe | sed "s/.* //" >artifacts/sha-256.txt
      - name: Verify that .exe files are code-signed
        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
        shell: bash
        run: |
          PATH=$PATH:"/c/Program Files (x86)/Windows Kits/10/App Certification Kit/" \
          signtool verify //pa artifacts/${{matrix.artifact.fileprefix}}-*.exe
      - name: Publish ${{matrix.artifact.name}}-x86_64
        uses: actions/upload-artifact@v3
        with:
          name: win-${{matrix.artifact.name}}-x86_64
          path: artifacts
  # End build Windows installers
  # Build and sign macOS installers & upload artifacts
  create-macos-artifacts:
    strategy:
      matrix:
        arch:
          - name: arm64
            runner: macos-latest-xl-arm64
    runs-on: ${{ matrix.arch.runner }}
    needs: prereqs
    env:
      VERSION: "${{ needs.prereqs.outputs.tag_version }}"
    environment: release
    steps:
      - name: Check out repository
        uses: actions/checkout@v3
        with:
          path: 'git'

      - name: Install Git dependencies
        run: |
          set -ex

          # Install x86_64 packages
          arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
          arch -x86_64 /usr/local/bin/brew install gettext

          # Install arm64 packages
          brew install automake asciidoc xmlto docbook
          brew link --force gettext

          # Make universal gettext library
          lipo -create -output libintl.a /usr/local/opt/gettext/lib/libintl.a /opt/homebrew/opt/gettext/lib/libintl.a

      - name: Set up signing/notarization infrastructure
        env:
          A1: ${{ secrets.APPLICATION_CERTIFICATE_BASE64 }}
          A2: ${{ secrets.APPLICATION_CERTIFICATE_PASSWORD }}
          I1: ${{ secrets.INSTALLER_CERTIFICATE_BASE64 }}
          I2: ${{ secrets.INSTALLER_CERTIFICATE_PASSWORD }}
          N1: ${{ secrets.APPLE_TEAM_ID }}
          N2: ${{ secrets.APPLE_DEVELOPER_ID }}
          N3: ${{ secrets.APPLE_DEVELOPER_PASSWORD }}
          N4: ${{ secrets.APPLE_KEYCHAIN_PROFILE }}
        run: |
          echo "Setting up signing certificates"
          security create-keychain -p pwd $RUNNER_TEMP/buildagent.keychain
          security default-keychain -s $RUNNER_TEMP/buildagent.keychain
          security unlock-keychain -p pwd $RUNNER_TEMP/buildagent.keychain
          # Prevent re-locking
          security set-keychain-settings $RUNNER_TEMP/buildagent.keychain

          echo "$A1" | base64 -D > $RUNNER_TEMP/cert.p12
          security import $RUNNER_TEMP/cert.p12 \
            -k $RUNNER_TEMP/buildagent.keychain \
            -P "$A2" \
            -T /usr/bin/codesign
          security set-key-partition-list \
            -S apple-tool:,apple:,codesign: \
            -s -k pwd \
            $RUNNER_TEMP/buildagent.keychain

          echo "$I1" | base64 -D > $RUNNER_TEMP/cert.p12
          security import $RUNNER_TEMP/cert.p12 \
            -k $RUNNER_TEMP/buildagent.keychain \
            -P "$I2" \
            -T /usr/bin/pkgbuild
          security set-key-partition-list \
            -S apple-tool:,apple:,pkgbuild: \
            -s -k pwd \
            $RUNNER_TEMP/buildagent.keychain

          echo "Setting up notarytool"
          xcrun notarytool store-credentials \
            --team-id "$N1" \
            --apple-id "$N2" \
            --password "$N3" \
            "$N4"

      - name: Build, sign, and notarize artifacts
        env:
          A3: ${{ secrets.APPLE_APPLICATION_SIGNING_IDENTITY }}
          I3: ${{ secrets.APPLE_INSTALLER_SIGNING_IDENTITY }}
          N4: ${{ secrets.APPLE_KEYCHAIN_PROFILE }}
        run: |
          die () {
            echo "$*" >&2
            exit 1
          }

          # Trace execution, stop on error
          set -ex

          # Write to "version" file to force match with trigger payload version
          echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version

          # Configure universal build
          cat >git/config.mak <<EOF
          # Create universal binaries. HOST_CPU is a bit of a lie and only
          # used in 'git version --build-options'. We'll fix that in code.
          HOST_CPU = universal
          BASIC_CFLAGS += -arch arm64 -arch x86_64
          EOF

          # Configure the Git build to pick up gettext
          homebrew_prefix="$(brew --prefix)"
          cat >>git/config.mak <<EOF
          CFLAGS = -I$homebrew_prefix/include -I/usr/local/opt/gettext/include
          LDFLAGS = -L"$(pwd)"
          EOF

          # Configure the Git build to use the OS-supplied libcurl.
          cat >>git/config.mak <<EOF
          CURL_LDFLAGS := -lcurl
          CURL_CONFIG := /usr/bin/true
          EOF

          # Avoid even building the dashed built-ins; those should be hard-linked
          # copies of the `git` executable but would end up as actual copies instead,
          # bloating the size of the `.dmg` indecently.
          echo 'SKIP_DASHED_BUILT_INS = YabbaDabbaDoo' >>git/config.mak

          # To make use of the catalogs...
          export XML_CATALOG_FILES=$homebrew_prefix/etc/xml/catalog

          make -C git -j$(sysctl -n hw.physicalcpu) GIT-VERSION-FILE dist dist-doc

          export GIT_BUILT_FROM_COMMIT=$(gunzip -c git/git-$VERSION.tar.gz | git get-tar-commit-id) ||
            die "Could not determine commit for build"

          # Extract tarballs
          mkdir payload manpages
          tar -xvf git/git-$VERSION.tar.gz -C payload
          tar -xvf git/git-manpages-$VERSION.tar.gz -C manpages

          # Lay out payload
          cp git/config.mak payload/git-$VERSION/config.mak
          make -C git/.github/macos-installer V=1 payload

          # Codesign payload
          cp -R stage/git-universal-$VERSION/ \
            git/.github/macos-installer/build-artifacts
          make -C git/.github/macos-installer V=1 codesign \
            APPLE_APP_IDENTITY="$A3" || die "Creating signed payload failed"

          # Build and sign pkg
          make -C git/.github/macos-installer V=1 pkg \
            APPLE_INSTALLER_IDENTITY="$I3" \
            || die "Creating signed pkg failed"

          # Notarize pkg
          make -C git/.github/macos-installer V=1 notarize \
            APPLE_INSTALLER_IDENTITY="$I3" APPLE_KEYCHAIN_PROFILE="$N4" \
            || die "Creating signed and notarized pkg failed"

          # Create DMG
          make -C git/.github/macos-installer V=1 image || die "Creating DMG failed"

          # Move all artifacts into top-level directory
          mv git/.github/macos-installer/disk-image/*.pkg git/.github/macos-installer/

      - name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: macos-artifacts
          path: |
            git/.github/macos-installer/*.dmg
            git/.github/macos-installer/*.pkg
  # End build and sign macOS installers
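Beyond the `git version --build-options` check in the validation job further down, a universal build can also be verified locally with `lipo`; output shown is illustrative:

```
$ lipo -archs /usr/local/git/bin/git
x86_64 arm64
```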
  # Build and sign Debian package
  create-linux-artifacts:
    runs-on: ubuntu-latest
    needs: prereqs
    environment: release
    steps:
      - name: Install git dependencies
        run: |
          set -ex
          sudo apt-get update -q
          sudo apt-get install -y -q --no-install-recommends gettext libcurl4-gnutls-dev libpcre3-dev asciidoc xmlto

      - name: Clone git
        uses: actions/checkout@v3
        with:
          path: git

      - name: Build and create Debian package
        run: |
          set -ex

          die () {
            echo "$*" >&2
            exit 1
          }

          echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
          make -C git GIT-VERSION-FILE

          VERSION="${{ needs.prereqs.outputs.tag_version }}"

          ARCH="$(dpkg-architecture -q DEB_HOST_ARCH)"
          if test -z "$ARCH"; then
            die "Could not determine host architecture!"
          fi

          PKGNAME="microsoft-git_$VERSION"
          PKGDIR="$(dirname $(pwd))/$PKGNAME"

          rm -rf "$PKGDIR"
          mkdir -p "$PKGDIR"

          DESTDIR="$PKGDIR" make -C git -j5 V=1 DEVELOPER=1 \
            USE_LIBPCRE=1 \
            NO_CROSS_DIRECTORY_HARDLINKS=1 \
            ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1 \
            ASCIIDOC='TZ=UTC asciidoc' \
            prefix=/usr/local \
            gitexecdir=/usr/local/lib/git-core \
            libexecdir=/usr/local/lib/git-core \
            htmldir=/usr/local/share/doc/git/html \
            install install-doc install-html

          cd ..
          mkdir "$PKGNAME/DEBIAN"

          # Based on https://packages.ubuntu.com/xenial/vcs/git
          cat >"$PKGNAME/DEBIAN/control" <<EOF
          Package: microsoft-git
          Version: $VERSION
          Section: vcs
          Priority: optional
          Architecture: $ARCH
          Depends: libcurl3-gnutls, liberror-perl, libexpat1, libpcre2-8-0, perl, perl-modules, zlib1g
          Maintainer: Git Fundamentals <git-fundamentals@github.com>
          Description: Git client built from the https://github.com/microsoft/git repository,
           specialized in supporting monorepo scenarios. Includes the Scalar CLI.
          EOF

          dpkg-deb -Zxz --build "$PKGNAME"
          # Move Debian package for later artifact upload
          mv "$PKGNAME.deb" "$GITHUB_WORKSPACE"

      - name: Log into Azure
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Prepare for GPG signing
        env:
          AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
          GPG_KEY_SECRET_NAME: ${{ secrets.GPG_KEY_SECRET_NAME }}
          GPG_PASSPHRASE_SECRET_NAME: ${{ secrets.GPG_PASSPHRASE_SECRET_NAME }}
          GPG_KEYGRIP_SECRET_NAME: ${{ secrets.GPG_KEYGRIP_SECRET_NAME }}
        run: |
          # Install debsigs
          sudo apt install debsigs

          # Download GPG key, passphrase, and keygrip from Azure Key Vault
          key=$(az keyvault secret show --name $GPG_KEY_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
          passphrase=$(az keyvault secret show --name $GPG_PASSPHRASE_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
          keygrip=$(az keyvault secret show --name $GPG_KEYGRIP_SECRET_NAME --vault-name $AZURE_VAULT --query "value")

          # Remove quotes from downloaded values
          key=$(sed -e 's/^"//' -e 's/"$//' <<<"$key")
          passphrase=$(sed -e 's/^"//' -e 's/"$//' <<<"$passphrase")
          keygrip=$(sed -e 's/^"//' -e 's/"$//' <<<"$keygrip")

          # Import GPG key
          echo "$key" | base64 -d | gpg --import --no-tty --batch --yes

          # Configure GPG
          echo "allow-preset-passphrase" > ~/.gnupg/gpg-agent.conf
          gpg-connect-agent RELOADAGENT /bye
          /usr/lib/gnupg2/gpg-preset-passphrase --preset "$keygrip" <<<"$passphrase"

      - name: Sign Debian package
        run: |
          # Sign Debian package
          version="${{ needs.prereqs.outputs.tag_version }}"
          debsigs --sign=origin --verify --check microsoft-git_"$version".deb

      - name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: linux-artifacts
          path: |
            *.deb
  # End build and sign Debian package
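A quick local sanity check of the resulting package on any Debian-based machine (the version shown is illustrative):

```
$ dpkg-deb --info microsoft-git_2.44.0.vfs.0.0.deb | grep -E 'Package|Version|Depends'
 Package: microsoft-git
 Version: 2.44.0.vfs.0.0
 Depends: libcurl3-gnutls, liberror-perl, libexpat1, libpcre2-8-0, perl, perl-modules, zlib1g
```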
  # Validate installers
  validate-installers:
    name: Validate installers
    strategy:
      matrix:
        component:
          - os: ubuntu-latest
            artifact: linux-artifacts
            command: git
          - os: macos-latest-xl-arm64
            artifact: macos-artifacts
            command: git
          - os: macos-latest
            artifact: macos-artifacts
            command: git
          - os: windows-latest
            artifact: win-installer-x86_64
            command: $PROGRAMFILES\Git\cmd\git.exe
    runs-on: ${{ matrix.component.os }}
    needs: [prereqs, windows_artifacts, create-macos-artifacts, create-linux-artifacts]
    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          name: ${{ matrix.component.artifact }}

      - name: Install Windows
        if: contains(matrix.component.os, 'windows')
        shell: pwsh
        run: |
          $exePath = Get-ChildItem -Path ./*.exe | %{$_.FullName}
          Start-Process -Wait -FilePath "$exePath" -ArgumentList "/SILENT /VERYSILENT /NORESTART /SUPPRESSMSGBOXES /ALLOWDOWNGRADE=1"

      - name: Install Linux
        if: contains(matrix.component.os, 'ubuntu')
        run: |
          debpath=$(find ./*.deb)
          sudo apt install $debpath

      - name: Install macOS
        if: contains(matrix.component.os, 'macos')
        run: |
          # avoid letting Homebrew's `git` in `/opt/homebrew/bin` override `/usr/local/bin/git`
          arch="$(uname -m)"
          test arm64 != "$arch" ||
            brew uninstall git

          pkgpath=$(find ./*universal*.pkg)
          sudo installer -pkg $pkgpath -target /

      - name: Validate
        shell: bash
        run: |
          "${{ matrix.component.command }}" --version | sed 's/git version //' >actual
          echo ${{ needs.prereqs.outputs.tag_version }} >expect
          cmp expect actual || exit 1

      - name: Validate universal binary CPU architecture
        if: contains(matrix.component.os, 'macos')
        shell: bash
        run: |
          set -ex
          git version --build-options >actual
          cat actual
          grep "cpu: $(uname -m)" actual
  # End validate installers
  create-github-release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    needs: [validate-installers, create-linux-artifacts, create-macos-artifacts, windows_artifacts, prereqs]
    env:
      AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
      GPG_PUBLIC_KEY_SECRET_NAME: ${{ secrets.GPG_PUBLIC_KEY_SECRET_NAME }}
    environment: release
    if: |
      success() ||
      (needs.create-linux-artifacts.result == 'skipped' &&
       needs.create-macos-artifacts.result == 'success' &&
       needs.windows_artifacts.result == 'success')
    steps:
      - name: Download Windows portable installer
        uses: actions/download-artifact@v3
        with:
          name: win-portable-x86_64
          path: win-portable-x86_64

      - name: Download Windows x86_64 installer
        uses: actions/download-artifact@v3
        with:
          name: win-installer-x86_64
          path: win-installer-x86_64

      - name: Download macOS artifacts
        uses: actions/download-artifact@v3
        with:
          name: macos-artifacts
          path: macos-artifacts

      - name: Download Debian package
        uses: actions/download-artifact@v3
        with:
          name: linux-artifacts
          path: deb-package

      - name: Log into Azure
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Download GPG public key signature file
        run: |
          az keyvault secret show --name "$GPG_PUBLIC_KEY_SECRET_NAME" \
            --vault-name "$AZURE_VAULT" --query "value" \
            | sed -e 's/^"//' -e 's/"$//' | base64 -d >msft-git-public.asc
          mv msft-git-public.asc deb-package

      - uses: actions/github-script@v6
        with:
          script: |
            const fs = require('fs');
            const path = require('path');

            var releaseMetadata = {
              owner: context.repo.owner,
              repo: context.repo.repo
            };

            // Create the release
            var tagName = "${{ needs.prereqs.outputs.tag_name }}";
            var createdRelease = await github.rest.repos.createRelease({
              ...releaseMetadata,
              draft: true,
              tag_name: tagName,
              name: tagName
            });
            releaseMetadata.release_id = createdRelease.data.id;

            // Uploads contents of directory to the release created above
            async function uploadDirectoryToRelease(directory, includeExtensions=[]) {
              return fs.promises.readdir(directory)
                .then(async(files) => Promise.all(
                  files.filter(file => {
                    return includeExtensions.length==0 || includeExtensions.includes(path.extname(file).toLowerCase());
                  })
                  .map(async (file) => {
                    var filePath = path.join(directory, file);
                    github.rest.repos.uploadReleaseAsset({
                      ...releaseMetadata,
                      name: file,
                      headers: {
                        "content-length": (await fs.promises.stat(filePath)).size
                      },
                      data: fs.createReadStream(filePath)
                    });
                  }))
                );
            }

            await Promise.all([
              // Upload Windows artifacts
              uploadDirectoryToRelease('win-installer-x86_64', ['.exe']),
              uploadDirectoryToRelease('win-portable-x86_64', ['.exe']),

              // Upload Mac artifacts
              uploadDirectoryToRelease('macos-artifacts'),

              // Upload Ubuntu artifacts
              uploadDirectoryToRelease('deb-package')
            ]);
@@ -0,0 +1,25 @@
name: CLANG build ARM64

on:
  workflow_dispatch:

defaults:
  run:
    shell: bash

jobs:
  clang-build:
    runs-on: [Windows, ARM64]
    env:
      NO_PERL: 1
    steps:
      - uses: actions/checkout@v4
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
        with:
          flavor: makepkg-git
          architecture: aarch64
          # This assumes that the job is running on a self-hosted runner,
          # in which case we need to clean up SDK files.
          cleanup: true
      - name: Build Git CLANGARM64
        run: make -j`nproc`
@@ -164,13 +164,16 @@ jobs:
   vs-build:
     name: win+VS build
     needs: ci-config
-    if: github.event.repository.owner.login == 'git-for-windows' && needs.ci-config.outputs.enabled == 'yes'
+    if: github.event.repository.owner.login == 'microsoft' && needs.ci-config.outputs.enabled == 'yes'
     env:
       NO_PERL: 1
       GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
     runs-on: windows-latest
+    strategy:
+      matrix:
+        arch: [x64, arm64]
     concurrency:
-      group: vs-build-${{ github.ref }}
+      group: vs-build-${{ github.ref }}-${{ matrix.arch }}
       cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
     steps:
       - uses: actions/checkout@v4
@@ -181,26 +184,22 @@ jobs:
           repository: 'microsoft/vcpkg'
           path: 'compat/vcbuild/vcpkg'
       - name: download vcpkg artifacts
-        shell: powershell
-        run: |
-          $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
-          $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
-          $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
-          (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
-          Expand-Archive compat.zip -DestinationPath . -Force
-          Remove-Item compat.zip
+        uses: git-for-windows/get-azure-pipelines-artifact@v0
+        with:
+          repository: git/git
+          definitionId: 9
       - name: add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2
       - name: copy dlls to root
         shell: cmd
-        run: compat\vcbuild\vcpkg_copy_dlls.bat release
+        run: compat\vcbuild\vcpkg_copy_dlls.bat release ${{ matrix.arch }}-windows
       - name: generate Visual Studio solution
         shell: bash
         run: |
-          cmake `pwd`/contrib/buildsystems/ -DCMAKE_PREFIX_PATH=`pwd`/compat/vcbuild/vcpkg/installed/x64-windows \
-          -DNO_GETTEXT=YesPlease -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON
+          cmake `pwd`/contrib/buildsystems/ -DCMAKE_PREFIX_PATH=`pwd`/compat/vcbuild/vcpkg/installed/${{ matrix.arch }}-windows \
+          -DNO_GETTEXT=YesPlease -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON -DCMAKE_GENERATOR_PLATFORM=${{ matrix.arch }} -DVCPKG_ARCH=${{ matrix.arch }}-windows -DHOST_CPU=${{ matrix.arch }}
       - name: MSBuild
-        run: msbuild git.sln -property:Configuration=Release -property:Platform=x64 -maxCpuCount:4 -property:PlatformToolset=v142
+        run: msbuild git.sln -property:Configuration=Release -property:Platform=${{ matrix.arch }} -maxCpuCount:4 -property:PlatformToolset=v142
       - name: bundle artifact tar
         shell: bash
         env:
@@ -214,7 +213,7 @@
       - name: upload tracked files and build artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: vs-artifacts
+          name: vs-artifacts-${{ matrix.arch }}
           path: artifacts
   vs-test:
     name: win+VS test
@@ -232,7 +231,7 @@
       - name: download tracked files and build artifacts
         uses: actions/download-artifact@v4
         with:
-          name: vs-artifacts
+          name: vs-artifacts-x64
           path: ${{github.workspace}}
       - name: extract tracked files and build artifacts
         shell: bash
@@ -0,0 +1,76 @@
name: Windows Nano Server tests

on:
  workflow_dispatch:

env:
  DEVELOPER: 1

jobs:
  test-nano-server:
    runs-on: windows-2022
    env:
      WINDBG_DIR: "C:/Program Files (x86)/Windows Kits/10/Debuggers/x64"
      IMAGE: mcr.microsoft.com/powershell:nanoserver-ltsc2022

    steps:
      - uses: actions/checkout@v4
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
      - name: build Git
        shell: bash
        run: make -j15
      - name: pull nanoserver image
        shell: bash
        run: docker pull $IMAGE
      - name: run nano-server test
        shell: bash
        run: |
          docker run \
            --user "ContainerAdministrator" \
            -v "$WINDBG_DIR:C:/dbg" \
            -v "$(cygpath -aw /mingw64/bin):C:/mingw64-bin" \
            -v "$(cygpath -aw .):C:/test" \
            $IMAGE pwsh.exe -Command '
              # Extend the PATH to include the `.dll` files in /mingw64/bin/
              $env:PATH += ";C:\mingw64-bin"

              # For each executable to test, pick some no-operation set of
              # flags/subcommands or something that should quickly result in an
              # error with a known exit code that is not a negative 32-bit
              # number, and set the expected return code appropriately.
              #
              # Only test executables that could be expected to run in a
              # UI-less environment.
              #
              # ( Executable path, arguments, expected return code )
              # Also note: a space is required before the closing parenthesis
              # (a PowerShell quirk when defining nested arrays like this).

              $executables_to_test = @(
                ("C:\test\git.exe", "", 1 ),
                ("C:\test\scalar.exe", "version", 0 )
              )

              foreach ($executable in $executables_to_test)
              {
                Write-Output "Now testing $($executable[0])"
                &$executable[0] $executable[1]
                if ($LASTEXITCODE -ne $executable[2]) {
                  # If we failed, run the debugger to find out what function
                  # or DLL could not be found, and then exit the script with
                  # failure. The missing DLL or EXE will be referenced near
                  # the end of the output.

                  # Set a flag to have the debugger show loader stub
                  # diagnostics. This requires running as administrator;
                  # otherwise the flag will be ignored.
                  C:\dbg\gflags -i $executable[0] +SLS

                  C:\dbg\cdb.exe -c "g" -c "q" $executable[0] $executable[1]

                  exit 1
                }
              }

              exit 0
            '
@@ -0,0 +1,31 @@
name: Update Homebrew Tap
on:
  release:
    types: [released]

jobs:
  release:
    runs-on: ubuntu-latest
    environment: release
    steps:
      - id: version
        name: Compute version number
        run: |
          echo "result=$(echo $GITHUB_REF | sed -e "s/^refs\/tags\/v//")" >>$GITHUB_OUTPUT
      - id: hash
        name: Compute release asset hash
        uses: mjcheetham/asset-hash@v1.1
        with:
          asset: /git-(.*)\.pkg/
          hash: sha256
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Update scalar Cask
        uses: mjcheetham/update-homebrew@v1.3
        with:
          token: ${{ secrets.HOMEBREW_TOKEN }}
          tap: microsoft/git
          name: microsoft-git
          type: cask
          version: ${{ steps.version.outputs.result }}
          sha256: ${{ steps.hash.outputs.result }}
          alwaysUsePullRequest: false
@ -0,0 +1,41 @@
|
|||
name: "release-winget"
|
||||
on:
|
||||
release:
|
||||
types: [released]
|
||||
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
release:
|
||||
description: 'Release Id'
|
||||
required: true
|
||||
default: 'latest'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: windows-latest
|
||||
environment: release
|
||||
steps:
|
||||
- name: Publish manifest with winget-create
|
||||
run: |
|
||||
# Get correct release asset
|
||||
$github = Get-Content '${{ github.event_path }}' | ConvertFrom-Json
|
||||
$asset = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
|
||||
|
||||
# Remove 'v' and 'vfs' from the version
|
||||
$github.release.tag_name -match '\d.*'
|
||||
$version = $Matches[0] -replace ".vfs",""
|
||||
|
||||
# Download wingetcreate and create manifests
|
||||
Invoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile wingetcreate.exe
|
||||
.\wingetcreate.exe update Microsoft.Git -u $asset.browser_download_url -v $version -o manifests
|
||||
|
||||
# Manually substitute the name of the default branch in the License
|
||||
# and Copyright URLs since the tooling cannot do that for us.
|
||||
$shortenedVersion = $version -replace ".{4}$"
|
||||
$manifestPath = dir -Path ./manifests -Filter Microsoft.Git.locale.en-US.yaml -Recurse | %{$_.FullName}
|
||||
sed -i "s/vfs-[.0-9]*/vfs-$shortenedVersion/g" "$manifestPath"
|
||||
|
||||
# Submit manifests
|
||||
$manifestDirectory = Split-Path "$manifestPath"
|
||||
.\wingetcreate.exe submit -t "${{ secrets.WINGET_TOKEN }}" $manifestDirectory
|
||||
shell: powershell
|
|
@ -0,0 +1,220 @@
|
|||
name: Scalar Functional Tests
|
||||
|
||||
env:
|
||||
SCALAR_REPOSITORY: microsoft/scalar
|
||||
SCALAR_REF: main
|
||||
DEBUG_WITH_TMATE: false
|
||||
SCALAR_TEST_SKIP_VSTS_INFO: true
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ vfs-*, tentative/vfs-* ]
|
||||
pull_request:
|
||||
branches: [ vfs-*, features/* ]
|
||||
|
||||
jobs:
|
||||
scalar:
|
||||
name: "Scalar Functional Tests"
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# Order by runtime (in descending order)
|
||||
os: [windows-2019, macos-11, ubuntu-20.04, ubuntu-22.04]
|
||||
# Scalar.NET used to be tested using `features: [false, experimental]`
|
||||
# But currently, Scalar/C ignores `feature.scalar` altogether, so let's
|
||||
# save some electrons and run only one of them...
|
||||
features: [ignored]
|
||||
exclude:
|
||||
# The built-in FSMonitor is not (yet) supported on Linux
|
||||
- os: ubuntu-20.04
|
||||
features: experimental
|
||||
- os: ubuntu-22.04
|
||||
features: experimental
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
env:
|
||||
BUILD_FRAGMENT: bin/Release/netcoreapp3.1
|
||||
GIT_FORCE_UNTRACKED_CACHE: 1
|
||||
|
||||
steps:
|
||||
- name: Check out Git's source code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup build tools on Windows
|
||||
if: runner.os == 'Windows'
|
||||
uses: git-for-windows/setup-git-for-windows-sdk@v1
|
||||
|
||||
- name: Provide a minimal `install` on Windows
|
||||
if: runner.os == 'Windows'
|
||||
shell: bash
|
||||
run: |
|
||||
test -x /usr/bin/install ||
|
||||
tr % '\t' >/usr/bin/install <<-\EOF
|
||||
#!/bin/sh
|
||||
|
||||
cmd=cp
|
||||
while test $# != 0
|
||||
do
|
||||
%case "$1" in
|
||||
%-d) cmd="mkdir -p";;
|
||||
%-m) shift;; # ignore mode
|
||||
%*) break;;
|
||||
%esac
|
||||
%shift
|
||||
done
|
||||
|
||||
exec $cmd "$@"
|
||||
EOF
|
||||
|
||||
- name: Install build dependencies for Git (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get -q -y install libssl-dev libcurl4-openssl-dev gettext
|
||||
|
||||
- name: Build and install Git
|
||||
shell: bash
|
||||
env:
|
||||
NO_TCLTK: Yup
|
||||
run: |
|
||||
# We do require a VFS version
|
||||
def_ver="$(sed -n 's/DEF_VER=\(.*vfs.*\)/\1/p' GIT-VERSION-GEN)"
|
||||
test -n "$def_ver"
|
||||
|
||||
# Ensure that `git version` reflects DEF_VER
|
||||
case "$(git describe --match "v[0-9]*vfs*" HEAD)" in
|
||||
${def_ver%%.vfs.*}.vfs.*) ;; # okay, we can use this
|
||||
*) git -c user.name=ci -c user.email=ci@github tag -m for-testing ${def_ver}.NNN.g$(git rev-parse --short HEAD);;
|
||||
esac
|
||||
|
||||
SUDO=
|
||||
extra=
|
||||
case "${{ runner.os }}" in
|
||||
Windows)
|
||||
extra=DESTDIR=/c/Progra~1/Git
|
||||
cygpath -aw "/c/Program Files/Git/cmd" >>$GITHUB_PATH
|
||||
;;
|
||||
Linux)
|
||||
SUDO=sudo
|
||||
extra=prefix=/usr
|
||||
;;
|
||||
macOS)
|
||||
SUDO=sudo
|
||||
extra=prefix=/usr/local
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO make -j5 $extra install
|
||||
|
||||
- name: Ensure that we use the built Git and Scalar
|
||||
shell: bash
|
||||
run: |
|
||||
type -p git
|
||||
git version
|
||||
case "$(git version)" in *.vfs.*) echo Good;; *) exit 1;; esac
|
||||
type -p scalar
|
||||
scalar version
|
||||
case "$(scalar version 2>&1)" in *.vfs.*) echo Good;; *) exit 1;; esac
|
||||
|
||||
- name: Check out Scalar's source code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0 # Indicate full history so Nerdbank.GitVersioning works.
|
||||
path: scalar
|
||||
repository: ${{ env.SCALAR_REPOSITORY }}
|
||||
ref: ${{ env.SCALAR_REF }}
|
||||
|
||||
- name: Setup .NET Core
|
||||
uses: actions/setup-dotnet@v3
|
||||
with:
|
||||
dotnet-version: '3.1.x'
|
||||
|
||||
- name: Install dependencies
|
||||
run: dotnet restore
|
||||
working-directory: scalar
|
||||
env:
|
||||
DOTNET_NOLOGO: 1
|
||||
|
||||
- name: Build
|
||||
working-directory: scalar
|
||||
run: dotnet build --configuration Release --no-restore -p:UseAppHost=true # Force generation of executable on macOS.
|
||||
|
||||
- name: Setup platform (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
echo "BUILD_PLATFORM=${{ runner.os }}" >>$GITHUB_ENV
|
||||
echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
|
||||
|
||||
- name: Setup platform (Mac)
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
echo 'BUILD_PLATFORM=Mac' >>$GITHUB_ENV
|
||||
echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
|
||||
|
||||
- name: Setup platform (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
run: |
|
||||
echo "BUILD_PLATFORM=${{ runner.os }}" >>$env:GITHUB_ENV
|
||||
echo 'BUILD_FILE_EXT=.exe' >>$env:GITHUB_ENV
|
||||
echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$env:GITHUB_ENV
|
||||
|
||||
- name: Configure feature.scalar
|
||||
run: git config --global feature.scalar ${{ matrix.features }}
|
||||
|
||||
- id: functional_test
|
||||
name: Functional test
|
||||
timeout-minutes: 60
|
||||
working-directory: scalar
|
||||
shell: bash
|
||||
run: |
|
||||
export GIT_TRACE2_EVENT="$PWD/$TRACE2_BASENAME/Event"
|
||||
export GIT_TRACE2_PERF="$PWD/$TRACE2_BASENAME/Perf"
|
||||
export GIT_TRACE2_EVENT_BRIEF=true
|
||||
export GIT_TRACE2_PERF_BRIEF=true
|
||||
mkdir -p "$TRACE2_BASENAME"
|
||||
mkdir -p "$TRACE2_BASENAME/Event"
|
||||
mkdir -p "$TRACE2_BASENAME/Perf"
|
||||
git version --build-options
|
||||
cd ../out
|
||||
Scalar.FunctionalTests/$BUILD_FRAGMENT/Scalar.FunctionalTests$BUILD_FILE_EXT --test-scalar-on-path --test-git-on-path --timeout=300000 --full-suite
|
||||
|
||||
- name: Force-stop FSMonitor daemons and Git processes (Windows)
|
||||
if: runner.os == 'Windows' && (success() || failure())
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
wmic process get CommandLine,ExecutablePath,HandleCount,Name,ParentProcessID,ProcessID
|
||||
wmic process where "CommandLine Like '%fsmonitor--daemon %run'" delete
|
||||
wmic process where "ExecutablePath Like '%git.exe'" delete
|
||||
|
||||
- id: trace2_zip_unix
|
||||
if: runner.os != 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
|
||||
name: Zip Trace2 Logs (Unix)
|
||||
shell: bash
|
||||
working-directory: scalar
|
||||
run: zip -q -r $TRACE2_BASENAME.zip $TRACE2_BASENAME/
|
||||
|
||||
- id: trace2_zip_windows
|
||||
if: runner.os == 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
|
||||
name: Zip Trace2 Logs (Windows)
|
||||
working-directory: scalar
|
||||
run: Compress-Archive -DestinationPath ${{ env.TRACE2_BASENAME }}.zip -Path ${{ env.TRACE2_BASENAME }}
|
||||
|
||||
- name: Archive Trace2 Logs
|
||||
if: ( success() || failure() ) && ( steps.trace2_zip_unix.conclusion == 'success' || steps.trace2_zip_windows.conclusion == 'success' )
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.TRACE2_BASENAME }}.zip
|
||||
path: scalar/${{ env.TRACE2_BASENAME }}.zip
|
||||
retention-days: 3
|
||||
|
||||
# The GitHub Action `action-tmate` allows developers to connect to the running agent
|
||||
# using SSH (it will be a `tmux` session; on Windows agents it will be inside the MSYS2
|
||||
# environment in `C:\msys64`, therefore it can be slightly tricky to interact with
|
||||
# Git for Windows, which runs a slightly incompatible MSYS2 runtime).
|
||||
- name: action-tmate
|
||||
if: env.DEBUG_WITH_TMATE == 'true' && failure()
|
||||
uses: mxschmitt/action-tmate@v3
|
||||
with:
|
||||
limit-access-to-actor: true
|
|
@ -1,6 +1,7 @@
|
|||
/fuzz_corpora
|
||||
/GIT-BUILD-DIR
|
||||
/GIT-BUILD-OPTIONS
|
||||
/GIT-BUILT-FROM-COMMIT
|
||||
/GIT-CFLAGS
|
||||
/GIT-LDFLAGS
|
||||
/GIT-PREFIX
|
||||
|
@ -73,6 +74,7 @@
|
|||
/git-gc
|
||||
/git-get-tar-commit-id
|
||||
/git-grep
|
||||
/git-gvfs-helper
|
||||
/git-hash-object
|
||||
/git-help
|
||||
/git-hook
|
||||
|
@ -170,6 +172,7 @@
|
|||
/git-unpack-file
|
||||
/git-unpack-objects
|
||||
/git-update-index
|
||||
/git-update-microsoft-git
|
||||
/git-update-ref
|
||||
/git-update-server-info
|
||||
/git-upload-archive
|
||||
|
@ -247,3 +250,4 @@ Release/
|
|||
/git.VC.db
|
||||
*.dSYM
|
||||
/contrib/buildsystems/out
|
||||
CMakeSettings.json
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
# Architecture of Git for Windows
|
||||
|
||||
Git for Windows is a complex project.
|
||||
|
||||
## What _is_ Git for Windows?
|
||||
|
||||
### A fork of `git/git`
|
||||
|
||||
First and foremost, it is a friendly fork of [`git/git`](https://github.com/git/git), aiming to improve Git's Windows support. The [`git-for-windows/git`](https://github.com/git-for-windows/git) repository contains dozens of topics on top of `git/git`: some waiting to be "upstreamed" (i.e. to be contributed to `git/git`), some still being stabilized, and a few topics that are specific to the Git for Windows project and are not intended to be integrated into `git/git` at all.
|
||||
|
||||
### Enhancing and maintaining Git's support for Windows
|
||||
|
||||
On the source code side, Git's Windows support is made a bit more tricky than strictly necessary by the fact that Git does not have any platform abstraction layer (unlike other version control systems, such as Subversion). It relies on the presence of POSIX features such as the `hstrerror()` function, and on platforms lacking that functionality, Git provides shims. That leads to some challenges, e.g. with the `stat()` function, which is very slow on Windows because it has to collect much more metadata than the very quick `GetFileAttributesExW()` Win32 API function provides, even when Git calls `stat()` merely to test for the presence of a file (for which all that gathered metadata is totally irrelevant).
|
||||
|
||||
### Providing more than just source code
|
||||
|
||||
In contrast to the Git project, Git for Windows not only publishes tagged source code versions, but also full builds of Git. In fact, Git for Windows' primary purpose, as far as most users are concerned, is to provide a convenient installer that end-users can run to have Git on their computer, without ever having to check out `git-for-windows/git`, let alone build it. In essence, Git for Windows has to maintain a separate project altogether in addition to the fork of `git/git`, just to build these release artifacts: [`git-for-windows/build-extra`](https://github.com/git-for-windows/build-extra). This repository also contains the definition for a couple of other release artifacts published by Git for Windows, e.g. the "portable" edition of Git for Windows, which is a self-extracting 7-Zip archive that does not need to be installed.
|
||||
|
||||
### A software distribution, really
|
||||
|
||||
Another aspect that contributes to the complexity of Git for Windows is that it does not just build `git.exe` and distribute that. Due to its heritage within the Linux project, Git takes certain things for granted, such as the presence of a Unix shell, or for that matter, a package management system from which dependencies can be fetched and updated independently of Git itself. These things are distinctly not present in most Windows setups. To accommodate that, Git for Windows originally relied on the MSys project, a minimal fork of Cygwin providing a Unix shell ("Bash"), a Perl interpreter and similar Unix-like tools, and on the MinGW project, a project to build libraries and executables using a GNU C Compiler that relies only on Win32 API functions. As of Git for Windows v2.x, the project has switched away from [MSys](https://sourceforge.net/projects/mingw/files/MSYS/)/[MinGW](https://osdn.net/projects/mingw/) (due to less-than-active maintenance) to [the MSYS2 project](https://msys2.org). That switch brought along the benefit of a robust package management system based on [Pacman](https://archlinux.org/pacman/) (hailing from Arch Linux). To support Windows users, who are in general unfamiliar with Linux-like package management and the need to update installed packages frequently, Git for Windows bundles a subset of its own fork of MSYS2. To put things in perspective: Git for Windows bundles files from ~170 packages, one of which contains Git, and another one contains Git's help files. In that respect, Git for Windows acts more like a distribution than like a mere single software application.
|
||||
|
||||
Most of MSYS2's packages that are bundled in Git for Windows are consumed directly from MSYS2. Others need forks that are maintained by the Git for Windows project, to support Git for Windows better. These forks live in the [`git-for-windows/MSYS2-packages`](https://github.com/git-for-windows/MSYS2-packages) and [`git-for-windows/MINGW-packages`](https://github.com/git-for-windows/MINGW-packages) repositories. There are several reasons justifying these forks. For example, Git for Windows' flavor of the MSYS2 runtime behaves as Git's test suite expects, while MSYS2's flavor does not. Another example: the Bash executable bundled in Git for Windows is code-signed with the same certificate as `git.exe` to help anti-malware programs get out of the users' way. That is why Git for Windows maintains its own `bash` Pacman package. And since MSYS2 has already dropped 32-bit support, Git for Windows has to update the 32-bit Pacman packages itself, which is done in the git-for-windows/MSYS2-packages repository. (Side note: the 32-bit issue is actually a bit more complicated: MSYS2 _still_ builds _MINGW_ packages targeting i686 processors, but no longer any _MSYS_ packages for said processor architecture, and Git for Windows does not keep all of the 32-bit MSYS packages up to date but instead judiciously decides which packages are vital enough as far as Git is concerned to justify the maintenance cost.)
|
||||
|
||||
### Supporting third-party applications that use Git's functionality
|
||||
|
||||
Since the infrastructure required by Git is non-trivial, the installer (or for that matter, the Portable Git) is not exactly light-weight: as of January 2023, both artifacts are over fifty megabytes. This is a problem for third-party applications wishing to bundle a version of Git for Windows, which is often advisable given that applications may depend on features that were introduced only in recent Git versions, and relying on an installed Git for Windows could therefore break things. To help with that, the Git for Windows project also provides MinGit as a release artifact, a zip file that is much smaller than the full installer and that contains only the parts of Git for Windows relevant for third-party applications. It lacks Git GUI, for example, as well as the terminal program MinTTY, or for that matter, the documentation.
|
||||
|
||||
### Supporting `git/git`'s GitHub workflows
|
||||
|
||||
The Git for Windows project is also responsible for keeping the Windows part of `git/git`'s automated builds up and running. On Windows, there is no canonical and easy way to get a build environment necessary to build Git and run its test suite, therefore this is a non-trivial task that comes with its own maintenance cost. Git for Windows provides two GitHub Actions to help with that: [`git-for-windows/setup-git-for-windows-sdk`](https://github.com/git-for-windows/setup-git-for-windows-sdk) to set up a tiny subset of Git for Windows' full SDK (which would require about 500MB to be cloned, as opposed to the ~75MB of that subset) and [`git-for-windows/get-azure-pipelines-artifact`](https://github.com/git-for-windows/get-azure-pipelines-artifact) e.g. to download some regularly pre-built artifacts (for example, when `git/git`'s automated tests ran on an Ubuntu version that did not provide an up to date [Coccinelle](https://coccinelle.gitlabpages.inria.fr/website/) package, this GitHub Action was used to download a pre-built version of that Debian package).
|
||||
|
||||
## Maintaining Git for Windows' components
|
||||
|
||||
Git for Windows uses a combination of [a GitHub App called GitForWindowsHelper](https://github.com/git-for-windows/gfw-helper-github-app) (to listen for so-called [slash commands](https://github.com/git-for-windows/gfw-helper-github-app#slash-commands)) combined with workflows in [the `git-for-windows-automation` repository](https://github.com/git-for-windows/git-for-windows-automation/) (for computationally heavy tasks) to support Git for Windows' repetitive tasks.
|
||||
|
||||
This heavy automation serves two purposes:
|
||||
|
||||
1. Document the knowledge about "how things are done" in the Git for Windows project.
|
||||
2. Make Git for Windows' maintenance less tedious by off-loading as many tasks onto machines as possible.
|
||||
|
||||
One neat trick of some `git-for-windows-automation` workflows is that they "mirror back" check runs to the targeted PRs in another repository. This essentially allows versioning the source code independently of the workflow definition.
|
||||
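For illustration, such a mirrored check run could be created via the GitHub REST API, e.g. with the `gh` CLI. This is only a sketch, not the actual implementation: the repository, check name, and `$PR_HEAD_SHA` variable are placeholders, and the real workflows authenticate as the GitHub App (a plain personal access token cannot create check runs):

```
gh api repos/git-for-windows/git/check-runs \
  -f name="build-and-deploy" \
  -f head_sha="$PR_HEAD_SHA" \
  -f status="completed" \
  -f conclusion="success"
```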
|
||||
Here is a diagram showing how the bits and pieces fit together.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
A[`monitor-components`] --> |opens| B
|
||||
B{issues labeled<br />`component-update`} --> |/open pr| C
|
||||
C((GitForWindowsHelper)) --> |triggers| D
|
||||
D[`open-pr`] --> |opens| E
|
||||
E{PR in<br />MINGW-packages<br />MSYS2-packages<br />build-extra} --> |closes| B
|
||||
E --> |/deploy| F
|
||||
F((GitForWindowsHelper)) --> |triggers| G
|
||||
G[`build-and-deploy`] --> |deploys to| H
|
||||
H{Pacman repository}
|
||||
C --> |backed by| I
|
||||
F --> |backed by| I
|
||||
I[[Azure Function]]
|
||||
D --> |running in| J
|
||||
G --> |running in| J
|
||||
J[[git-for-windows-automation]]
|
||||
K[[git-sdk-32<br />git-sdk-64<br />git-sdk-arm64]] --> |syncing from| H
|
||||
B --> |/add release note| L
|
||||
L[`add-release-note`]
|
||||
```
|
||||
|
||||
For the curious mind, here are [detailed instructions on how the Azure Function backing the GitForWindowsHelper GitHub App was set up](https://github.com/git-for-windows/gfw-helper-github-app#how-this-github-app-was-set-up).
|
||||
|
||||
### The `monitor-components` workflow
|
||||
|
||||
When new versions of components that Git for Windows builds become available, new Pacman packages have to be built. To this end, [the `monitor-components` workflow](https://github.com/git-for-windows/git/blob/main/.github/workflows/monitor-components.yml) monitors a couple of RSS feeds and opens new tickets labeled `component-update` for such new versions.
|
||||
|
||||
### Opening Pull Requests to update Git for Windows' components
|
||||
|
||||
After determining that such a ticket indeed indicates the need for a new Pacman package build, a Git for Windows maintainer issues the `/open pr` command via an issue comment ([example](https://github.com/git-for-windows/git/issues/4281#issuecomment-1426859787)), which gets picked up by the GitForWindowsHelper GitHub App, which in turn triggers [the `open-pr` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/main/.github/workflows/open-pr.yml) in the `git-for-windows-automation` repository.
|
||||
|
||||
### Deploying the Pacman packages
|
||||
|
||||
This will open a Pull Request in one of Git for Windows' repositories, and once the PR build passes, a Git for Windows maintainer issues the `/deploy` command ([example](https://github.com/git-for-windows/MINGW-packages/pull/69#issuecomment-1427591890)), which gets picked up by the GitForWindowsHelper GitHub App, which triggers [the `build-and-deploy` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/main/.github/workflows/build-and-deploy.yml).
|
||||
|
||||
### Adding release notes
|
||||
|
||||
Finally, once the packages have been built and deployed to the Pacman repository (which is hosted in Azure Blob Storage), a Git for Windows maintainer will merge the PR(s), which in turn will close the ticket, and the maintainer then issues an `/add release note` command ([example](https://github.com/git-for-windows/MINGW-packages/pull/69#issuecomment-1427782230)), which again gets picked up by the GitForWindowsHelper GitHub App that triggers [the `add-release-note` workflow](https://github.com/git-for-windows/build-extra/blob/main/.github/workflows/add-release-note.yml) that creates and pushes a new commit to the `ReleaseNotes.md` file in `build-extra` ([example](https://github.com/git-for-windows/build-extra/commit/b39c148ff8dc0e987afdb677d17c46a8e99fd0ef)).
|
||||
|
||||
## Releasing official Git for Windows versions
|
||||
|
||||
A relatively infrequent part of Git for Windows' maintainers' duties, albeit the most rewarding one, is the task of releasing new versions of Git for Windows.
|
||||
|
||||
Most commonly, this is done in response to the "upstream" Git project releasing a new version. When that happens, a Git for Windows maintainer runs [the helper script](https://github.com/git-for-windows/build-extra/blob/main/shears.sh) to perform a "merging rebase" (i.e. a rebase that starts with a fake-merge of the previous tip commit, to maintain both a clean set of commits as well as a [fast-forwarding](https://git-scm.com/docs/git-merge#Documentation/git-merge.txt---ff-only) commit history).
|
||||
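Conceptually, a merging rebase boils down to something like the following (a minimal sketch with placeholder tags; the actual `shears.sh` script handles many more details):

```
# start from the new upstream release
git checkout -b merging-rebase <new-upstream-tag>

# fake-merge the previous tip: record it as a parent while keeping the
# new upstream tree ("ours"), so the history stays fast-forwardable
git merge -s ours -m "Start the merging-rebase" <previous-tip>

# then reapply the fork's patches on top
git cherry-pick <old-upstream-tag>..<previous-tip>
```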
|
||||
Once that is done, the maintainer will open a Pull Request to benefit from the automated builds and tests ([example](https://github.com/git-for-windows/git/pull/4160)) as well as from reviews of the [`range-diff`](https://git-scm.com/docs/git-range-diff) relative to the current `main` branch.
|
||||
|
||||
Once everything looks good, the maintainer will issue the `/git-artifacts` command ([example](https://github.com/git-for-windows/git/pull/4160#issuecomment-1346801735)). This will trigger an automated workflow that builds all of the release artifacts: installers, Portable Git, MinGit, `.tar.xz` archive and a NuGet package. Apart from the NuGet package, two sets of artifacts are built: targeting 32-bit ("x86") and 64-bit ("amd64").
|
||||
|
||||
Once these artifacts are built, the maintainer will download the installer and run [the "pre-flight checklist"](https://github.com/git-for-windows/build-extra/blob/main/installer/checklist.txt).
|
||||
|
||||
If everything looks good, a `/release` command will be issued, which triggers yet another workflow that will download the just-built-and-verified release artifacts, publish them as a new GitHub release, publish the NuGet packages, deploy the Pacman packages to the Pacman repository, send out an announcement mail, and update the respective repositories including [Git for Windows' website](https://gitforwindows.org/).
|
||||
|
||||
As mentioned [before](#architecture-of-git-for-windows), the `/git-artifacts` and `/release` commands are picked up by the GitForWindowsHelper GitHub App which subsequently triggers the respective workflows in the `git-for-windows-automation` repository. Here is a diagram:
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
A{Pull Request<br />updating to<br />new Git version} --> |/git-artifacts| B
|
||||
B((GitForWindowsHelper)) --> |triggers| C
|
||||
C[`tag-git`] --> |upon successful build<br />triggers| D
|
||||
D((GitForWindowsHelper)) --> |triggers| E
|
||||
E[`git-artifacts`]
|
||||
E --> |maintainer verifies artifacts| E
|
||||
A --> |upon verified `git-artifacts`<br />/release| F
|
||||
F[`release-git`]
|
||||
C --> |running in| J
|
||||
E --> |running in| J
|
||||
F --> |running in| J
|
||||
J[[git-for-windows-automation]]
|
||||
```
|
||||
|
||||
## Managing Windows/ARM64 builds
|
||||
|
||||
The GitForWindowsHelper comes in really handy for Git for Windows' Pacman packages for the `aarch64` architecture, i.e. for Windows/ARM64. These packages cannot be built on regular hosted GitHub Actions runners because none are available for that architecture. To help with that, the respective workflows in `git-for-windows-automation` use the label `runs-on: ["Windows", "ARM64"]` to indicate that they need a self-hosted Windows/ARM64 runner.
|
||||
|
||||
It would not be cost-effective to have a VM running permanently to host such a self-hosted runner: Git for Windows does not build such packages often enough (once or twice per week is the norm).
|
||||
|
||||
Therefore, VMs providing self-hosted GitHub Actions runners are spun up and torn down as needed. This job is done by the GitForWindowsHelper:
|
||||
|
||||
- When a job is queued asking for the above-mentioned labels, [the `create-self-hosted-runner` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/09ec165f44a0a3d84d8f0e26a4939667b4522635/.github/workflows/create-azure-self-hosted-runners.yml) is started. This deploys an Azure Resource Management template that creates an ephemeral self-hosted runner, i.e. a runner that will pick up one job and then is immediately unregistered. See the sketch after this list.
|
||||
|
||||
- When a job with the above-mentioned labels has finished, the GitForWindowsHelper triggers [the `delete-self-hosted-runner` workflow](https://github.com/git-for-windows/git-for-windows-automation/blob/09ec165f44a0a3d84d8f0e26a4939667b4522635/.github/workflows/delete-self-hosted-runner.yml) that tears down the now no longer used VM.
|
||||
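For illustration, ephemeral runners are registered with the `--ephemeral` flag of the GitHub Actions runner's configuration script, along the lines of (a sketch; the URL, token, and labels are placeholders):

```
./config.cmd --url <repository URL> --token <registration token> --ephemeral --labels Windows,ARM64
```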
|
||||
The GitForWindowsHelper GitHub App will also detect when a job is queued for a PR from a forked repository. This is considered unauthorized use, and the job will be canceled immediately by the GitHub App instead of spinning up a self-hosted runner for it.
|
|
@ -0,0 +1,59 @@
|
|||
Branches used in this repo
|
||||
==========================
|
||||
|
||||
This document explains the branching structure that we are using in the VFSForGit repository, as well as the forking strategy that we have adopted for contributing.
|
||||
|
||||
Repo Branches
|
||||
-------------
|
||||
|
||||
1. `vfs-#`
|
||||
|
||||
These branches are used to track the specific versions that match Git for Windows, with the VFSForGit-specific patches on top. When a new version of Git for Windows is released, the VFSForGit patches will be rebased on that Windows version and a new `vfs-#` branch created for pull requests to target.
|
||||
|
||||
#### Examples
|
||||
|
||||
```
|
||||
vfs-2.27.0
|
||||
vfs-2.30.0
|
||||
```
|
||||
|
||||
The versions of Git for VFSForGit are based on the Git for Windows versions. For example, v2.20.0.vfs.1 corresponds to v2.20.0.windows.1 with the VFSForGit-specific patches applied on top.
|
||||
|
||||
2. `vfs-#-exp`
|
||||
|
||||
These branches are for releasing experimental features to early adopters. They
|
||||
should contain everything within the corresponding `vfs-#` branch; if the base
|
||||
branch updates, then merge it into the `vfs-#-exp` branch as well.
|
||||
|
||||
Tags
|
||||
----
|
||||
|
||||
We are using annotated tags to build the version number for git. The build will look back through the commit history to find the first tag matching `v[0-9]*vfs*` and build the git version number using that tag.
|
||||
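For example, the version lookup can be reproduced manually with `git describe` (this matches what the CI workflows do):

```
git describe --match "v[0-9]*vfs*" HEAD
```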
|
||||
Full releases are of the form `v2.XX.Y.vfs.Z.W` where `v2.XX.Y` comes from the
|
||||
upstream version and `Z.W` are custom updates within our fork. Specifically,
|
||||
the `.Z` value represents the "compatibility level" with VFS for Git. Only
|
||||
increase this version when making a breaking change with a released version
|
||||
of VFS for Git. The `.W` version is used for minor updates between major
|
||||
versions.
|
||||
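For example (hypothetical version numbers):

```
v2.39.1.vfs.0.0   # first release based on upstream v2.39.1
v2.39.1.vfs.0.1   # minor update within the fork, same compatibility level
v2.39.1.vfs.1.0   # breaking change relative to a released VFS for Git
```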
|
||||
Experimental releases are of the form `v2.XX.Y.vfs.Z.W.exp`. The `.exp`
|
||||
suffix indicates that experimental features are available. The rest of the
|
||||
version string comes from the full release tag. These versions will only
|
||||
be made available as pre-releases on the releases page, never as a full release.
|
||||
|
||||
Forking
|
||||
-------
|
||||
|
||||
A personal fork of this repository and a branch in that repository should be used for development.
|
||||
|
||||
These branches should be based on the latest `vfs-#` branch. If you have work-in-progress pull requests based on a previous version branch when a new version branch is created, you will need to move your patches to the new branch to get them into the latest version.
|
||||
|
||||
#### Example
|
||||
|
||||
```
|
||||
git clone <personal fork repo URL>
|
||||
git remote add ms https://github.com/Microsoft/git.git
|
||||
git checkout -b my-changes ms/vfs-2.20.0 --no-track
|
||||
git push -fu origin HEAD
|
||||
```
|
|
@ -1,9 +1,9 @@
|
|||
# Git Code of Conduct
|
||||
# Git for Windows Code of Conduct
|
||||
|
||||
This code of conduct outlines our expectations for participants within
|
||||
the Git community, as well as steps for reporting unacceptable behavior.
|
||||
We are committed to providing a welcoming and inspiring community for
|
||||
all and expect our code of conduct to be honored. Anyone who violates
|
||||
the **Git for Windows** community, as well as steps for reporting unacceptable
|
||||
behavior. We are committed to providing a welcoming and inspiring community
|
||||
for all and expect our code of conduct to be honored. Anyone who violates
|
||||
this code of conduct may be banned from the community.
|
||||
|
||||
## Our Pledge
|
||||
|
@ -65,8 +65,8 @@ representative at an online or offline event.
|
|||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
git@sfconservancy.org, or individually:
|
||||
reported by contacting the Git for Windows maintainer or the community leaders
|
||||
responsible for enforcement at git@sfconservancy.org, or individually:
|
||||
|
||||
- Ævar Arnfjörð Bjarmason <avarab@gmail.com>
|
||||
- Christian Couder <christian.couder@gmail.com>
|
||||
|
|
|
@ -0,0 +1,417 @@
|
|||
How to Contribute to Git for Windows
|
||||
====================================
|
||||
|
||||
Git was originally designed for Unix systems and, still today, all the build tools for the Git
|
||||
codebase assume you have standard Unix tools available in your path. If you have an open-source
|
||||
mindset and want to start contributing to Git, but primarily use a Windows machine, then you may
|
||||
have trouble getting started. This guide is for you.
|
||||
|
||||
Get the Source
|
||||
--------------
|
||||
|
||||
Clone the [GitForWindows repository on GitHub](https://github.com/git-for-windows/git).
|
||||
It is helpful to create your own fork for storing your development branches.
|
||||
|
||||
Windows uses different line endings than Unix systems. See
|
||||
[this GitHub article on working with line endings](https://help.github.com/articles/dealing-with-line-endings/#refreshing-a-repository-after-changing-line-endings)
|
||||
if you have trouble with line endings.
|
||||
|
||||
Build the Source
|
||||
----------------
|
||||
|
||||
First, download and install the latest [Git for Windows SDK (64-bit)](https://github.com/git-for-windows/build-extra/releases/latest).
|
||||
When complete, you can run the Git SDK, which creates a new Git Bash terminal window with
|
||||
the additional development commands, such as `make`.
|
||||
|
||||
At the time of writing, the SDK uses a different credential manager, so you may still want to use normal Git
|
||||
Bash for interacting with your remotes. Alternatively, use SSH rather than HTTPS and
|
||||
avoid credential manager problems.
|
||||
|
||||
You should now be ready to type `make` from the root of your `git` source directory.
|
||||
Here are some helpful variations:
|
||||
|
||||
* `make -j[N] DEVELOPER=1`: Compile new sources using up to N concurrent processes.
|
||||
The `DEVELOPER` flag turns on all warnings; code failing these warnings will not be
|
||||
accepted upstream ("upstream" = "the core Git project").
|
||||
* `make clean`: Delete all compiled files.
|
||||
|
||||
When running `make`, you can use `-j$(nproc)` to automatically use the number of processors
|
||||
on your machine as the number of concurrent build processes.
|
||||
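For example:

```
make -j$(nproc) DEVELOPER=1
```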
|
||||
You can go deeper on the Windows-specific build process by reading the
|
||||
[technical overview](https://github.com/git-for-windows/git/wiki/Technical-overview) or the
|
||||
[guide to compiling Git with Visual Studio](https://github.com/git-for-windows/git/wiki/Compiling-Git-with-Visual-Studio).
|
||||
|
||||
## Building `git` on Windows with Visual Studio
|
||||
|
||||
The typical approach to building `git` is to use the standard `Makefile` with GCC, as
|
||||
above. Developers working in a Windows environment may want to instead build with the
|
||||
[Microsoft Visual C++ compiler and libraries toolset (MSVC)](https://blogs.msdn.microsoft.com/vcblog/2017/03/07/msvc-the-best-choice-for-windows/).
|
||||
There are a few benefits to using MSVC over GCC during your development, including creating
|
||||
symbols for debugging and [performance tracing](https://github.com/Microsoft/perfview#perfview-overview).
|
||||
|
||||
There are two ways to build Git for Windows using MSVC. Each has its own merits.
|
||||
|
||||
### Using SDK Command Line
|
||||
|
||||
Use one of the following commands from the SDK Bash window to build Git for Windows:
|
||||
|
||||
```
|
||||
make MSVC=1 -j12
|
||||
make MSVC=1 DEBUG=1 -j12
|
||||
```
|
||||
|
||||
The first form produces release-mode binaries; the second produces debug-mode binaries.
|
||||
Both forms produce PDB files and can be debugged. However, the first is best for perf
|
||||
tracing and the second is best for single-stepping.
|
||||
|
||||
You can then open Visual Studio and select File -> Open -> Project/Solution and select
|
||||
the compiled `git.exe` file. This creates a basic solution and you can use the debugging
|
||||
and performance tracing tools in Visual Studio to monitor a Git process. Use the Debug
|
||||
Properties page to set the working directory and command line arguments.
|
||||
|
||||
Be sure to clean up before switching back to GCC (or to switch between debug and
|
||||
release MSVC builds):
|
||||
|
||||
```
|
||||
make MSVC=1 -j12 clean
|
||||
make MSVC=1 DEBUG=1 -j12 clean
|
||||
```
|
||||
|
||||
### Using the IDE
|
||||
|
||||
If you prefer working in Visual Studio with a solution full of projects, then you can use
|
||||
CMake, either by letting Visual Studio configure it automatically (simply open Git's
|
||||
top-level directory via `File>Open>Folder...`) or by (downloading and) running
|
||||
[CMake](https://cmake.org) manually.
|
||||
|
||||
What to Change?
|
||||
---------------
|
||||
|
||||
Many new contributors ask: What should I start working on?
|
||||
|
||||
One way to win big with the open-source community is to look at the
|
||||
[issues page](https://github.com/git-for-windows/git/issues) and see if there are any issues that
|
||||
you can fix quickly, or if anything catches your eye.
|
||||
|
||||
You can also look at [the unofficial Chromium issues page](https://crbug.com/git) for
|
||||
multi-platform issues, or at recent user questions on
|
||||
[the Git mailing list](https://public-inbox.org/git).
|
||||
|
||||
Or you can "scratch your own itch", i.e. address an issue you have with Git. The team at Microsoft where the Git for Windows maintainer works, for example, is focused almost entirely on [improving performance](https://blogs.msdn.microsoft.com/devops/2018/01/11/microsofts-performance-contributions-to-git-in-2017/).
|
||||
We approach our work by finding something that is slow and trying to speed it up. We start our
|
||||
investigation by reliably reproducing the slow behavior, then running that example using
|
||||
the MSVC build and tracing the results in PerfView.
|
||||
|
||||
You could also think of something you wish Git could do, and make it do that thing! The
|
||||
only concern I would have with this approach is whether or not that feature is something
|
||||
the community also wants. If this excites you though, go for it! Don't be afraid to
|
||||
[get involved in the mailing list](http://vger.kernel.org/vger-lists.html#git) early for
|
||||
feedback on the idea.
|
||||
|
||||
Test Your Changes
|
||||
-----------------
|
||||
|
||||
After you make your changes, it is important that you test your changes. Manual testing is
|
||||
important, but checking and extending the existing test suite is even more important. You
|
||||
want to run the functional tests to see if you broke something else during your change, and
|
||||
you want to extend the functional tests to be sure no one breaks your feature in the future.
|
||||
|
||||
### Functional Tests
|
||||
|
||||
Navigate to the `t/` directory and type `make` to run all tests or use `prove` as
|
||||
[described in the Git for Windows wiki](https://github.com/git-for-windows/git/wiki/Building-Git):
|
||||
|
||||
```
|
||||
prove -j12 --state=failed,save ./t[0-9]*.sh
|
||||
```
|
||||
|
||||
You can also run each test directly by running the corresponding shell script with a name
|
||||
like `tNNNN-descriptor.sh`.
|
||||
|
||||
If you are adding new functionality, you may need to create unit tests by creating
|
||||
helper commands that test a very limited action. These commands are stored in `t/helpers`.
|
||||
When adding a helper, be sure to add a line to `t/Makefile` and to the `.gitignore` for the
|
||||
binary file you add. The Git community prefers functional tests using the full `git`
|
||||
executable, so try to exercise your new code using `git` commands before creating a test
|
||||
helper.
|
||||
|
||||
To find out why a test failed, repeat the test with the `-x -v -d -i` options and then
|
||||
navigate to the appropriate "trash" directory to see the data shape that was used for the
|
||||
failed test step.
|
||||
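For example, with a hypothetical test script:

```
cd t
./t1234-my-topic.sh -x -v -d -i
```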
|
||||
Read [`t/README`](t/README) for more details.
|
||||
|
||||
### Performance Tests
|
||||
|
||||
If you are working on improving performance, you will need to be acquainted with the
|
||||
performance tests in `t/perf`. There are not too many performance tests yet, but adding one
|
||||
as your first commit in a patch series helps to communicate the boost your change provides.
|
||||
|
||||
To check the change in performance across multiple versions of `git`, you can use the
|
||||
`t/perf/run` script. For example, to compare the performance of `git rev-list` across the
|
||||
`core/master` and `core/next` branches compared to a `topic` branch, you can run
|
||||
|
||||
```
|
||||
cd t/perf
|
||||
./run core/master core/next topic -- p0001-rev-list.sh
|
||||
```
|
||||
|
||||
You can also set certain environment variables to help test the performance on different
|
||||
repositories or with more repetitions. The full list is available in
|
||||
[the `t/perf/README` file](t/perf/README),
|
||||
but here are a few important ones:
|
||||
|
||||
```
|
||||
GIT_PERF_REPO=/path/to/repo
|
||||
GIT_PERF_LARGE_REPO=/path/to/large/repo
|
||||
GIT_PERF_REPEAT_COUNT=10
|
||||
```
|
||||
|
||||
When running the performance tests on Linux, you may see a message "Can't locate JSON.pm in
|
||||
@INC" and that means you need to run `sudo cpanm install JSON` to get the JSON perl package.
|
||||
|
||||
For running performance tests, it can be helpful to set up a few repositories with strange
|
||||
data shapes, such as:
|
||||
|
||||
**Many objects:** Clone repos such as [Kotlin](https://github.com/jetbrains/kotlin), [Linux](https://github.com/torvalds/linux), or [Android](https://source.android.com/setup/downloading).
|
||||
|
||||
**Many pack-files:** You can split a fresh clone into multiple pack-files of size at most
|
||||
16MB by running `git repack -adfF --max-pack-size=16m`. See the
|
||||
[`git repack` documentation](https://git-scm.com/docs/git-repack) for more information.
|
||||
You can count the number of pack-files using `ls .git/objects/pack/*.pack | wc -l`.
|
||||
|
||||
**Many loose objects:** If you already split your repository into multiple pack-files, then
|
||||
you can pick one to split into loose objects using `cat .git/objects/pack/[id].pack | git unpack-objects`;
|
||||
delete the `[id].pack` and `[id].idx` files after this. You can count the number of loose
|
||||
objects using `ls .git/objects/??/* | wc -l`.
|
||||
|
||||
**Deep history:** Usually large repositories also have deep histories, but you can use the
|
||||
[test-many-commits-1m repo](https://github.com/cirosantilli/test-many-commits-1m/) to
|
||||
target deep histories without the overhead of many objects. One issue with this repository:
|
||||
there are no merge commits, so you will need to use a different repository to test a "wide"
|
||||
commit history.
|
||||
|
||||
**Large Index:** You can generate a large index and repo by using the scripts in
|
||||
`t/perf/repos`. There are two scripts. `many-files.sh` will generate a repo with the
|
||||
same trees and blobs but different paths. Using `many-files.sh -d 5 -w 10 -f 9` will create
|
||||
a repo with ~1 million entries in the index. `inflate-repo.sh` will use an existing repo
|
||||
and copy the current work tree until it is a specified size.
|
||||
|
||||
Test Your Changes on Linux
|
||||
--------------------------
|
||||
|
||||
It can be important to work directly on the [core Git codebase](https://github.com/git/git),
|
||||
such as a recent commit into the `master` or `next` branch that has not been incorporated
|
||||
into Git for Windows. Also, it can help to run functional and performance tests on your
|
||||
code in Linux before submitting patches to the mailing list, which focuses on many platforms.
|
||||
The differences between Windows and Linux are usually enough to catch most cross-platform
|
||||
issues.
|
||||
|
||||
### Using the Windows Subsystem for Linux
|
||||
|
||||
The [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
|
||||
allows you to [install Ubuntu Linux as an app](https://www.microsoft.com/en-us/store/p/ubuntu/9nblggh4msv6)
|
||||
that can run Linux executables on top of the Windows kernel. Internally,
|
||||
Linux syscalls are interpreted by the WSL; everything else is plain Ubuntu.
|
||||
|
||||
First, open WSL (either type "Bash" in Cortana, or execute "bash.exe" in a CMD window).
|
||||
Then install the prerequisites, and `git` for the initial clone:
|
||||
|
||||
```
|
||||
sudo apt-get update
|
||||
sudo apt-get install git gcc make libssl-dev libcurl4-openssl-dev \
|
||||
libexpat-dev tcl tk gettext git-email zlib1g-dev
|
||||
```
|
||||
|
||||
Then, clone and build:
|
||||
|
||||
```
|
||||
git clone https://github.com/git-for-windows/git
|
||||
cd git
|
||||
git remote add -f upstream https://github.com/git/git
|
||||
make
|
||||
```
|
||||
|
||||
Be sure to clone into `/home/[user]/` and not into any folder under `/mnt/?/` or your build
|
||||
will fail due to colons in file names.
|
||||
|
||||
### Using a Linux Virtual Machine with Hyper-V
|
||||
|
||||
If you prefer, you can use a virtual machine (VM) to run Linux and test your changes in the
|
||||
full environment. The test suite runs a lot faster on Linux than on Windows or with the WSL.
|
||||
You can connect to the VM using an SSH terminal like
|
||||
[PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/).
|
||||
|
||||
The following instructions are for using Hyper-V, which is available in some versions of Windows.
|
||||
There are many virtual machine alternatives available if you do not have such a version installed.
|
||||
|
||||
* [Download an Ubuntu Server ISO](https://www.ubuntu.com/download/server).
|
||||
* Open [Hyper-V Manager](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v).
|
||||
* [Set up a virtual switch](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/connect-to-network)
|
||||
so your VM can reach the network.
|
||||
* Select "Quick Create", name your machine, select the ISO as installation source, and un-check
|
||||
"This virtual machine will run Windows."
|
||||
* Go through the Ubuntu install process, being sure to select to install OpenSSH Server.
|
||||
* When install is complete, log in and check the SSH server status with `sudo service ssh status`.
|
||||
* If the service is not found, install with `sudo apt-get install openssh-server`.
|
||||
* If the service is not running, then use `sudo service ssh start`.
|
||||
* Use `shutdown -h now` to shut down the VM, go to the Hyper-V settings for the VM, expand Network Adapter
|
||||
to select "Advanced Features", and set the MAC address to be static (this can save your VM from losing
|
||||
network if shut down incorrectly).
|
||||
* Provide as many cores to your VM as you can (for parallel builds).
|
||||
* Restart your VM, but do not connect.
|
||||
* Use `ssh` in Git Bash, download [PuTTY](http://www.putty.org/), or use your favorite SSH client to connect to the VM through SSH.
|
||||
|
||||
In order to build and use `git`, you will need the following libraries via `apt-get`:
|
||||
|
||||
```
|
||||
sudo apt-get update
|
||||
sudo apt-get install git gcc make libssl-dev libcurl4-openssl-dev \
|
||||
libexpat-dev tcl tk gettext git-email zlib1g-dev
|
||||
```
|
||||
|
||||
To get your code from your Windows machine to the Linux VM, it is easiest to push the branch to your fork of Git and clone your fork in the Linux VM.
|
||||
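A minimal sketch (assuming `origin` points to your fork and your branch is called `my-changes`):

```
# on the Windows machine
git push origin my-changes

# on the Linux VM
git clone <personal fork repo URL>
cd git
git checkout my-changes
```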
|
||||
Don't forget to set your `git` config with your preferred name, email, and editor.
|
||||
|
||||
Polish Your Commits
|
||||
-------------------
|
||||
|
||||
Before submitting your patch, be sure to read the [coding guidelines](https://github.com/git/git/blob/master/Documentation/CodingGuidelines)
|
||||
and check your code to match as best you can. This can be a lot of effort, but it saves
|
||||
time during review to avoid style issues.
|
||||
|
||||
Another major difference between mailing list submissions and GitHub PR workflows
|
||||
is that each commit will be reviewed independently. Even if you are submitting a
|
||||
patch series with multiple commits, each commit must stand on its own and be reviewable
|
||||
by itself. Make sure the commit message clearly explains the why of the commit, not the how.
|
||||
Describe what is wrong with the current code and how your changes have made the code better.
|
||||
|
||||
When preparing your patch, it is important to put yourself in the shoes of the Git community.
|
||||
Accepting a patch requires more justification than approving a pull request from someone on
|
||||
your team. The community has a stable product and is responsible for keeping it stable. If
|
||||
you introduce a bug, then they cannot count on you being around to fix it. When you decided
|
||||
to start work on a new feature, they were not part of the design discussion and may not
|
||||
even believe the feature is worth introducing.
|
||||
|
||||
Questions to answer in your patch message (and commit messages) may include:
|
||||
* Why is this patch necessary?
|
||||
* How does the current behavior cause pain for users?
|
||||
* What kinds of repositories are necessary for noticing a difference?
|
||||
* What design options did you consider before writing this version? Do you have links to
|
||||
code for those alternate designs?
|
||||
* Is this a performance fix? Provide clear performance numbers for various well-known repos.
|
||||
|
||||
Here are some other tips that we use when cleaning up our commits:
|
||||
|
||||
* Commit messages should be wrapped at 76 columns per line (or less; 72 is also a
|
||||
common choice).
|
||||
* Make sure the commits are signed off using `git commit (-s|--signoff)`. See
|
||||
[SubmittingPatches](https://github.com/git/git/blob/v2.8.1/Documentation/SubmittingPatches#L234-L286)
|
||||
for more details about what this sign-off means.
|
||||
* Check for whitespace errors using `git diff --check [base]...HEAD` or `git log --check`.
|
||||
* Run `git rebase --whitespace=fix` to correct upstream issues with whitespace.
|
||||
* Become familiar with interactive rebase (`git rebase -i`) because you will be reordering,
|
||||
squashing, and editing commits as your patch or series of patches is reviewed.
|
||||
* Make sure any shell scripts that you add have the executable bit set on them. This is
|
||||
usually for test files that you add in the `/t` directory. You can use
|
||||
`git add --chmod=+x [file]` to update it. You can test whether a file is marked as executable
|
||||
using `git ls-files --stage \*.sh`; the first number is 100755 for executable files.
|
||||
* Your commit titles should match the "area: change description" format (see the example after this list). Rules of thumb:
|
||||
* Choose "<area>: " prefix appropriately.
|
||||
* Keep the description short and to the point.
|
||||
* The word that follows the "<area>: " prefix is not capitalized.
|
||||
* Do not include a full-stop at the end of the title.
|
||||
* Read a few commit messages -- using `git log origin/master`, for instance -- to
|
||||
become acquainted with the preferred commit message style.
|
||||
* Build source using `make DEVELOPER=1` for extra-strict compiler warnings.
|
||||
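For instance, a made-up title following these rules might look like:

```
fsmonitor: handle unexpected paths in the refresh callback
```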
|
||||
Submit Your Patch
|
||||
-----------------
|
||||
|
||||
Git for Windows [accepts pull requests on GitHub](https://github.com/git-for-windows/git/pulls), but
|
||||
these are reserved for Windows-specific improvements. For core Git, submissions are accepted on
|
||||
[the Git mailing list](https://public-inbox.org/git).
|
||||
|
||||
### Configure Git to Send Emails
|
||||
|
||||
There are a bunch of options for configuring the `git send-email` command. These options can
|
||||
be found in the documentation for
|
||||
[`git config`](https://git-scm.com/docs/git-config) and
|
||||
[`git send-email`](https://git-scm.com/docs/git-send-email).
|
||||
|
||||
```
|
||||
git config --global sendemail.smtpserver <smtp server>
|
||||
git config --global sendemail.smtpserverport 587
|
||||
git config --global sendemail.smtpencryption tls
|
||||
git config --global sendemail.smtpuser <email address>
|
||||
```
|
||||
|
||||
To avoid storing your password in the config file, store it in the Git credential manager:
|
||||
|
||||
```
|
||||
$ git credential fill
|
||||
protocol=smtp
|
||||
host=<smtp server>
|
||||
username=<email address>
|
||||
password=password
|
||||
```
|
||||
|
||||
Before submitting a patch, read the [Git documentation on submitting patches](https://github.com/git/git/blob/master/Documentation/SubmittingPatches).
|
||||
|
||||
To construct a patch set, use the `git format-patch` command. There are three important options:
|
||||
|
||||
* `--cover-letter`: If specified, create a `[v#-]0000-cover-letter.patch` file that can be
|
||||
edited to describe the patch as a whole. If you previously added a branch description using
|
||||
`git branch --edit-description`, you will end up with a 0/N mail with that description and
|
||||
a nice overall diffstat.
|
||||
* `--in-reply-to=[Message-ID]`: This will mark your cover letter as replying to the given
|
||||
message (which should correspond to your previous iteration). To determine the correct Message-ID,
|
||||
find the message you are replying to on [public-inbox.org/git](https://public-inbox.org/git) and take
|
||||
the ID from between the angle brackets.
|
||||
|
||||
* `--subject-prefix=[prefix]`: This defaults to [PATCH]. For subsequent iterations, you will want to
|
||||
override it like `--subject-prefix="[PATCH v2]"`. You can also use the `-v` option to have it
|
||||
automatically generate the version number in the patches.
|
||||
|
||||
If you have multiple commits and use the `--cover-letter` option, be sure to open the
|
||||
`0000-cover-letter.patch` file to update the subject and add some details about the overall purpose
|
||||
of the patch series.
|
||||
|
||||
### Examples
|
||||
|
||||
To generate a single commit patch file:
|
||||
```
|
||||
git format-patch -s -o [dir] -1
|
||||
```
|
||||
To generate four patch files from the last four commits with a cover letter:
|
||||
```
|
||||
git format-patch --cover-letter -s -o [dir] HEAD~4
|
||||
```
|
||||
To generate version 3 with four patch files from the last four commits with a cover letter:
|
||||
```
|
||||
git format-patch --cover-letter -s -o [dir] -v 3 HEAD~4
|
||||
```
|
||||
|
||||
### Submit the Patch
|
||||
|
||||
Run [`git send-email`](https://git-scm.com/docs/git-send-email), starting with a test email:
|
||||
|
||||
```
|
||||
git send-email --to=yourself@address.com [dir with patches]/*.patch
|
||||
```
|
||||
|
||||
After checking the receipt of your test email, you can send to the list and to any
|
||||
potentially interested reviewers.
|
||||
|
||||
```
|
||||
git send-email --to=git@vger.kernel.org --cc=<email1> --cc=<email2> [dir with patches]/*.patch
|
||||
```
|
||||
|
||||
To submit an nth version of a patch (say, version 3):
|
||||
|
||||
```
|
||||
git send-email --to=git@vger.kernel.org --cc=<email1> --cc=<email2> \
|
||||
--in-reply-to=<the message id of cover letter of patch v2> [dir with patches]/*.patch
|
||||
```
|
|
@ -443,6 +443,8 @@ include::config/gui.txt[]
|
|||
|
||||
include::config/guitool.txt[]
|
||||
|
||||
include::config/gvfs.txt[]
|
||||
|
||||
include::config/help.txt[]
|
||||
|
||||
include::config/http.txt[]
|
||||
|
@ -509,6 +511,8 @@ include::config/safe.txt[]
|
|||
|
||||
include::config/sendemail.txt[]
|
||||
|
||||
include::config/sendpack.txt[]
|
||||
|
||||
include::config/sequencer.txt[]
|
||||
|
||||
include::config/showbranch.txt[]
|
||||
|
@ -545,4 +549,6 @@ include::config/versionsort.txt[]
|
|||
|
||||
include::config/web.txt[]
|
||||
|
||||
include::config/windows.txt[]
|
||||
|
||||
include::config/worktree.txt[]
|
||||
|
|
|
@@ -56,6 +56,9 @@ advice.*::
 		Advice on how to set your identity configuration when
 		your information is guessed from the system username and
 		domain name.
+	nameTooLong::
+		Advice shown if a filepath operation is attempted where the
+		path was too long.
 	nestedTag::
 		Advice shown if a user attempts to recursively tag a tag object.
 	pushAlreadyExists::
@@ -146,4 +149,8 @@ advice.*::
 		Advice shown when a user tries to create a worktree from an
 		invalid reference, to instruct how to create a new unborn
 		branch instead.
+	useCoreFSMonitorConfig::
+		Advice shown if the deprecated 'core.useBuiltinFSMonitor' config
+		setting is in use.
 --
@@ -111,6 +111,14 @@ Version 2 uses an opaque string so that the monitor can return
 something that can be used to determine what files have changed
 without race conditions.

+core.virtualFilesystem::
+	If set, the value of this variable is used as a command which
+	will identify all files and directories that are present in
+	the working directory. Git will only track and update files
+	listed in the virtual file system. Using the virtual file system
+	supersedes the sparse-checkout settings, which will be ignored.
+	See the "virtual file system" section of linkgit:githooks[5].
+
 core.trustctime::
 	If false, the ctime differences between the index and the
 	working tree are ignored; useful when the inode change time
@@ -670,6 +678,19 @@ relatively high IO latencies. When enabled, Git will do the
 index comparison to the filesystem data in parallel, allowing
 overlapping IOs. Defaults to true.

+core.fscache::
+	Enable additional caching of file system data for some operations.
++
+Git for Windows uses this to bulk-read and cache lstat data of entire
+directories (instead of doing lstat file by file).
+
+core.longpaths::
+	Enable long path (> 260) support for builtin commands in Git for
+	Windows. This is disabled by default, as long paths are not supported
+	by Windows Explorer, cmd.exe and the Git for Windows tool chain
+	(msys, bash, tcl, perl...). Only enable this if you know what you're
+	doing and are prepared to live with a few quirks.
+
 core.unsetenvvars::
 	Windows-only: comma-separated list of environment variables'
 	names that need to be unset before spawning any other process.
@@ -715,6 +736,55 @@ core.multiPackIndex::
 	single index. See linkgit:git-multi-pack-index[1] for more
 	information. Defaults to true.

+core.gvfs::
+	Enable the features needed for GVFS. This value can be set to true
+	to indicate all features should be turned on, or the bit values listed
+	below can be used to turn on specific features.
++
+--
+	GVFS_SKIP_SHA_ON_INDEX::
+		Bit value 1.
+		Disables the calculation of the SHA when writing the index.
+	GVFS_MISSING_OK::
+		Bit value 4.
+		Normally git write-tree ensures that the objects referenced by the
+		directory exist in the object database. This option disables this check.
+	GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT::
+		Bit value 8.
+		When marking entries to remove from the index and the working
+		directory, this option takes the skip-worktree bit into account,
+		so that an entry with the skip-worktree bit set is not removed
+		from the working directory. This allows virtualized working
+		directories to detect the change to HEAD and use the new commit
+		tree to show the files that are in the working directory.
+	GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK::
+		Bit value 16.
+		While performing a fetch with a virtual file system, we know
+		that there will be missing objects, and we don't want to download
+		them just because of the reachability of the commits. We also
+		don't want to download a pack file with commits, trees, and blobs,
+		since these will be downloaded on demand. This flag skips the
+		checks on the reachability of objects during a fetch, as well as
+		the upload pack, so that extraneous objects don't get downloaded.
+	GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS::
+		Bit value 64.
+		With a virtual file system, we only know the file size before any
+		CRLF or smudge/clean filter processing is done on the client.
+		To prevent file corruption due to truncation or expansion with
+		garbage at the end, these filters must not run when the file
+		is first accessed and brought down to the client. Git.exe can't
+		currently tell a first access from subsequent accesses, so this
+		flag just blocks the filters from running at all.
+	GVFS_PREFETCH_DURING_FETCH::
+		Bit value 128.
+		While performing a `git fetch` command, use the gvfs-helper to
+		perform a "prefetch" of commits and trees.
+--
+
+core.useGvfsHelper::
+	TODO
+
 core.sparseCheckout::
 	Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
 	for more information.
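
As a worked example of the bit values above (a sketch; 12 is simply the sum 4 + 8):

```shell
# Turn on only GVFS_MISSING_OK (4) and GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (8):
git config core.gvfs 12

# Or turn on every GVFS feature at once:
git config core.gvfs true
```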
@@ -742,3 +812,18 @@ core.maxTreeDepth::
 	tree (e.g., "a/b/cde/f" has a depth of 4). This is a fail-safe
 	to allow Git to abort cleanly, and should not generally need to
 	be adjusted. The default is 4096.
+
+core.WSLCompat::
+	Tells Git whether to enable WSL compatibility mode.
+	The default value is false. When set to true, Git will set the mode
+	bits of files the way WSL does, so that the executable flag of
+	files can be set or read correctly.
+
+core.configWriteLockTimeoutMS::
+	When processes try to write to the config concurrently, it is likely
+	that one process "wins" and the other process(es) fail to lock the
+	config file. By configuring a timeout larger than zero, Git can be
+	told to retry locking the config a couple of times within the
+	specified timeout. If the timeout is configured to zero (which is the
+	default), Git will fail immediately when the config is already
+	locked.
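
For illustration, a machine whose jobs frequently write to the same config file could allow a short retry window (a sketch; the 150 ms value is arbitrary):

```shell
git config --global core.configWriteLockTimeoutMS 150
```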
@@ -9,6 +9,14 @@ credential.helper::
 	Note that multiple helpers may be defined. See linkgit:gitcredentials[7]
 	for details and examples.

+credential.interactive::
+	By default, Git and any configured credential helpers will ask for
+	user input when new credentials are required. Many of these helpers
+	will succeed based on stored credentials if those credentials are
+	still valid. To avoid the possibility of user interactivity from
+	Git, set `credential.interactive=false`. Some credential helpers
+	respect this option as well.
+
 credential.useHttpPath::
 	When acquiring credentials, consider the "path" component of an http
 	or https URL to be important. Defaults to false. See
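
For example, an unattended job that should fail fast rather than hang on a prompt could combine this setting with Git's terminal-prompt switch (a minimal sketch, assuming a helper with valid stored credentials is already configured):

```shell
git config --global credential.interactive false
# Also forbid terminal prompts for this one command, so it errors out instead:
GIT_TERMINAL_PROMPT=0 git fetch origin
```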
@@ -0,0 +1,5 @@
gvfs.cache-server::
	TODO

gvfs.sharedcache::
	TODO
@@ -189,11 +189,13 @@ http.sslBackend::

 http.schannelCheckRevoke::
 	Used to enforce or disable certificate revocation checks in cURL
-	when http.sslBackend is set to "schannel". Defaults to `true` if
-	unset. Only necessary to disable this if Git consistently errors
-	and the message is about checking the revocation status of a
-	certificate. This option is ignored if cURL lacks support for
-	setting the relevant SSL option at runtime.
+	when http.sslBackend is set to "schannel" via "true" and "false",
+	respectively. Another accepted value is "best-effort" (the default)
+	in which case revocation checks are performed, but errors due to
+	revocation list distribution points that are offline are silently
+	ignored, as well as errors due to certificates missing revocation
+	list distribution points. This option is ignored if cURL lacks
+	support for setting the relevant SSL option at runtime.

 http.schannelUseSSLCAInfo::
 	As of cURL v7.60.0, the Secure Channel backend can use the
@@ -203,6 +205,11 @@ http.schannelUseSSLCAInfo::
 	when the `schannel` backend was configured via `http.sslBackend`,
 	unless `http.schannelUseSSLCAInfo` overrides this behavior.

+http.sslAutoClientCert::
+	As of cURL v7.77.0, the Secure Channel backend won't automatically
+	send client certificates from the Windows Certificate Store anymore.
+	To opt in to the old behavior, http.sslAutoClientCert can be set.
+
 http.pinnedPubkey::
 	Public key of the https service. It may either be the filename of
 	a PEM or DER encoded public key file or a string starting with
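
To make the tri-state above concrete, these are the three accepted values (a sketch):

```shell
git config http.schannelCheckRevoke best-effort  # default: check, but tolerate offline CRL endpoints
git config http.schannelCheckRevoke true         # enforce revocation checks strictly
git config http.schannelCheckRevoke false        # disable revocation checks entirely
```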
@@ -1,3 +1,9 @@
+index.deleteSparseDirectories::
+	When enabled, the cone mode sparse-checkout feature will delete
+	directories that are outside of the sparse-checkout cone, unless
+	such a directory contains an untracked, non-ignored file. Defaults
+	to true.
+
 index.recordEndOfIndexEntries::
 	Specifies whether the index file should include an "End Of Index
 	Entry" section. This reduces index load time on multiprocessor
@@ -0,0 +1,5 @@
sendpack.sideband::
	Allows disabling the side-band-64k capability for send-pack even
	when it is advertised by the server. This makes it possible to work
	around a limitation in the Git for Windows implementation together
	with the dumb git protocol. Defaults to true.
@@ -75,3 +75,25 @@ status.submoduleSummary::
 	the --ignore-submodules=dirty command-line option or the 'git
 	submodule summary' command, which shows a similar output but does
 	not honor these settings.
+
+status.deserializePath::
+	EXPERIMENTAL. Pathname to a file containing cached status results
+	generated by `--serialize`. This will be overridden by
+	`--deserialize=<path>` on the command line. If the cache file is
+	invalid or stale, git will fall back and compute status normally.
+
+status.deserializeWait::
+	EXPERIMENTAL. Specifies what `git status --deserialize` should do
+	if the serialization cache file is stale and whether it should
+	fall back and compute status normally. This will be overridden by
+	`--deserialize-wait=<value>` on the command line.
++
+--
+* `fail` - cause git to exit with an error when the status cache file
+is stale; this is intended for testing and debugging.
+* `block` - cause git to spin and periodically retry the cache file
+every 100 ms; this is intended to help coordinate with another git
+instance concurrently computing the cache file.
+* `no` - immediately fall back if the cache file is stale. This is the default.
+* `<timeout>` - time (in tenths of a second) to spin and retry.
+--
@@ -0,0 +1,4 @@
windows.appendAtomically::
	By default, the atomic append API is used on Windows. It only works
	with local disk files, though; if you are working on a network file
	system, you should set this to false to turn it off.
@@ -12,6 +12,7 @@ SYNOPSIS
 'git reset' [-q] [--pathspec-from-file=<file> [--pathspec-file-nul]] [<tree-ish>]
 'git reset' (--patch | -p) [<tree-ish>] [--] [<pathspec>...]
 'git reset' [--soft | --mixed [-N] | --hard | --merge | --keep] [-q] [<commit>]
+DEPRECATED: 'git reset' [-q] [--stdin [-z]] [<tree-ish>]

 DESCRIPTION
 -----------
@@ -133,6 +134,16 @@ OPTIONS
 +
 For more details, see the 'pathspec' entry in linkgit:gitglossary[7].

+--stdin::
+	DEPRECATED (use `--pathspec-from-file=-` instead): Instead of taking
+	a list of paths from the command line, read the list of paths from
+	standard input. Paths are separated by LF (i.e. one path per line) by
+	default.
+
+-z::
+	DEPRECATED (use `--pathspec-file-nul` instead): Only meaningful with
+	`--stdin`; paths are separated with NUL character instead of LF.
+
 EXAMPLES
 --------
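
A sketch of migrating a script off the deprecated options onto the documented replacements; the `git ls-files` pipeline is just an illustrative source of NUL-separated paths:

```shell
# Deprecated form:
git ls-files -z -- 'subdir/' | git reset --stdin -z

# Recommended replacement:
git ls-files -z -- 'subdir/' | git reset --pathspec-from-file=- --pathspec-file-nul
```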
@@ -149,6 +149,21 @@ ignored, then the directory is not shown, but all contents are shown.
 	threshold.
 	See also linkgit:git-diff[1] `--find-renames`.

+--serialize[=<path>]::
+	(EXPERIMENTAL) Serialize raw status results to a file or stdout
+	in a format suitable for use by `--deserialize`. If a path is
+	given, serialize data will be written to that path *and* normal
+	status output will be written to stdout. If path is omitted,
+	only binary serialization data will be written to stdout.
+
+--deserialize[=<path>]::
+	(EXPERIMENTAL) Deserialize raw status results from a file or
+	stdin rather than scanning the worktree. If `<path>` is omitted
+	and `status.deserializePath` is unset, input is read from stdin.
+
+--no-deserialize::
+	(EXPERIMENTAL) Disable implicit deserialization of status results
+	from the value of `status.deserializePath`.
+
 <pathspec>...::
 	See the 'pathspec' entry in linkgit:gitglossary[7].
@@ -422,6 +437,26 @@ quoted as explained for the configuration variable `core.quotePath`
 (see linkgit:git-config[1]).


+SERIALIZATION and DESERIALIZATION (EXPERIMENTAL)
+------------------------------------------------
+
+The `--serialize` option allows git to cache the result of a
+possibly time-consuming status scan to a binary file. A local
+service/daemon watching file system events could use this to
+periodically pre-compute a fresh status result.
+
+Interactive users could then use `--deserialize` to simply
+(and immediately) print the last-known-good result without
+waiting for the status scan.
+
+The binary serialization file format includes some worktree state
+information allowing `--deserialize` to reject the cached data
+and force a normal status scan if, for example, the commit, branch,
+or status modes/options change. The format cannot, however, indicate
+when the cached data is otherwise stale -- that coordination belongs
+to the task driving the serializations.
+
+
 CONFIGURATION
 -------------
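
A minimal sketch of that round trip; the cache path `.git/status-cache` is only an illustrative location:

```shell
# Background task: cache a full status scan, discarding the normal output.
git status --serialize=.git/status-cache >/dev/null

# Interactive user: print the cached result, falling back to a normal
# scan immediately if the cache file is stale.
git status --deserialize=.git/status-cache --deserialize-wait=no
```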
@@ -0,0 +1,24 @@
git-update-microsoft-git(1)
===========================

NAME
----
git-update-microsoft-git - Update the installed version of Git


SYNOPSIS
--------
[verse]
'git update-microsoft-git'


DESCRIPTION
-----------
This version of Git is based on the Microsoft fork of Git, which
has custom capabilities focused on supporting monorepos. This
command checks for the latest release of that fork and installs
it on your machine.


GIT
---
Part of the linkgit:git[1] suite
@@ -464,6 +464,14 @@ their values the same way as Boolean valued configuration variables, e.g.

 Here are the variables:

+System
+~~~~~~
+`HOME`::
+	Specifies the path to the user's home directory. On Windows, if
+	unset, Git will set a process environment variable equal to:
+	`$HOMEDRIVE$HOMEPATH` if both `$HOMEDRIVE` and `$HOMEPATH` exist;
+	otherwise `$USERPROFILE` if `$USERPROFILE` exists.
+
 The Git Repository
 ~~~~~~~~~~~~~~~~~~
 These environment variables apply to 'all' core Git commands. Nb: it
@@ -403,6 +403,36 @@ sign `$` upon checkout. Any byte sequence that begins with
 with `$Id$` upon check-in.


+`symlink`
+^^^^^^^^^
+
+On Windows, symbolic links have a type: a "file symlink" must point at
+a file, and a "directory symlink" must point at a directory. If the
+type of symlink does not match its target, it doesn't work.
+
+Git does not record the type of symlink in the index or in a tree. On
+checkout it'll guess the type, which only works if the target exists
+at the time the symlink is created. This may often not be the case,
+for example when the link points at a directory inside a submodule.
+
+The `symlink` attribute allows you to explicitly set the type of symlink
+to `file` or `dir`, so Git doesn't have to guess. If you have a set of
+symlinks that point at other files, you can do:
+
+------------------------
+*.gif symlink=file
+------------------------
+
+To tell Git that a symlink points at a directory, use:
+
+------------------------
+tools_folder symlink=dir
+------------------------
+
+The `symlink` attribute is ignored on platforms other than Windows,
+since they don't distinguish between different types of symlinks.
+
+
 `filter`
 ^^^^^^^^
@@ -751,6 +751,26 @@ and "0" meaning they were not.
 Only one parameter should be set to "1" when the hook runs. The hook
 running passing "1", "1" should not be possible.

+virtualFilesystem
+~~~~~~~~~~~~~~~~~
+
+"Virtual File System" allows populating the working directory sparsely.
+The projection data is typically automatically generated by an external
+process. Git will limit what files it checks for changes, as well as which
+directories are checked for untracked files, based on the path names given.
+Git will also only update those files listed in the projection.
+
+The hook is invoked when the configuration option core.virtualFilesystem
+is set. It takes one argument, a version (currently 1).
+
+The hook should output to stdout the list of all files in the working
+directory that git should track. The paths are relative to the root
+of the working directory and are separated by a single NUL. Full paths
+('dir1/a.txt') as well as directories are supported (i.e. 'dir1/').
+
+The exit status determines whether git will use the data from the
+hook. On error, git will abort the command with an error message.
+
 SEE ALSO
 --------
 linkgit:git-hook[1]
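
A minimal sketch of such a hook, assuming the projection is pre-generated into a file `.git/vfs-paths` with one path per line (both that file and the hook location are illustrative assumptions, not part of the documentation above):

```shell
#!/bin/sh
# Illustrative virtual file system hook.
# $1 is the protocol version; only version 1 is defined so far.
test "$1" = "1" || exit 1

# Emit the projection as NUL-separated paths relative to the worktree root.
tr '\n' '\0' <"$(git rev-parse --git-dir)/vfs-paths"
```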
@@ -9,7 +9,8 @@ SYNOPSIS
 --------
 [verse]
 scalar clone [--single-branch] [--branch <main-branch>] [--full-clone]
-	[--[no-]src] <url> [<enlistment>]
+	[--[no-]src] [--local-cache-path <path>] [--cache-server-url <url>]
+	<url> [<enlistment>]
 scalar list
 scalar register [<enlistment>]
 scalar unregister [<enlistment>]
@@ -17,6 +18,7 @@ scalar run ( all | config | commit-graph | fetch | loose-objects | pack-files )
 scalar reconfigure [ --all | <enlistment> ]
 scalar diagnose [<enlistment>]
 scalar delete <enlistment>
+scalar cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]

 DESCRIPTION
 -----------
@@ -90,6 +92,17 @@ cloning. If the HEAD at the remote did not point at any branch when
 A sparse-checkout is initialized by default. This behavior can be
 turned off via `--full-clone`.

+--local-cache-path <path>::
+	Override the path to the local cache root directory; pre-fetched objects
+	are stored into a repository-dependent subdirectory of that path.
++
+The default is `<drive>:\.scalarCache` on Windows (on the same drive as the
+clone), and `~/.scalarCache` on macOS.
+
+--cache-server-url <url>::
+	Retrieve missing objects from the specified remote, which is expected to
+	understand the GVFS protocol.
+
 List
 ~~~~

@@ -163,6 +176,27 @@ delete <enlistment>::
 This subcommand lets you delete an existing Scalar enlistment from your
 local file system, unregistering the repository.

+Cache-server
+~~~~~~~~~~~~
+
+cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]::
+	This command lets you query or set the GVFS-enabled cache server used
+	to fetch missing objects.
+
+--get::
+	This is the default command mode: query the currently-configured cache
+	server URL, if any.
+
+--list::
+	Access the `gvfs/info` endpoint of the specified remote (default:
+	`origin`) to figure out which cache servers are available, if any.
++
+In contrast to the `--get` command mode (which only accesses the local
+repository), this command mode triggers a request via the network that
+potentially requires authentication. If authentication is required, the
+configured credential helper is employed (see linkgit:git-credential[1]
+for details).
+
 SEE ALSO
 --------
 linkgit:git-clone[1], linkgit:git-maintenance[1].
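
Concretely, the command modes above look like this in use (a sketch; `origin` and the URL are placeholders):

```shell
scalar cache-server --get                             # local query: currently configured URL
scalar cache-server --list origin                     # network query: servers the remote advertises
scalar cache-server --set https://cache.example.com/  # point the enlistment at a cache server
```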
@@ -0,0 +1,102 @@
Read Object Process
^^^^^^^^^^^^^^^^^^^

The read-object process enables Git to read all missing blobs with a
single process invocation for the entire life of a single Git command.
This is achieved by using a packet format (pkt-line, see
technical/protocol-common.txt) based protocol over standard input and
standard output as follows. All packets, except for the "*CONTENT"
packets and the "0000" flush packet, are considered text and therefore
are terminated by a LF.

Git starts the process when it encounters the first missing object that
needs to be retrieved. After the process is started, Git sends a welcome
message ("git-read-object-client"), a list of supported protocol version
numbers, and a flush packet. Git expects to read a welcome response
message ("git-read-object-server"), exactly one protocol version number
from the previously sent list, and a flush packet. All further
communication will be based on the selected version.

The remaining protocol description below documents "version=1". Please
note that "version=42" in the example below does not exist and is only
there to illustrate how the protocol would look with more than one
version.

After the version negotiation Git sends a list of all capabilities that
it supports and a flush packet. Git expects to read a list of desired
capabilities, which must be a subset of the supported capabilities list,
and a flush packet as response:
------------------------
packet: git> git-read-object-client
packet: git> version=1
packet: git> version=42
packet: git> 0000
packet: git< git-read-object-server
packet: git< version=1
packet: git< 0000
packet: git> capability=get
packet: git> capability=have
packet: git> capability=put
packet: git> capability=not-yet-invented
packet: git> 0000
packet: git< capability=get
packet: git< 0000
------------------------
The only supported capability in version 1 is "get".

Afterwards Git sends a list of "key=value" pairs terminated with a flush
packet. The list will contain at least the command (based on the
supported capabilities) and the sha1 of the object to retrieve. Please
note that the process must not send any response before it has received
the final flush packet.

When the process receives the "get" command, it should make the requested
object available in the git object store and then return success. Git will
then check the object store again and this time find it and proceed.
------------------------
packet: git> command=get
packet: git> sha1=0a214a649e1b3d5011e14a3dc227753f2bd2be05
packet: git> 0000
------------------------

The process is expected to respond with a list of "key=value" pairs
terminated with a flush packet. If the process does not experience
problems then the list must contain a "success" status.
------------------------
packet: git< status=success
packet: git< 0000
------------------------

In case the process cannot or does not want to process the content, it
is expected to respond with an "error" status.
------------------------
packet: git< status=error
packet: git< 0000
------------------------

In case the process cannot or does not want to process the content as
well as any future content for the lifetime of the Git process, then it
is expected to respond with an "abort" status at any point in the
protocol.
------------------------
packet: git< status=abort
packet: git< 0000
------------------------

Git neither stops nor restarts the process if the "error"/"abort"
status is set.

If the process dies during the communication or does not adhere to the
protocol then Git will stop the process and restart it with the next
object that needs to be processed.

After the read-object process has processed an object it is expected to
wait for the next "key=value" list containing a command. Git will close
the command pipe on exit. The process is expected to detect EOF and exit
gracefully on its own. Git will wait until the process has stopped.

A long-running read-object process demo implementation can be found in
`contrib/long-running-read-object/example.pl` in the Git core
repository. If you develop your own long-running process, the
`GIT_TRACE_PACKET` environment variable can be very helpful for
debugging (see linkgit:git[1]).
@@ -0,0 +1,107 @@
Git status serialization format
===============================

Git status serialization enables git to dump the results of a status scan
to a binary file. This file can then be loaded by later status invocations
to print the cached status results.

The file contains the essential fields from:
() the index
() the "struct wt_status" for the overall results
() the contents of "struct wt_status_change_data" for tracked changed files
() the list of untracked and ignored files

Version 1 Format:
=================

The V1 file begins with a required header section followed by optional
sections for each type of item (changed, untracked, ignored). Individual
item sections are only present if necessary. Each item section begins
with an item-type header with the number of items in the section.

Each "line" in the format is encoded using pkt-line with a final LF.
Flush packets are used to terminate sections.

-----------------
PKT-LINE("version" SP "1")
<v1-header-section>
[<v1-changed-item-section>]
[<v1-untracked-item-section>]
[<v1-ignored-item-section>]
-----------------


V1 Header
---------

The v1-header-section fields are taken directly from "struct wt_status".
Each field is printed on a separate pkt-line. Lines for NULL string
values are omitted. All integers are printed with "%d". OIDs are
printed in hex.

v1-header-section    = <v1-index-headers>
                       <v1-wt-status-headers>
                       PKT-LINE(<flush>)

v1-index-headers     = PKT-LINE("index_mtime" SP <sec> SP <nsec> LF)

v1-wt-status-headers = PKT-LINE("is_initial" SP <integer> LF)
                       [ PKT-LINE("branch" SP <branch-name> LF) ]
                       [ PKT-LINE("reference" SP <reference-name> LF) ]
                       PKT-LINE("show_ignored_files" SP <integer> LF)
                       PKT-LINE("show_untracked_files" SP <integer> LF)
                       PKT-LINE("show_ignored_directory" SP <integer> LF)
                       [ PKT-LINE("ignore_submodule_arg" SP <string> LF) ]
                       PKT-LINE("detect_rename" SP <integer> LF)
                       PKT-LINE("rename_score" SP <integer> LF)
                       PKT-LINE("rename_limit" SP <integer> LF)
                       PKT-LINE("detect_break" SP <integer> LF)
                       PKT-LINE("sha1_commit" SP <oid> LF)
                       PKT-LINE("committable" SP <integer> LF)
                       PKT-LINE("workdir_dirty" SP <integer> LF)


V1 Changed Items
----------------

The v1-changed-item-section lists all of the changed items with one
item per pkt-line. Each pkt-line contains: a binary block of data
from "struct wt_status_serialize_data_fixed" in a fixed header where
integers are in network byte order and OIDs are in raw (non-hex) form.
This is followed by one or two raw pathnames (not c-quoted) with NUL
terminators (both NULs are always present even if there is no rename).

v1-changed-item-section = PKT-LINE("changed" SP <count> LF)
                          [ PKT-LINE(<changed_item> LF) ]+
                          PKT-LINE(<flush>)

changed_item = <byte[4] worktree_status>
               <byte[4] index_status>
               <byte[4] stagemask>
               <byte[4] score>
               <byte[4] mode_head>
               <byte[4] mode_index>
               <byte[4] mode_worktree>
               <byte[4] dirty_submodule>
               <byte[4] new_submodule_commits>
               <byte[20] oid_head>
               <byte[20] oid_index>
               <byte[*] path>
               NUL
               [ <byte[*] src_path> ]
               NUL


V1 Untracked and Ignored Items
------------------------------

These sections are simple lists of pathnames. They ARE NOT
c-quoted.

v1-untracked-item-section = PKT-LINE("untracked" SP <count> LF)
                            [ PKT-LINE(<pathname> LF) ]+
                            PKT-LINE(<flush>)

v1-ignored-item-section = PKT-LINE("ignored" SP <count> LF)
                          [ PKT-LINE(<pathname> LF) ]+
                          PKT-LINE(<flush>)
@@ -1,7 +1,7 @@
 #!/bin/sh

 GVF=GIT-VERSION-FILE
-DEF_VER=v2.44.0
+DEF_VER=v2.44.0.vfs.0.0

 LF='
 '
@@ -12,10 +12,15 @@ if test -f version
 then
 	VN=$(cat version) || VN="$DEF_VER"
 elif { test -d "${GIT_DIR:-.git}" || test -f .git; } &&
-	VN=$(git describe --match "v[0-9]*" HEAD 2>/dev/null) &&
+	VN=$(git describe --match "v[0-9]*vfs*" HEAD 2>/dev/null) &&
 	case "$VN" in
 	*$LF*) (exit 1) ;;
 	v[0-9]*)
+		if test "${VN%%.vfs.*}" != "${DEF_VER%%.vfs.*}"
+		then
+			echo "Found version $VN, which is not based on $DEF_VER" >&2
+			exit 1
+		fi
 		git update-index -q --refresh
 		test -z "$(git diff-index --name-only HEAD --)" ||
 		VN="$VN-dirty" ;;
INSTALL

@@ -139,7 +139,7 @@ Issues of note:
 	  not need that functionality, use NO_CURL to build without
 	  it.

-	  Git requires version "7.19.5" or later of "libcurl" to build
+	  Git requires version "7.21.3" or later of "libcurl" to build
 	  without NO_CURL. This version requirement may be bumped in
 	  the future.
Makefile

@@ -321,6 +321,10 @@ include shared.mak
 # Define GIT_USER_AGENT if you want to change how git identifies itself during
 # network interactions. The default is "git/$(GIT_VERSION)".
 #
+# Define GIT_BUILT_FROM_COMMIT if you want to force the commit hash identified
+# in 'git version --build-options' to a specific value. The default is the
+# commit hash of the current HEAD.
+#
 # Define DEFAULT_HELP_FORMAT to "man", "info" or "html"
 # (defaults to "man") if you want to have a different default when
 # "git help" is called without a parameter specifying the format.
@@ -464,6 +468,11 @@ include shared.mak
 #
 # CURL_LDFLAGS=-lcurl
 #
+# Define LAZYLOAD_LIBCURL to dynamically load the libcurl; this can be useful
+# if multiple libcurl versions exist (with different file names) that link to
+# various SSL/TLS backends, to support the `http.sslBackend` runtime switch in
+# such a scenario.
+#
 # === Optional library: libpcre2 ===
 #
 # Define USE_LIBPCRE if you have and want to use libpcre. Various
@@ -815,6 +824,7 @@ TEST_BUILTINS_OBJS += test-hash-speed.o
 TEST_BUILTINS_OBJS += test-hash.o
 TEST_BUILTINS_OBJS += test-hashmap.o
+TEST_BUILTINS_OBJS += test-hexdump.o
 TEST_BUILTINS_OBJS += test-iconv.o
 TEST_BUILTINS_OBJS += test-json-writer.o
 TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
 TEST_BUILTINS_OBJS += test-match-trees.o
@@ -1042,6 +1052,8 @@ LIB_OBJS += git-zlib.o
 LIB_OBJS += gpg-interface.o
 LIB_OBJS += graph.o
 LIB_OBJS += grep.o
+LIB_OBJS += gvfs.o
+LIB_OBJS += gvfs-helper-client.o
 LIB_OBJS += hash-lookup.o
 LIB_OBJS += hashmap.o
 LIB_OBJS += help.o
@@ -1194,6 +1206,7 @@ LIB_OBJS += utf8.o
 LIB_OBJS += varint.o
 LIB_OBJS += version.o
 LIB_OBJS += versioncmp.o
+LIB_OBJS += virtualfilesystem.o
 LIB_OBJS += walker.o
 LIB_OBJS += wildmatch.o
 LIB_OBJS += worktree.o
@@ -1201,6 +1214,8 @@ LIB_OBJS += wrapper.o
 LIB_OBJS += write-or-die.o
 LIB_OBJS += ws.o
 LIB_OBJS += wt-status.o
+LIB_OBJS += wt-status-deserialize.o
+LIB_OBJS += wt-status-serialize.o
 LIB_OBJS += xdiff-interface.o

 BUILTIN_OBJS += builtin/add.o
@@ -1316,6 +1331,7 @@ BUILTIN_OBJS += builtin/tag.o
 BUILTIN_OBJS += builtin/unpack-file.o
 BUILTIN_OBJS += builtin/unpack-objects.o
 BUILTIN_OBJS += builtin/update-index.o
+BUILTIN_OBJS += builtin/update-microsoft-git.o
 BUILTIN_OBJS += builtin/update-ref.o
 BUILTIN_OBJS += builtin/update-server-info.o
 BUILTIN_OBJS += builtin/upload-archive.o
@@ -1334,6 +1350,7 @@ BUILTIN_OBJS += builtin/write-tree.o
 # upstream unnecessarily (making merging in future changes easier).
 THIRD_PARTY_SOURCES += compat/inet_ntop.c
 THIRD_PARTY_SOURCES += compat/inet_pton.c
+THIRD_PARTY_SOURCES += compat/mimalloc/%
 THIRD_PARTY_SOURCES += compat/nedmalloc/%
 THIRD_PARTY_SOURCES += compat/obstack.%
 THIRD_PARTY_SOURCES += compat/poll/%
@@ -1616,16 +1633,32 @@ else
 	CURL_LIBCURL =
 endif

-ifndef CURL_LDFLAGS
-	CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
+ifdef LAZYLOAD_LIBCURL
+	LAZYLOAD_LIBCURL_OBJ = compat/lazyload-curl.o
+	OBJECTS += $(LAZYLOAD_LIBCURL_OBJ)
+	# The `CURL_STATICLIB` constant must be defined to avoid seeing the functions
+	# declared as DLL imports
+	CURL_CFLAGS = -DCURL_STATICLIB
+ifneq ($(uname_S),MINGW)
+ifneq ($(uname_S),Windows)
+	CURL_LIBCURL = -ldl
+endif
+endif
+else
+ifndef CURL_LDFLAGS
+	CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
+endif
+	CURL_LIBCURL += $(CURL_LDFLAGS)
 endif
-CURL_LIBCURL += $(CURL_LDFLAGS)

 ifndef CURL_CFLAGS
 	CURL_CFLAGS = $(eval CURL_CFLAGS := $$(shell $$(CURL_CONFIG) --cflags))$(CURL_CFLAGS)
 endif
 BASIC_CFLAGS += $(CURL_CFLAGS)

+PROGRAM_OBJS += gvfs-helper.o
+TEST_PROGRAMS_NEED_X += test-gvfs-protocol
+
 REMOTE_CURL_PRIMARY = git-remote-http$X
 REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
 REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
@@ -1640,7 +1673,7 @@ else
 endif
 ifdef USE_CURL_FOR_IMAP_SEND
 	BASIC_CFLAGS += -DUSE_CURL_FOR_IMAP_SEND
-	IMAP_SEND_BUILDDEPS = http.o
+	IMAP_SEND_BUILDDEPS = http.o $(LAZYLOAD_LIBCURL_OBJ)
 	IMAP_SEND_LDFLAGS += $(CURL_LIBCURL)
 endif
 ifndef NO_EXPAT
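
As a usage sketch of the knob introduced above (defining the variable to any value, conventionally `YesPlease`, enables it):

```shell
make LAZYLOAD_LIBCURL=YesPlease
```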
@@ -2071,6 +2104,43 @@ ifdef USE_NED_ALLOCATOR
 	OVERRIDE_STRDUP = YesPlease
 endif

+ifdef USE_MIMALLOC
+	MIMALLOC_OBJS = \
+		compat/mimalloc/alloc-aligned.o \
+		compat/mimalloc/alloc.o \
+		compat/mimalloc/arena.o \
+		compat/mimalloc/bitmap.o \
+		compat/mimalloc/heap.o \
+		compat/mimalloc/init.o \
+		compat/mimalloc/options.o \
+		compat/mimalloc/os.o \
+		compat/mimalloc/page.o \
+		compat/mimalloc/random.o \
+		compat/mimalloc/prim/windows/prim.o \
+		compat/mimalloc/segment.o \
+		compat/mimalloc/segment-cache.o \
+		compat/mimalloc/segment-map.o \
+		compat/mimalloc/stats.o
+
+	COMPAT_CFLAGS += -Icompat/mimalloc -DMI_DEBUG=0 -DUSE_MIMALLOC --std=gnu11
+	COMPAT_OBJS += $(MIMALLOC_OBJS)
+
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += -DBANNED_H
+
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += \
+	-Wno-attributes \
+	-Wno-unknown-pragmas \
+	-Wno-array-bounds
+
+ifdef DEVELOPER
+$(MIMALLOC_OBJS): COMPAT_CFLAGS += \
+	-Wno-pedantic \
+	-Wno-declaration-after-statement \
+	-Wno-old-style-definition \
+	-Wno-missing-prototypes
+endif
+endif
+
 ifdef OVERRIDE_STRDUP
 	COMPAT_CFLAGS += -DOVERRIDE_STRDUP
 	COMPAT_OBJS += compat/strdup.o
@@ -2319,6 +2389,15 @@ GIT-USER-AGENT: FORCE
 		echo '$(GIT_USER_AGENT_SQ)' >GIT-USER-AGENT; \
 	fi

+GIT_BUILT_FROM_COMMIT = $(eval GIT_BUILT_FROM_COMMIT := $$(shell \
+	GIT_CEILING_DIRECTORIES="$$(CURDIR)/.." \
+	git rev-parse -q --verify HEAD 2>/dev/null))$(GIT_BUILT_FROM_COMMIT)
+GIT-BUILT-FROM-COMMIT: FORCE
+	@if test x'$(GIT_BUILT_FROM_COMMIT)' != x"`cat GIT-BUILT-FROM-COMMIT 2>/dev/null`" ; then \
+		echo >&2 "    * new built-from commit"; \
+		echo '$(GIT_BUILT_FROM_COMMIT)' >GIT-BUILT-FROM-COMMIT; \
+	fi
+
 ifdef DEFAULT_HELP_FORMAT
 BASIC_CFLAGS += -DDEFAULT_HELP_FORMAT='"$(DEFAULT_HELP_FORMAT)"'
 endif
@@ -2433,13 +2512,11 @@ PAGER_ENV_CQ_SQ = $(subst ','\'',$(PAGER_ENV_CQ))
 pager.sp pager.s pager.o: EXTRA_CPPFLAGS = \
 	-DPAGER_ENV='$(PAGER_ENV_CQ_SQ)'

-version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT
+version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT GIT-BUILT-FROM-COMMIT
 version.sp version.s version.o: EXTRA_CPPFLAGS = \
 	'-DGIT_VERSION="$(GIT_VERSION)"' \
 	'-DGIT_USER_AGENT=$(GIT_USER_AGENT_CQ_SQ)' \
-	'-DGIT_BUILT_FROM_COMMIT="$(shell \
-		GIT_CEILING_DIRECTORIES="$(CURDIR)/.." \
-		git rev-parse -q --verify HEAD 2>/dev/null)"'
+	'-DGIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)"'

 $(BUILT_INS): git$X
 	$(QUIET_BUILT_IN)$(RM) $@ && \
@@ -2680,6 +2757,7 @@ GIT_OBJS += git.o
 .PHONY: git-objs
 git-objs: $(GIT_OBJS)

+SCALAR_OBJS := json-parser.o
 SCALAR_OBJS += scalar.o
 .PHONY: scalar-objs
 scalar-objs: $(SCALAR_OBJS)
@@ -2778,7 +2856,7 @@ gettext.sp gettext.s gettext.o: GIT-PREFIX
 gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
 	-DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"'

-http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SP_EXTRA_FLAGS += \
+http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp gvfs-helper.sp: SP_EXTRA_FLAGS += \
 	-DCURL_DISABLE_TYPECHECK

 pack-revindex.sp: SP_EXTRA_FLAGS += -Wno-memcpy-max-count
@@ -2812,10 +2890,10 @@ git-imap-send$X: imap-send.o $(IMAP_SEND_BUILDDEPS) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
 		$(IMAP_SEND_LDFLAGS) $(LIBS)

-git-http-fetch$X: http.o http-walker.o http-fetch.o GIT-LDFLAGS $(GITLIBS)
+git-http-fetch$X: http.o http-walker.o http-fetch.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
 		$(CURL_LIBCURL) $(LIBS)
-git-http-push$X: http.o http-push.o GIT-LDFLAGS $(GITLIBS)
+git-http-push$X: http.o http-push.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
 		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)

@@ -2825,14 +2903,18 @@ $(REMOTE_CURL_ALIASES): $(REMOTE_CURL_PRIMARY)
 	ln -s $< $@ 2>/dev/null || \
 	cp $< $@

-$(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS)
+$(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
 		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)

-scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
+scalar$X: $(SCALAR_OBJS) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
 		$(filter %.o,$^) $(LIBS)

+git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS) $(LAZYLOAD_LIBCURL_OBJ)
+	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
+		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
+
 $(LIB_FILE): $(LIB_OBJS)
 	$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^

@@ -3610,7 +3692,7 @@ dist: git-archive$(X) configure
 	@$(MAKE) -C git-gui TARDIR=../.dist-tmp-dir/git-gui dist-version
 	./git-archive --format=tar \
 		$(GIT_ARCHIVE_EXTRA_FILES) \
-		--prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
+		--prefix=$(GIT_TARNAME)/ HEAD > $(GIT_TARNAME).tar
 	@$(RM) -r .dist-tmp-dir
 	gzip -f -9 $(GIT_TARNAME).tar

@@ -3715,12 +3797,15 @@ ifdef MSVC
 	$(RM) $(patsubst %.o,%.o.pdb,$(OBJECTS))
 	$(RM) headless-git.o.pdb
 	$(RM) $(patsubst %.exe,%.pdb,$(OTHER_PROGRAMS))
+	$(RM) $(patsubst %.exe,%.ilk,$(OTHER_PROGRAMS))
 	$(RM) $(patsubst %.exe,%.iobj,$(OTHER_PROGRAMS))
 	$(RM) $(patsubst %.exe,%.ipdb,$(OTHER_PROGRAMS))
 	$(RM) $(patsubst %.exe,%.pdb,$(PROGRAMS))
+	$(RM) $(patsubst %.exe,%.ilk,$(PROGRAMS))
 	$(RM) $(patsubst %.exe,%.iobj,$(PROGRAMS))
 	$(RM) $(patsubst %.exe,%.ipdb,$(PROGRAMS))
 	$(RM) $(patsubst %.exe,%.pdb,$(TEST_PROGRAMS))
+	$(RM) $(patsubst %.exe,%.ilk,$(TEST_PROGRAMS))
 	$(RM) $(patsubst %.exe,%.iobj,$(TEST_PROGRAMS))
 	$(RM) $(patsubst %.exe,%.ipdb,$(TEST_PROGRAMS))
 	$(RM) compat/vcbuild/MSVC-DEFS-GEN
README.md

@@ -1,74 +1,215 @@
-[![Build status](https://github.com/git/git/workflows/CI/badge.svg)](https://github.com/git/git/actions?query=branch%3Amaster+event%3Apush)
+`microsoft/git` and the Scalar CLI
+==================================

-Git - fast, scalable, distributed revision control system
-=========================================================
+[![Open in Visual Studio Code](https://open.vscode.dev/badges/open-in-vscode.svg)](https://open.vscode.dev/microsoft/git)
+[![Build status](https://github.com/microsoft/git/workflows/CI/badge.svg)](https://github.com/microsoft/git/actions/workflows/main.yml)
+
+This is `microsoft/git`, a special Git distribution to support monorepo scenarios. If you are _not_
+working in a monorepo, you are likely searching for
+[Git for Windows](https://git-for-windows.github.io/) instead of this codebase.
+
+In addition to the Git command-line interface (CLI), `microsoft/git` includes the Scalar CLI to
+further enable working with extremely large repositories. Scalar is a tool to apply the latest
+recommendations and use the most advanced Git features. You can read
+[the Scalar CLI documentation](Documentation/scalar.txt) or read our
+[Scalar user guide](contrib/scalar/docs/index.md) including
+[the philosophy of Scalar](contrib/scalar/docs/philosophy.md).
+
+If you encounter problems with `microsoft/git`, please report them as
+[GitHub issues](https://github.com/microsoft/git/issues).
+
+Why is this fork needed?
+=========================================================

-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
+Git is awesome - it's a fast, scalable, distributed version control system with an unusually rich
+command set that provides both high-level operations and full access to internals. What more could
+you ask for?

-Git is an Open Source project covered by the GNU General Public
-License version 2 (some parts of it are under different licenses,
-compatible with the GPLv2). It was originally written by Linus
-Torvalds with help of a group of hackers around the net.
+Well, because Git is a distributed version control system, each Git repository has a copy of all
+files in the entire history. As large repositories, aka _monorepos_, grow, Git can struggle to
+manage all that data. As Git commands like `status` and `fetch` get slower, developers stop waiting
+and start switching context. And context switches harm developer productivity.

-Please read the file [INSTALL][] for installation instructions.
+`microsoft/git` is focused on addressing these performance woes and making the monorepo developer
+experience first-class. The Scalar CLI packages all of these recommendations into a simple set of
+commands.

-Many Git online resources are accessible from <https://git-scm.com/>
-including full documentation and Git related tools.
+One major feature that Scalar recommends is [partial clone](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/),
+which reduces the amount of data transferred in order to work with a Git repository. While several
+services such as GitHub support partial clone, Azure Repos instead has an older version of this
+functionality called
+[the GVFS protocol](https://docs.microsoft.com/en-us/azure/devops/learn/git/gvfs-architecture#gvfs-protocol).
+The integration with the GVFS protocol present in `microsoft/git` is not appropriate to include in
+the core Git client because partial clone is the official version of that functionality.

-See [Documentation/gittutorial.txt][] to get started, then see
-[Documentation/giteveryday.txt][] for a useful minimum set of commands, and
-`Documentation/git-<commandname>.txt` for documentation of each command.
-If git has been correctly installed, then the tutorial can also be
-read with `man gittutorial` or `git help tutorial`, and the
-documentation of each command with `man git-<commandname>` or `git help
-<commandname>`.
+Downloading and Installing
+=========================================================

-CVS users may also want to read [Documentation/gitcvs-migration.txt][]
-(`man gitcvs-migration` or `git help cvs-migration` if git is
-installed).
+If you're working in a monorepo and want to take advantage of the performance boosts in
+`microsoft/git`, then you can download the latest version installer for your OS from the
+[Releases page](https://github.com/microsoft/git/releases). Alternatively, you can opt to install
+via the command line, using the below instructions for supported OSes:

-The user discussion and development of Git take place on the Git
-mailing list -- everyone is welcome to post bug reports, feature
-requests, comments and patches to git@vger.kernel.org (read
-[Documentation/SubmittingPatches][] for instructions on patch submission
-and [Documentation/CodingGuidelines][]).
+## Windows

-Those wishing to help with error message, usage and informational message
-string translations (localization l10n) should see [po/README.md][]
-(a `po` file is a Portable Object file that holds the translations).
+__Note:__ Winget is still in public preview, meaning you currently
+[need to take special installation steps](https://docs.microsoft.com/en-us/windows/package-manager/winget/#install-winget):
+Either manually install the `.appxbundle` available at the
+[preview version of App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1?ocid=9nblggh4nns1_ORSEARCH_Bing&rtc=1&activetab=pivot:overviewtab),
+or participate in the
+[Windows Insider flight ring](https://insider.windows.com/)
+since `winget` is available by default on preview versions of Windows.

-To subscribe to the list, send an email to <git+subscribe@vger.kernel.org>
-(see https://subspace.kernel.org/subscribing.html for details). The mailing
-list archives are available at <https://lore.kernel.org/git/>,
-<https://marc.info/?l=git> and other archival sites.
+To install with Winget, run

-Issues which are security relevant should be disclosed privately to
-the Git Security mailing list <git-security@googlegroups.com>.
+```shell
+winget install --id microsoft.git
+```

-The maintainer frequently sends the "What's cooking" reports that
-list the current status of various development topics to the mailing
-list. The discussion following them give a good reference for
-project status, development direction and remaining tasks.
+Double-check that you have the right version by running these commands,
+which should have the same output:

-The name "git" was given by Linus Torvalds when he wrote the very
-first version. He described the tool as "the stupid content tracker"
-and the name as (depending on your mood):
+```shell
+git version
+scalar version
+```

-- random three-letter combination that is pronounceable, and not
-  actually used by any common UNIX command. The fact that it is a
-  mispronunciation of "get" may or may not be relevant.
-- stupid. contemptible and despicable. simple. Take your pick from the
-  dictionary of slang.
-- "global information tracker": you're in a good mood, and it actually
-  works for you. Angels sing, and a light suddenly fills the room.
-- "goddamn idiotic truckload of sh*t": when it breaks
+To upgrade `microsoft/git`, use the following Git command, which will download and install the latest
+release.

-[INSTALL]: INSTALL
-[Documentation/gittutorial.txt]: Documentation/gittutorial.txt
-[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
-[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
-[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
-[Documentation/CodingGuidelines]: Documentation/CodingGuidelines
-[po/README.md]: po/README.md
+```shell
+git update-microsoft-git
+```
+
+You may also be alerted with a notification to upgrade, which presents a single-click process for
+running `git update-microsoft-git`.
+
+## macOS
+
+To install `microsoft/git` on macOS, first [be sure that Homebrew is installed](https://brew.sh/) then
+install the `microsoft-git` cask with these steps:
+
+```shell
+brew tap microsoft/git
+brew install --cask microsoft-git
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade microsoft/git, you can run the necessary `brew` commands:
+
+```shell
+brew update
+brew upgrade --cask microsoft-git
+```
+
+Or you can run the `git update-microsoft-git` command, which will run those brew commands for you.
+
+## Linux
+
+### Ubuntu/Debian distributions
+
+On newer distributions*, you can install using the most recent Debian package.
+To download and validate the signature of this package, run the following:
+
+```shell
+# Install needed packages
+apt-get install -y curl debsig-verify
+
+# Download public key signature file
+curl -s https://api.github.com/repos/microsoft/git/releases/latest \
+| grep -E 'browser_download_url.*msft-git-public.asc' \
+| cut -d : -f 2,3 \
+| tr -d \" \
+| xargs -I 'url' curl -L -o msft-git-public.asc 'url'
+
+# De-armor public key signature file
+gpg --output msft-git-public.gpg --dearmor msft-git-public.asc
+
+# Note that the fingerprint of this key is "B8F12E25441124E1", which you can
+# determine by running:
+gpg --show-keys msft-git-public.asc | head -n 2 | tail -n 1 | tail -c 17
+
+# Copy de-armored public key to debsig keyring folder
+mkdir /usr/share/debsig/keyrings/B8F12E25441124E1
+mv msft-git-public.gpg /usr/share/debsig/keyrings/B8F12E25441124E1/
+
+# Create an appropriate policy file
+mkdir /etc/debsig/policies/B8F12E25441124E1
+cat > /etc/debsig/policies/B8F12E25441124E1/generic.pol << EOL
+<?xml version="1.0"?>
+<!DOCTYPE Policy SYSTEM "https://www.debian.org/debsig/1.0/policy.dtd">
+<Policy xmlns="https://www.debian.org/debsig/1.0/">
+  <Origin Name="Microsoft Git" id="B8F12E25441124E1" Description="Microsoft Git public key"/>
+  <Selection>
+    <Required Type="origin" File="msft-git-public.gpg" id="B8F12E25441124E1"/>
+  </Selection>
+  <Verification MinOptional="0">
+    <Required Type="origin" File="msft-git-public.gpg" id="B8F12E25441124E1"/>
+  </Verification>
+</Policy>
+EOL
+
+# Download Debian package
+curl -s https://api.github.com/repos/microsoft/git/releases/latest \
+| grep "browser_download_url.*deb" \
+| cut -d : -f 2,3 \
+| tr -d \" \
+| xargs -I 'url' curl -L -o msft-git.deb 'url'
+
+# Verify
+debsig-verify msft-git.deb
+
+# Install
+sudo dpkg -i msft-git.deb
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade, you will need to repeat these steps to reinstall.
+
+*Older distributions are missing some required dependencies. Even
+though the package may appear to install successfully, `microsoft/git`
+will not function as expected. If you are running Ubuntu 18.04 or
+older, please follow the install-from-source instructions below
+instead of installing the Debian package.
+
+### Other distributions
+
+You will need to compile and install `microsoft/git` from source:
+
+```shell
+git clone https://github.com/microsoft/git microsoft-git
+cd microsoft-git
+make -j12 prefix=/usr/local
+sudo make -j12 prefix=/usr/local install
+```
+
+For more assistance building Git from source, see
+[the INSTALL file in the core Git project](https://github.com/git/git/blob/master/INSTALL).
+
+Contributing
+=========================================================
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit <https://cla.microsoft.com>.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
50
SECURITY.md
50
SECURITY.md
|
@ -28,24 +28,38 @@ Examples for details to include:
|
|||
|
||||
## Supported Versions
|
||||
|
||||
There are no official "Long Term Support" versions in Git.
|
||||
Instead, the maintenance track (i.e. the versions based on the
|
||||
most recently published feature release, also known as ".0"
|
||||
version) sees occasional updates with bug fixes.
|
||||
Git for Windows is a "friendly fork" of [Git](https://git-scm.com/), i.e. changes in Git for Windows are frequently contributed back, and Git for Windows' release cycle closely following Git's.
|
||||
|
||||
Fixes to vulnerabilities are made for the maintenance track for
|
||||
the latest feature release and merged up to the in-development
|
||||
branches. The Git project makes no formal guarantee for any
|
||||
older maintenance tracks to receive updates. In practice,
|
||||
though, critical vulnerability fixes are applied not only to the
|
||||
most recent track, but to at least a couple more maintenance
|
||||
tracks.
|
||||
While Git maintains several release trains (when v2.19.1 was released, there were updates to v2.14.x-v2.18.x, too, for example), Git for Windows follows only the latest Git release. For example, there is no Git for Windows release corresponding to Git v2.16.5 (which was released after v2.19.0).
|
||||
|
||||
This is typically done by making the fix on the oldest and still
|
||||
relevant maintenance track, and merging it upwards to newer and
|
||||
newer maintenance tracks.
|
||||
One exception is [MinGit for Windows](https://github.com/git-for-windows/git/wiki/MinGit) (a minimal subset of Git for Windows, intended for bundling with third-party applications that do not need any interactive commands nor support for `git svn`): critical security fixes are backported to the v2.11.x, v2.14.x, v2.19.x, v2.21.x and v2.23.x release trains.
|
||||
|
||||
For example, v2.24.1 was released to address a couple of
|
||||
[CVEs](https://cve.mitre.org/), and at the same time v2.14.6,
|
||||
v2.15.4, v2.16.6, v2.17.3, v2.18.2, v2.19.3, v2.20.2, v2.21.1,
|
||||
v2.22.2 and v2.23.1 were released.
|
||||
## Version number scheme
|
||||
|
||||
The Git for Windows versions reflect the Git version on which they are based. For example, Git for Windows v2.21.0 is based on Git v2.21.0.
|
||||
|
||||
As Git for Windows bundles more than just Git (such as Bash, OpenSSL, OpenSSH, GNU Privacy Guard), sometimes there are interim releases without corresponding Git releases. In these cases, Git for Windows appends a number in parentheses, starting with the number 2, then 3, etc. For example, both Git for Windows v2.17.1 and v2.17.1(2) were based on Git v2.17.1, but the latter included updates for Git Credential Manager and Git LFS, fixing critical regressions.
|
||||
|
||||
## Tag naming scheme
|
||||
|
||||
Every Git for Windows version is tagged using a name that starts with the Git version on which it is based, with the suffix `.windows.<patchlevel>` appended. For example, Git for Windows v2.17.1' source code is tagged as [`v2.17.1.windows.1`](https://github.com/git-for-windows/git/releases/tag/v2.17.1.windows.1) (the patch level is always at least 1, given that Git for Windows always has patches on top of Git). Likewise, Git for Windows v2.17.1(2)' source code is tagged as [`v2.17.1.windows.2`](https://github.com/git-for-windows/git/releases/tag/v2.17.1.windows.2).
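
To see the tag naming scheme in practice, the tags can be queried directly from the hosting service; for example:

```shell
# List the Git for Windows tags built on top of Git v2.17.1:
git ls-remote --tags https://github.com/git-for-windows/git 'v2.17.1.windows.*'
```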
+
+## Release Candidate (rc) versions
+
+As a friendly fork of Git (the "upstream" project), Git for Windows is closely correlated to that project.
+
+Consequently, Git for Windows publishes versions based on Git's release candidates (for upcoming "`.0`" versions, see [Git's release schedule](https://tinyurl.com/gitCal)). These versions end in `-rc<n>`, starting with `-rc0` for a very early preview of what is to come, and as with regular versions, Git for Windows tries to follow Git's releases as quickly as possible.
+
+Note: there is currently a bug in the "Check daily for updates" code, where it mistakes the final version for a downgrade from release candidates. Example: if you installed Git for Windows v2.23.0-rc3 and enabled the auto-updater, it would ask you whether you want to "downgrade" to v2.23.0 when that version was available.
+
+[All releases](https://github.com/git-for-windows/git/releases/), including release candidates, are listed via a link at the footer of the [Git for Windows](https://gitforwindows.org/) home page.
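
The same list can be pulled from the GitHub releases API if you prefer scripting over the web page; a small sketch, assuming `curl` and `jq` are installed:

```shell
# Print recent release tags, release candidates included:
curl -s https://api.github.com/repos/git-for-windows/git/releases \
| jq -r '.[].tag_name'
```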
+
+## Snapshot versions ('nightly builds')
+
+Git for Windows also provides snapshots (these are not releases) of the current development as per git-for-windows/git's `master` branch at the [Snapshots](https://wingit.blob.core.windows.net/files/index.html) page. This link is also listed in the footer of the [Git for Windows](https://gitforwindows.org/) home page.
+
+Note: even if those builds are not exactly "nightly", they are sometimes referred to as "nightly builds" to keep with other projects' nomenclature.
+
+## Following upstream's developments
+
+The [git-for-windows/git repository](https://github.com/git-for-windows/git) also provides the `shears/*` branches. The `shears/*` branches reflect Git for Windows' patches, rebased onto the upstream integration branches, [updated (mostly) via automated CI builds](https://dev.azure.com/git-for-windows/git/_build?definitionId=25).
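
Such a rebased patch series can be fetched like any other branch if you want to inspect it locally; the branch name below is only illustrative:

```shell
# Fetch a shears branch (name is illustrative) into a local ref:
git fetch https://github.com/git-for-windows/git shears/maint:refs/heads/shears-maint
git log --oneline shears-maint
```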

abspath.c:

@@ -14,7 +14,7 @@ int is_directory(const char *path)
 }

 /* removes the last path component from 'path' except if 'path' is root */
-static void strip_last_component(struct strbuf *path)
+void strip_last_path_component(struct strbuf *path)
 {
     size_t offset = offset_1st_component(path->buf);
     size_t len = path->len;

@@ -93,6 +93,9 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
             goto error_out;
     }

+    if (platform_strbuf_realpath(resolved, path))
+        return resolved->buf;
+
     strbuf_addstr(&remaining, path);
     get_root_part(resolved, &remaining);

@@ -116,7 +119,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
             continue; /* '.' component */
         } else if (next.len == 2 && !strcmp(next.buf, "..")) {
             /* '..' component; strip the last path component */
-            strip_last_component(resolved);
+            strip_last_path_component(resolved);
             continue;
         }

@@ -168,7 +171,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
              * strip off the last component since it will
              * be replaced with the contents of the symlink
              */
-            strip_last_component(resolved);
+            strip_last_path_component(resolved);
         }

         /*

abspath.h:

@@ -10,6 +10,11 @@ char *real_pathdup(const char *path, int die_on_error);
 const char *absolute_path(const char *path);
 char *absolute_pathdup(const char *path);

+/**
+ * Remove the last path component from 'path' except if 'path' is root.
+ */
+void strip_last_path_component(struct strbuf *path);
+
 /*
  * Concatenate "prefix" (if len is non-zero) and "path", with no
  * connecting characters (so "prefix" should end with a "/").

advice.c (2 changed lines):

@@ -57,6 +57,7 @@ static struct {
     [ADVICE_GRAFT_FILE_DEPRECATED]  = { "graftFileDeprecated" },
     [ADVICE_IGNORED_HOOK]           = { "ignoredHook" },
     [ADVICE_IMPLICIT_IDENTITY]      = { "implicitIdentity" },
+    [ADVICE_NAME_TOO_LONG]          = { "nameTooLong" },
     [ADVICE_NESTED_TAG]             = { "nestedTag" },
     [ADVICE_OBJECT_NAME_WARNING]    = { "objectNameWarning" },
     [ADVICE_PUSH_ALREADY_EXISTS]    = { "pushAlreadyExists" },

@@ -81,6 +82,7 @@ static struct {
     [ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE] = { "submoduleAlternateErrorStrategyDie" },
     [ADVICE_SUGGEST_DETACHING_HEAD] = { "suggestDetachingHead" },
     [ADVICE_UPDATE_SPARSE_PATH]     = { "updateSparsePath" },
+    [ADVICE_USE_CORE_FSMONITOR_CONFIG] = { "useCoreFSMonitorConfig" },
    [ADVICE_WAITING_FOR_EDITOR]     = { "waitingForEditor" },
     [ADVICE_WORKTREE_ADD_ORPHAN]    = { "worktreeAddOrphan" },
 };

advice.h (2 changed lines):

@@ -25,6 +25,7 @@ enum advice_type {
     ADVICE_GRAFT_FILE_DEPRECATED,
     ADVICE_IGNORED_HOOK,
     ADVICE_IMPLICIT_IDENTITY,
+    ADVICE_NAME_TOO_LONG,
     ADVICE_NESTED_TAG,
     ADVICE_OBJECT_NAME_WARNING,
     ADVICE_PUSH_ALREADY_EXISTS,

@@ -49,6 +50,7 @@ enum advice_type {
     ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE,
     ADVICE_SUGGEST_DETACHING_HEAD,
     ADVICE_UPDATE_SPARSE_PATH,
+    ADVICE_USE_CORE_FSMONITOR_CONFIG,
     ADVICE_WAITING_FOR_EDITOR,
     ADVICE_WORKTREE_ADD_ORPHAN,
 };
apply.c (20 changed lines):

@@ -3379,6 +3379,24 @@ static int checkout_target(struct index_state *istate,
 {
     struct checkout costate = CHECKOUT_INIT;

+    /*
+     * Do not checkout the entry if the skipworktree bit is set
+     *
+     * Both callers of this method (check_preimage and load_current)
+     * check for the existence of the file before calling this
+     * method so we know that the file doesn't exist at this point
+     * and we don't need to perform that check again here.
+     * We just need to check the skip-worktree and return.
+     *
+     * This is to prevent git from creating a file in the
+     * working directory that has the skip-worktree bit on,
+     * then updating the index from the patch and not keeping
+     * the working directory version up to date with what it
+     * changed the index version to be.
+     */
+    if (ce_skip_worktree(ce))
+        return 0;
+
     costate.refresh_cache = 1;
     costate.istate = istate;
     if (checkout_entry(ce, &costate, NULL, NULL) ||

@@ -4393,7 +4411,7 @@ static int try_create_file(struct apply_state *state, const char *path,
         /* Although buf:size is counted string, it also is NUL
          * terminated.
          */
-        return !!symlink(buf, path);
+        return !!create_symlink(state && state->repo ? state->repo->index : NULL, buf, path);

     fd = open(path, O_CREAT | O_EXCL | O_WRONLY, (mode & 0100) ? 0777 : 0666);
     if (fd < 0)
azure-pipelines.yml (new file):

@@ -0,0 +1,393 @@
variables:
  Agent.Source.Git.ShallowFetchDepth: 1
  GIT_CONFIG_PARAMETERS: "'checkout.workers=56' 'user.name=CI' 'user.email=ci@git'"

jobs:
- job: windows_build
  displayName: Windows Build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  steps:
  - bash: git clone --bare --depth=1 --filter=blob:none --single-branch -b main https://github.com/git-for-windows/git-sdk-64
    displayName: 'clone git-sdk-64'
  - bash: git clone --depth=1 --single-branch -b main https://github.com/git-for-windows/build-extra
    displayName: 'clone build-extra'
  - bash: sh -x ./build-extra/please.sh create-sdk-artifact --sdk=git-sdk-64.git --out=git-sdk-64-minimal minimal-sdk
    displayName: 'build git-sdk-64-minimal-sdk'
  - bash: |
      # Let Git ignore the SDK and the test-cache
      printf "%s\n" /git-sdk-64.git/ /build-extra/ /git-sdk-64-minimal/ /test-cache/ >>'.git/info/exclude'
    displayName: 'Ignore untracked directories'
  - bash: ci/make-test-artifacts.sh artifacts
    displayName: Build
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: test artifacts'
    inputs:
      artifactName: 'windows-artifacts'
      targetPath: '$(Build.SourcesDirectory)\artifacts'
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: git-sdk-64-minimal'
    inputs:
      artifactName: 'git-sdk-64-minimal'
      targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'

- job: windows_test
  displayName: Windows Test
  dependsOn: windows_build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  strategy:
    parallel: 10
  steps:
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: test artifacts'
    inputs:
      artifactName: 'windows-artifacts'
      targetPath: '$(Build.SourcesDirectory)'
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: git-sdk-64-minimal'
    inputs:
      artifactName: 'git-sdk-64-minimal'
      targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'
  - bash: |
      test -f artifacts.tar.gz || {
        echo No test artifacts found\; skipping >&2
        exit 0
      }
      tar xf artifacts.tar.gz || exit 1

      # Let Git ignore the SDK and the test-cache
      printf '%s\n' /git-sdk-64.git/ /build-extra/ /git-sdk-64-minimal/ /test-cache/ >>.git/info/exclude

      ci/run-test-slice.sh $SYSTEM_JOBPOSITIONINPHASE $SYSTEM_TOTALJOBSINPHASE || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'Test (parallel)'
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      NO_SVN_TESTS: 1
      GIT_TEST_SKIP_REBASE_P: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin\\core_perl;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'windows'
      platform: Windows
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: vs_build
  displayName: Visual Studio Build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  steps:
  - bash: git clone --bare --depth=1 --filter=blob:none --single-branch -b main https://github.com/git-for-windows/git-sdk-64
    displayName: 'clone git-sdk-64'
  - bash: git clone --depth=1 --single-branch -b main https://github.com/git-for-windows/build-extra
    displayName: 'clone build-extra'
  - bash: sh -x ./build-extra/please.sh create-sdk-artifact --sdk=git-sdk-64.git --out=git-sdk-64-minimal minimal-sdk
    displayName: 'build git-sdk-64-minimal-sdk'
  - bash: |
      # Let Git ignore the SDK and the test-cache
      printf "%s\n" /git-sdk-64-minimal/ /test-cache/ >>'.git/info/exclude'
    displayName: 'Ignore untracked directories'
  - bash: make NDEBUG=1 DEVELOPER=1 vcxproj
    displayName: Generate Visual Studio Solution
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - powershell: |
      $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
      $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
      $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
      (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
      Expand-Archive compat.zip -DestinationPath . -Force
      Remove-Item compat.zip
    displayName: 'Download vcpkg artifacts'
  - task: MSBuild@1
    inputs:
      solution: git.sln
      platform: x64
      configuration: Release
      maximumCpuCount: 4
      msbuildArguments: /p:PlatformToolset=v142
  - bash: |
      ./compat/vcbuild/vcpkg_copy_dlls.bat release &&
      mkdir -p artifacts &&
      eval "$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts | grep ^tar)"
    displayName: Bundle artifact tar
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      MSVC: 1
      VCPKG_ROOT: $(Build.SourcesDirectory)\compat\vcbuild\vcpkg
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - powershell: |
      $tag = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-tag.txt").content
      $version = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-version.txt").content
      $url = "https://github.com/git-for-windows/git/releases/download/${tag}/PortableGit-${version}-64-bit.7z.exe"
      (New-Object Net.WebClient).DownloadFile($url,"PortableGit.exe")
      & .\PortableGit.exe -y -oartifacts\PortableGit
      # Wait until it is unpacked
      while (-not @(Remove-Item -ErrorAction SilentlyContinue PortableGit.exe; $?)) { sleep 1 }
    displayName: Download & extract portable Git
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: MSVC test artifacts'
    inputs:
      artifactName: 'vs-artifacts'
      targetPath: '$(Build.SourcesDirectory)\artifacts'

- job: vs_test
  displayName: Visual Studio Test
  dependsOn: vs_build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  strategy:
    parallel: 10
  steps:
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: VS test artifacts'
    inputs:
      artifactName: 'vs-artifacts'
      targetPath: '$(Build.SourcesDirectory)'
  - bash: |
      test -f artifacts.tar.gz || {
        echo No test artifacts found\; skipping >&2
        exit 0
      }
      tar xf artifacts.tar.gz || exit 1

      # Let Git ignore the SDK and the test-cache
      printf '%s\n' /PortableGit/ /test-cache/ >>.git/info/exclude

      cd t &&
      PATH="$PWD/helper:$PATH" &&
      test-tool.exe run-command testsuite --jobs=10 -V -x --write-junit-xml \
        $(test-tool.exe path-utils slice-tests \
          $SYSTEM_JOBPOSITIONINPHASE $SYSTEM_TOTALJOBSINPHASE t[0-9]*.sh)
    displayName: 'Test (parallel)'
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      NO_SVN_TESTS: 1
      GIT_TEST_SKIP_REBASE_P: 1
      PATH: "$(Build.SourcesDirectory)\\PortableGit\\mingw64\\bin;$(Build.SourcesDirectory)\\PortableGit\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'vs'
      platform: Windows
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-vs-test-artifacts

- job: linux_clang
  displayName: linux-clang
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      export CC=clang || exit 1

      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux-clang'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: linux_gcc
  displayName: linux-gcc
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux-gcc'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: osx_clang
  displayName: osx-clang
  condition: succeeded()
  pool:
    vmImage: macOS-latest
  steps:
  - bash: |
      export CC=clang

      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'osx-clang'
      platform: macOS
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: osx_gcc
  displayName: osx-gcc
  condition: succeeded()
  pool:
    vmImage: macOS-latest
  steps:
  - bash: |
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'osx-gcc'
      platform: macOS
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: linux32
  displayName: Linux32
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      res=0
      sudo AGENT_OS="$AGENT_OS" BUILD_BUILDNUMBER="$BUILD_BUILDNUMBER" BUILD_REPOSITORY_URI="$BUILD_REPOSITORY_URI" BUILD_SOURCEBRANCH="$BUILD_SOURCEBRANCH" BUILD_SOURCEVERSION="$BUILD_SOURCEVERSION" SYSTEM_PHASENAME="$SYSTEM_PHASENAME" SYSTEM_TASKDEFINITIONSURI="$SYSTEM_TASKDEFINITIONSURI" SYSTEM_TEAMPROJECT="$SYSTEM_TEAMPROJECT" CC=$CC MAKEFLAGS="$MAKEFLAGS" jobname=linux32 bash -lxc ci/run-docker.sh || res=1

      sudo chmod a+r t/out/TEST-*.xml
      test ! -d t/failed-test-artifacts || sudo chmod a+r t/failed-test-artifacts

      exit $res
    displayName: 'jobname=linux32 ci/run-docker.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux32'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: static_analysis
  displayName: StaticAnalysis
  condition: succeeded()
  pool:
    vmImage: ubuntu-22.04
  steps:
  - bash: |
      sudo apt-get update &&
      sudo apt-get install -y coccinelle libcurl4-openssl-dev libssl-dev libexpat-dev gettext &&

      export jobname=StaticAnalysis &&

      ci/run-static-analysis.sh || exit 1
    displayName: 'ci/run-static-analysis.sh'

- job: documentation
  displayName: Documentation
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      sudo apt-get update &&
      sudo apt-get install -y asciidoc xmlto asciidoctor docbook-xsl-ns &&

      export ALREADY_HAVE_ASCIIDOCTOR=yes. &&
      export jobname=Documentation &&

      ci/test-documentation.sh || exit 1
    displayName: 'ci/test-documentation.sh'
builtin.h:

@@ -235,6 +235,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix);
 int cmd_unpack_file(int argc, const char **argv, const char *prefix);
 int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
 int cmd_update_index(int argc, const char **argv, const char *prefix);
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix);
 int cmd_update_ref(int argc, const char **argv, const char *prefix);
 int cmd_update_server_info(int argc, const char **argv, const char *prefix);
 int cmd_upload_archive(int argc, const char **argv, const char *prefix);
builtin/add.c:

@@ -5,6 +5,7 @@
  */
 #define USE_THE_INDEX_VARIABLE
 #include "builtin.h"
+#include "environment.h"
 #include "advice.h"
 #include "config.h"
 #include "lockfile.h"

@@ -45,6 +46,7 @@ static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
     int err;

     if (!include_sparse &&
+        !core_virtualfilesystem &&
         (ce_skip_worktree(ce) ||
          !path_in_sparse_checkout(ce->name, &the_index)))
         continue;

@@ -125,8 +127,9 @@ static int refresh(int verbose, const struct pathspec *pathspec)
         if (!seen[i]) {
             const char *path = pathspec->items[i].original;

-            if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
-                !path_in_sparse_checkout(path, &the_index)) {
+            if (!core_virtualfilesystem &&
+                (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
+                 !path_in_sparse_checkout(path, &the_index))) {
                 string_list_append(&only_match_skip_worktree,
                            pathspec->items[i].original);
             } else {

@@ -136,7 +139,11 @@ static int refresh(int verbose, const struct pathspec *pathspec)
         }
     }

-    if (only_match_skip_worktree.nr) {
+    /*
+     * When using a virtual filesystem, we might re-add a path
+     * that is currently virtual and we want that to succeed.
+     */
+    if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
         advise_on_updating_sparse_paths(&only_match_skip_worktree);
         ret = 1;
     }

@@ -464,6 +471,10 @@ int cmd_add(int argc, const char **argv, const char *prefix)
     die_in_unpopulated_submodule(&the_index, prefix);
     die_path_inside_submodule(&the_index, &pathspec);

+    enable_fscache(0);
     /* We do not really re-read the index but update the up-to-date flags */
     preload_index(&the_index, &pathspec, 0);

     if (add_new_files) {
         int baselen;

@@ -512,7 +523,11 @@ int cmd_add(int argc, const char **argv, const char *prefix)
         if (seen[i])
             continue;

-        if (!include_sparse &&
+        /*
+         * When using a virtual filesystem, we might re-add a path
+         * that is currently virtual and we want that to succeed.
+         */
+        if (!include_sparse && !core_virtualfilesystem &&
             matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
             string_list_append(&only_match_skip_worktree,
                        pathspec.items[i].original);

@@ -536,7 +551,6 @@ int cmd_add(int argc, const char **argv, const char *prefix)
         }
     }

-
     if (only_match_skip_worktree.nr) {
         advise_on_updating_sparse_paths(&only_match_skip_worktree);
         exit_status = 1;

@@ -570,5 +584,6 @@ finish:

     dir_clear(&dir);
     clear_pathspec(&pathspec);
+    enable_fscache(0);
     return exit_status;
 }
builtin/checkout.c:

@@ -18,6 +18,7 @@
 #include "merge-recursive.h"
 #include "object-name.h"
 #include "object-store-ll.h"
+#include "packfile.h"
 #include "parse-options.h"
 #include "path.h"
 #include "preload-index.h"

@@ -401,6 +402,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
     if (pc_workers > 1)
         init_parallel_checkout();

+    enable_fscache(the_index.cache_nr);
     for (pos = 0; pos < the_index.cache_nr; pos++) {
         struct cache_entry *ce = the_index.cache[pos];
         if (ce->ce_flags & CE_MATCHED) {

@@ -425,6 +427,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
         errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
                           NULL, NULL);
     mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
+    disable_fscache();
     remove_marked_cache_entries(&the_index, 1);
     remove_scheduled_dirs();
     errs |= finish_delayed_checkout(&state, opts->show_progress);

@@ -1020,8 +1023,16 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
     remove_branch_state(the_repository, !opts->quiet);
     strbuf_release(&msg);
     if (!opts->quiet &&
-        (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+        (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) {
+        unsigned long nr_unpack_entry_at_start;
+
+        trace2_region_enter("tracking", "report_tracking", the_repository);
+        nr_unpack_entry_at_start = get_nr_unpack_entry();
         report_tracking(new_branch_info);
+        trace2_data_intmax("tracking", NULL, "report_tracking/nr_unpack_entries",
+                   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
+        trace2_region_leave("tracking", "report_tracking", the_repository);
+    }
 }

 static int add_pending_uninteresting_ref(const char *refname,
builtin/clean.c:

@@ -24,6 +24,7 @@
 #include "pathspec.h"
 #include "help.h"
 #include "prompt.h"
+#include "advice.h"

 static int force = -1; /* unset */
 static int interactive;

@@ -39,6 +40,10 @@ static const char *msg_remove = N_("Removing %s\n");
 static const char *msg_would_remove = N_("Would remove %s\n");
 static const char *msg_skip_git_dir = N_("Skipping repository %s\n");
 static const char *msg_would_skip_git_dir = N_("Would skip repository %s\n");
+#ifndef CAN_UNLINK_MOUNT_POINTS
+static const char *msg_skip_mount_point = N_("Skipping mount point %s\n");
+static const char *msg_would_skip_mount_point = N_("Would skip mount point %s\n");
+#endif
 static const char *msg_warn_remove_failed = N_("failed to remove %s");
 static const char *msg_warn_lstat_failed = N_("could not lstat %s\n");
 static const char *msg_skip_cwd = N_("Refusing to remove current working directory\n");

@@ -183,6 +188,29 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
         goto out;
     }

+    if (is_mount_point(path)) {
+#ifndef CAN_UNLINK_MOUNT_POINTS
+        if (!quiet) {
+            quote_path(path->buf, prefix, &quoted, 0);
+            printf(dry_run ?
+                   _(msg_would_skip_mount_point) :
+                   _(msg_skip_mount_point), quoted.buf);
+        }
+        *dir_gone = 0;
+#else
+        if (!dry_run && unlink(path->buf)) {
+            int saved_errno = errno;
+            quote_path(path->buf, prefix, &quoted, 0);
+            errno = saved_errno;
+            warning_errno(_(msg_warn_remove_failed), quoted.buf);
+            *dir_gone = 0;
+            ret = -1;
+        }
+#endif
+
+        goto out;
+    }
+
     dir = opendir(path->buf);
     if (!dir) {
         /* an empty dir could be removed even if it is unreadable */

@@ -192,6 +220,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
             quote_path(path->buf, prefix, &quoted, 0);
             errno = saved_errno;
             warning_errno(_(msg_warn_remove_failed), quoted.buf);
+            if (saved_errno == ENAMETOOLONG) {
+                advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+            }
             *dir_gone = 0;
         }
         ret = res;

@@ -227,6 +258,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
                 quote_path(path->buf, prefix, &quoted, 0);
                 errno = saved_errno;
                 warning_errno(_(msg_warn_remove_failed), quoted.buf);
+                if (saved_errno == ENAMETOOLONG) {
+                    advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+                }
                 *dir_gone = 0;
                 ret = 1;
             }

@@ -270,6 +304,9 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
             quote_path(path->buf, prefix, &quoted, 0);
             errno = saved_errno;
             warning_errno(_(msg_warn_remove_failed), quoted.buf);
+            if (saved_errno == ENAMETOOLONG) {
+                advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+            }
             *dir_gone = 0;
             ret = 1;
         }

@@ -1022,6 +1059,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)

     if (repo_read_index(the_repository) < 0)
         die(_("index file corrupt"));
+    enable_fscache(the_index.cache_nr);

     pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option");
     for (i = 0; i < exclude_list.nr; i++)

@@ -1088,6 +1126,9 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
             qname = quote_path(item->string, NULL, &buf, 0);
             errno = saved_errno;
             warning_errno(_(msg_warn_remove_failed), qname);
+            if (saved_errno == ENAMETOOLONG) {
+                advise_if_enabled(ADVICE_NAME_TOO_LONG, _("Setting `core.longPaths` may allow the deletion to succeed."));
+            }
             errors++;
         } else if (!quiet) {
             qname = quote_path(item->string, NULL, &buf, 0);

@@ -1096,6 +1137,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
         }
     }

+    disable_fscache();
     strbuf_release(&abs_path);
     strbuf_release(&buf);
     string_list_clear(&del_list, 0);
builtin/clone.c:

@@ -926,6 +926,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
     struct ref *mapped_refs = NULL;
     const struct ref *ref;
     struct strbuf key = STRBUF_INIT;
+    struct strbuf buf = STRBUF_INIT;
     struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT;
     struct transport *transport = NULL;
     const char *src_ref_prefix = "refs/heads/";

@@ -1125,6 +1126,50 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
         git_dir = real_git_dir;
     }

+    /*
+     * We have a chicken-and-egg situation between initializing the refdb
+     * and spawning transport helpers:
+     *
+     *   - Initializing the refdb requires us to know about the object
+     *     format. We thus have to spawn the transport helper to learn
+     *     about it.
+     *
+     *   - The transport helper may want to access the Git repository. But
+     *     because the refdb has not been initialized, we don't have "HEAD"
+     *     or "refs/". Thus, the helper cannot find the Git repository.
+     *
+     * Ideally, we would have structured the helper protocol such that it's
+     * mandatory for the helper to first announce its capabilities without
+     * yet assuming a fully initialized repository. Like that, we could
+     * have added a "lazy-refdb-init" capability that announces whether the
+     * helper is ready to handle not-yet-initialized refdbs. If any helper
+     * didn't support them, we would have fully initialized the refdb with
+     * the SHA1 object format, but later on bailed out if we found out that
+     * the remote repository used a different object format.
+     *
+     * But we didn't, and thus we use the following workaround to partially
+     * initialize the repository's refdb such that it can be discovered by
+     * Git commands. To do so, we:
+     *
+     *   - Create an invalid HEAD ref pointing at "refs/heads/.invalid".
+     *
+     *   - Create the "refs/" directory.
+     *
+     *   - Set up the ref storage format and repository version as
+     *     required.
+     *
+     * This is sufficient for Git commands to discover the Git directory.
+     */
+    initialize_repository_version(GIT_HASH_UNKNOWN,
+                      the_repository->ref_storage_format, 1);
+
+    strbuf_addf(&buf, "%s/HEAD", git_dir);
+    write_file(buf.buf, "ref: refs/heads/.invalid");
+
+    strbuf_reset(&buf);
+    strbuf_addf(&buf, "%s/refs", git_dir);
+    safe_create_dir(buf.buf, 1);
+
     /*
      * additional config can be injected with -c, make sure it's included
      * after init_db, which clears the entire config environment.

@@ -1453,6 +1498,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
     free(remote_name);
     strbuf_release(&reflog_msg);
     strbuf_release(&branch_top);
+    strbuf_release(&buf);
     strbuf_release(&key);
     free_refs(mapped_refs);
     free_refs(remote_head_points_at);
builtin/commit.c (240 changed lines):

@@ -38,6 +38,7 @@
 #include "commit-reach.h"
 #include "commit-graph.h"
 #include "pretty.h"
+#include "trace2.h"

 static const char * const builtin_commit_usage[] = {
     N_("git commit [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend]\n"

@@ -167,6 +168,122 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
     return 0;
 }

+static int do_serialize = 0;
+static char *serialize_path = NULL;
+
+static int reject_implicit = 0;
+static int do_implicit_deserialize = 0;
+static int do_explicit_deserialize = 0;
+static char *deserialize_path = NULL;
+
+static enum wt_status_deserialize_wait implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+static enum wt_status_deserialize_wait explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+
+/*
+ * --serialize | --serialize=<path>
+ *
+ * Request that we serialize status output rather than or in addition to
+ * printing in any of the established formats.
+ *
+ * Without a path, we write binary serialization data to stdout (and omit
+ * the normal status output).
+ *
+ * With a path, we write binary serialization data to the <path> and then
+ * write normal status output.
+ */
+static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
+{
+    enum wt_status_format *value = (enum wt_status_format *)opt->value;
+    if (unset || !arg)
+        *value = STATUS_FORMAT_SERIALIZE_V1;
+
+    if (arg) {
+        free(serialize_path);
+        serialize_path = xstrdup(arg);
+    }
+
+    if (do_explicit_deserialize)
+        die("cannot mix --serialize and --deserialize");
+    do_implicit_deserialize = 0;
+
+    do_serialize = 1;
+    return 0;
+}
+
+/*
+ * --deserialize | --deserialize=<path> |
+ * --no-deserialize
+ *
+ * Request that we deserialize status data from some existing resource
+ * rather than performing a status scan.
+ *
+ * The input source can come from stdin or a path given here -- or be
+ * inherited from the config settings.
+ */
+static int opt_parse_deserialize(const struct option *opt, const char *arg, int unset)
+{
+    if (unset) {
+        do_implicit_deserialize = 0;
+        do_explicit_deserialize = 0;
+    } else {
+        if (do_serialize)
+            die("cannot mix --serialize and --deserialize");
+        if (arg) {
+            /* override config or stdin */
+            free(deserialize_path);
+            deserialize_path = xstrdup(arg);
+        }
+        if (!deserialize_path || !*deserialize_path)
+            do_explicit_deserialize = 1; /* read stdin */
+        else if (wt_status_deserialize_access(deserialize_path, R_OK) == 0)
+            do_explicit_deserialize = 1; /* can read from this file */
+        else {
+            /*
+             * otherwise, silently fallback to the normal
+             * collection scan
+             */
+            do_implicit_deserialize = 0;
+            do_explicit_deserialize = 0;
+        }
+    }
+
+    return 0;
+}
+
+static enum wt_status_deserialize_wait parse_dw(const char *arg)
+{
+    int tenths;
+
+    if (!strcmp(arg, "fail"))
+        return DESERIALIZE_WAIT__FAIL;
+    else if (!strcmp(arg, "block"))
+        return DESERIALIZE_WAIT__BLOCK;
+    else if (!strcmp(arg, "no"))
+        return DESERIALIZE_WAIT__NO;
+
+    /*
+     * Otherwise, assume it is a timeout in tenths of a second.
+     * If it contains a bogus value, atol() will return zero
+     * which is OK.
+     */
+    tenths = atol(arg);
+    if (tenths < 0)
+        tenths = DESERIALIZE_WAIT__NO;
+    return tenths;
+}
+
+static int opt_parse_deserialize_wait(const struct option *opt,
+                      const char *arg,
+                      int unset)
+{
+    if (unset)
+        explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+    else
+        explicit_deserialize_wait = parse_dw(arg);
+
+    return 0;
+}
+
 static int opt_parse_m(const struct option *opt, const char *arg, int unset)
 {
     struct strbuf *buf = opt->value;

@@ -1168,6 +1285,8 @@ static void handle_untracked_files_arg(struct wt_status *s)
         s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
     else if (!strcmp(untracked_files_arg, "all"))
         s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+    else if (!strcmp(untracked_files_arg,"complete"))
+        s->show_untracked_files = SHOW_COMPLETE_UNTRACKED_FILES;
     /*
      * Please update $__git_untracked_file_modes in
      * git-completion.bash when you add new options

@@ -1455,6 +1574,28 @@ static int git_status_config(const char *k, const char *v,
         s->relative_paths = git_config_bool(k, v);
         return 0;
     }
+    if (!strcmp(k, "status.deserializepath")) {
+        /*
+         * Automatically assume deserialization if this is
+         * set in the config and the file exists. Do not
+         * complain if the file does not exist, because we
+         * silently fall back to normal mode.
+         */
+        if (v && *v && access(v, R_OK) == 0) {
+            do_implicit_deserialize = 1;
+            deserialize_path = xstrdup(v);
+        } else {
+            reject_implicit = 1;
+        }
+        return 0;
+    }
+    if (!strcmp(k, "status.deserializewait")) {
+        if (!v || !*v)
+            implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+        else
+            implicit_deserialize_wait = parse_dw(v);
+        return 0;
+    }
     if (!strcmp(k, "status.showuntrackedfiles")) {
         if (!v)
             return config_error_nonbool(k);

@@ -1495,7 +1636,8 @@ int cmd_status(int argc, const char **argv, const char *prefix)
     static const char *rename_score_arg = (const char *)-1;
     static struct wt_status s;
     unsigned int progress_flag = 0;
-    int fd;
+    int try_deserialize;
+    int fd = -1;
     struct object_id oid;
     static struct option builtin_status_options[] = {
         OPT__VERBOSE(&verbose, N_("be verbose")),

@@ -1510,6 +1652,15 @@ int cmd_status(int argc, const char **argv, const char *prefix)
         OPT_CALLBACK_F(0, "porcelain", &status_format,
                N_("version"), N_("machine-readable output"),
                PARSE_OPT_OPTARG, opt_parse_porcelain),
+        { OPTION_CALLBACK, 0, "serialize", &status_format,
+          N_("path"), N_("serialize raw status data to path or stdout"),
+          PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize },
+        { OPTION_CALLBACK, 0, "deserialize", NULL,
+          N_("path"), N_("deserialize raw status data from file"),
+          PARSE_OPT_OPTARG, opt_parse_deserialize },
+        { OPTION_CALLBACK, 0, "deserialize-wait", NULL,
+          N_("fail|block|no"), N_("how to wait if status cache file is invalid"),
+          PARSE_OPT_OPTARG, opt_parse_deserialize_wait },
         OPT_SET_INT(0, "long", &status_format,
                 N_("show status in long format (default)"),
                 STATUS_FORMAT_LONG),

@@ -1554,10 +1705,54 @@ int cmd_status(int argc, const char **argv, const char *prefix)
         s.show_untracked_files == SHOW_NO_UNTRACKED_FILES)
         die(_("Unsupported combination of ignored and untracked-files arguments"));

+    if (s.show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES &&
+        s.show_ignored_mode == SHOW_NO_IGNORED)
+        die(_("Complete Untracked only supported with ignored files"));
+
     parse_pathspec(&s.pathspec, 0,
                PATHSPEC_PREFER_FULL,
                prefix, argv);

+    /*
+     * If we want to try to deserialize status data from a cache file,
+     * we need to re-order the initialization code. The problem is that
+     * this makes for a very nasty diff and causes merge conflicts as we
+     * carry it forward. And it is easy to mess up the merge, so we
+     * duplicate some code here to hopefully reduce conflicts.
+     */
+    try_deserialize = (!do_serialize &&
+               (do_implicit_deserialize || do_explicit_deserialize));
+
+    /*
+     * Disable deserialize when verbose is set because it causes us to
+     * print diffs for each modified file, but that requires us to have
+     * the index loaded and we don't want to do that (at least not now for
+     * this seldom used feature). My fear is that would further tangle
+     * the merge conflict with upstream.
+     *
+     * TODO Reconsider this in the future.
+     */
+    if (try_deserialize && verbose) {
+        trace2_data_string("status", the_repository, "deserialize/reject",
+                   "args/verbose");
+        try_deserialize = 0;
+    }
+
+    if (try_deserialize)
+        goto skip_init;
+    /*
+     * If we implicitly received a status cache pathname from the config
+     * and the file does not exist, we silently reject it and do the normal
+     * status "collect". Fake up some trace2 messages to reflect this and
+     * help post-processors know this case is different.
+     */
+    if (!do_serialize && reject_implicit) {
+        trace2_cmd_mode("implicit-deserialize");
+        trace2_data_string("status", the_repository, "deserialize/reject",
+                   "status-cache/access");
+    }
+
     enable_fscache(0);
     if (status_format != STATUS_FORMAT_PORCELAIN &&
         status_format != STATUS_FORMAT_PORCELAIN_V2)
         progress_flag = REFRESH_PROGRESS;

@@ -1571,6 +1766,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
     else
         fd = -1;

+skip_init:
     s.is_initial = repo_get_oid(the_repository, s.reference, &oid) ? 1 : 0;
     if (!s.is_initial)
         oidcpy(&s.oid_commit, &oid);

@@ -1587,6 +1783,36 @@ int cmd_status(int argc, const char **argv, const char *prefix)
         s.rename_score = parse_rename_score(&rename_score_arg);
     }

+    if (try_deserialize) {
+        int result;
+        enum wt_status_deserialize_wait dw = implicit_deserialize_wait;
+        if (explicit_deserialize_wait != DESERIALIZE_WAIT__UNSET)
+            dw = explicit_deserialize_wait;
+        if (dw == DESERIALIZE_WAIT__UNSET)
+            dw = DESERIALIZE_WAIT__NO;
+
+        if (s.relative_paths)
+            s.prefix = prefix;
+
+        trace2_cmd_mode("deserialize");
+        result = wt_status_deserialize(&s, deserialize_path, dw);
+        if (result == DESERIALIZE_OK)
+            return 0;
+        if (dw == DESERIALIZE_WAIT__FAIL)
+            die(_("Rejected status serialization cache"));
+
+        /* deserialize failed, so force the initialization we skipped above. */
+        enable_fscache(1);
+        repo_read_index_preload(the_repository, &s.pathspec, 0);
+        refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
+
+        if (use_optional_locks())
+            fd = repo_hold_locked_index(the_repository, &index_lock, 0);
+        else
+            fd = -1;
+    }
+
     trace2_cmd_mode("collect");
     wt_status_collect(&s);

     if (0 <= fd)

@@ -1595,9 +1821,21 @@ int cmd_status(int argc, const char **argv, const char *prefix)
     if (s.relative_paths)
         s.prefix = prefix;

+    if (serialize_path) {
+        int fd_serialize = xopen(serialize_path,
+                     O_WRONLY | O_CREAT | O_TRUNC, 0666);
+        if (fd_serialize < 0)
+            die_errno(_("could not serialize to '%s'"),
+                  serialize_path);
+        trace2_cmd_mode("serialize");
+        wt_status_serialize_v1(fd_serialize, &s);
+        close(fd_serialize);
+    }
+
     wt_status_print(&s);
     wt_status_collect_free_buffers(&s);

+    disable_fscache();
     return 0;
 }
builtin/difftool.c:

@@ -523,7 +523,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
         }
         add_path(&wtdir, wtdir_len, dst_path);
         if (symlinks) {
-            if (symlink(wtdir.buf, rdir.buf)) {
+            if (create_symlink(lstate.istate, wtdir.buf, rdir.buf)) {
                 ret = error_errno("could not symlink '%s' to '%s'", wtdir.buf, rdir.buf);
                 goto finish;
             }
builtin/fetch.c:

@@ -18,6 +18,9 @@
 #include "string-list.h"
 #include "remote.h"
 #include "transport.h"
+#include "gvfs.h"
+#include "gvfs-helper-client.h"
+#include "packfile.h"
 #include "run-command.h"
 #include "parse-options.h"
 #include "sigchain.h"

@@ -1149,6 +1152,13 @@ static int store_updated_refs(struct display_state *display_state,

     opt.exclude_hidden_refs_section = "fetch";
     rm = ref_map;
+
+    /*
+     * Before checking connectivity, be really sure we have the
+     * latest pack-files loaded into memory.
+     */
+    reprepare_packed_git(the_repository);
+
     if (check_connected(iterate_ref_map, &rm, &opt)) {
         rc = error(_("%s did not send all necessary objects\n"),
                display_state->url);

@@ -2386,6 +2396,9 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
     }
     string_list_remove_duplicates(&list, 0);

+    if (core_gvfs & GVFS_PREFETCH_DURING_FETCH)
+        gh_client__prefetch(0, NULL);
+
     if (negotiate_only) {
         struct oidset acked_commits = OIDSET_INIT;
         struct oidset_iter iter;
81
builtin/gc.c
81
builtin/gc.c
|
@ -16,6 +16,7 @@
|
|||
#include "environment.h"
|
||||
#include "hex.h"
|
||||
#include "repository.h"
|
||||
#include "gvfs.h"
|
||||
#include "config.h"
|
||||
#include "tempfile.h"
|
||||
#include "lockfile.h"
|
||||
|
@ -638,6 +639,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
|
|||
if (quiet)
|
||||
strvec_push(&repack, "-q");
|
||||
|
||||
if ((!auto_gc || (auto_gc && gc_auto_threshold > 0)) && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
|
||||
die(_("'git gc' is not supported on a GVFS repo"));
|
||||
|
||||
if (auto_gc) {
|
||||
/*
|
||||
* Auto-gc should be least intrusive as possible.
|
||||
|
@ -1043,6 +1047,8 @@ static int write_loose_object_to_stdin(const struct object_id *oid,
|
|||
return ++(d->count) > d->batch_size;
|
||||
}
|
||||
|
||||
static const char *object_dir = NULL;
|
||||
|
||||
static int pack_loose(struct maintenance_run_opts *opts)
|
||||
{
|
||||
struct repository *r = the_repository;
|
||||
|
@ -1050,11 +1056,14 @@ static int pack_loose(struct maintenance_run_opts *opts)
|
|||
struct write_loose_object_data data;
|
||||
struct child_process pack_proc = CHILD_PROCESS_INIT;
|
||||
|
||||
if (!object_dir)
|
||||
object_dir = r->objects->odb->path;
|
||||
|
||||
/*
|
||||
* Do not start pack-objects process
|
||||
* if there are no loose objects.
|
||||
*/
|
||||
if (!for_each_loose_file_in_objdir(r->objects->odb->path,
|
||||
if (!for_each_loose_file_in_objdir(object_dir,
|
||||
bail_on_loose,
|
||||
NULL, NULL, NULL))
|
||||
return 0;
|
||||
|
@ -1064,7 +1073,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
|
|||
strvec_push(&pack_proc.args, "pack-objects");
|
||||
if (opts->quiet)
|
||||
strvec_push(&pack_proc.args, "--quiet");
|
||||
strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
|
||||
strvec_pushf(&pack_proc.args, "%s/pack/loose", object_dir);
|
||||
|
||||
pack_proc.in = -1;
|
||||
|
||||
|
@ -1077,7 +1086,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
|
|||
data.count = 0;
|
||||
data.batch_size = 50000;
|
||||
|
||||
for_each_loose_file_in_objdir(r->objects->odb->path,
|
||||
for_each_loose_file_in_objdir(object_dir,
|
||||
write_loose_object_to_stdin,
|
||||
NULL,
|
||||
NULL,
|
||||
|
@ -1455,6 +1464,7 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
|
|||
{
|
||||
int i;
|
||||
struct maintenance_run_opts opts;
|
||||
const char *tmp_obj_dir = NULL;
|
||||
struct option builtin_maintenance_run_options[] = {
|
||||
OPT_BOOL(0, "auto", &opts.auto_flag,
|
||||
N_("run tasks based on the state of the repository")),
|
||||
|
@ -1488,6 +1498,18 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
|
|||
if (argc != 0)
|
||||
usage_with_options(builtin_maintenance_run_usage,
|
||||
builtin_maintenance_run_options);
|
||||
|
||||
/*
|
||||
* To enable the VFS for Git/Scalar shared object cache, use
|
||||
* the gvfs.sharedcache config option to redirect the
|
||||
* maintenance to that location.
|
||||
*/
|
||||
if (!git_config_get_value("gvfs.sharedcache", &tmp_obj_dir) &&
|
||||
tmp_obj_dir) {
|
||||
object_dir = xstrdup(tmp_obj_dir);
|
||||
setenv(DB_ENVIRONMENT, object_dir, 1);
|
||||
}
|
||||
|
||||
return maintenance_run_tasks(&opts);
|
||||
}
|
||||
|
||||
|
@ -1651,6 +1673,42 @@ static const char *get_frequency(enum schedule_priority schedule)
|
|||
}
|
||||
}
|
||||
|
||||
static const char *extraconfig[] = {
|
||||
"credential.interactive=false",
|
||||
"core.askPass=true", /* 'true' returns success, but no output. */
|
||||
NULL
|
||||
};
|
||||
|
||||
static const char *get_extra_config_parameters(void) {
|
||||
static const char *result = NULL;
|
||||
struct strbuf builder = STRBUF_INIT;
|
||||
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
for (const char **s = extraconfig; s && *s; s++)
|
||||
strbuf_addf(&builder, "-c %s ", *s);
|
||||
|
||||
result = strbuf_detach(&builder, NULL);
|
||||
return result;
|
||||
}
|
||||
|
||||
static const char *get_extra_launchctl_strings(void) {
|
||||
static const char *result = NULL;
|
||||
struct strbuf builder = STRBUF_INIT;
|
||||
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
for (const char **s = extraconfig; s && *s; s++) {
|
||||
strbuf_addstr(&builder, "<string>-c</string>\n");
|
||||
strbuf_addf(&builder, "<string>%s</string>\n", *s);
|
||||
}
|
||||
|
||||
result = strbuf_detach(&builder, NULL);
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable
|
||||
* to mock the schedulers that `git maintenance start` rely on.
|
||||
|
@ -1857,6 +1915,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
|
|||
"<array>\n"
|
||||
"<string>%s/git</string>\n"
|
||||
"<string>--exec-path=%s</string>\n"
|
||||
"%s" /* For extra config parameters. */
|
||||
"<string>for-each-repo</string>\n"
|
||||
"<string>--keep-going</string>\n"
|
||||
"<string>--config=maintenance.repo</string>\n"
|
||||
|
@ -1866,7 +1925,8 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
|
|||
"</array>\n"
|
||||
"<key>StartCalendarInterval</key>\n"
|
||||
"<array>\n";
|
||||
strbuf_addf(&plist, preamble, name, exec_path, exec_path, frequency);
|
||||
strbuf_addf(&plist, preamble, name, exec_path, exec_path,
|
||||
get_extra_launchctl_strings(), frequency);
|
||||
|
||||
switch (schedule) {
|
||||
case SCHEDULE_HOURLY:
|
||||
|
@@ -2101,11 +2161,12 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority schedule)
 	"<Actions Context=\"Author\">\n"
 	"<Exec>\n"
 	"<Command>\"%s\\headless-git.exe\"</Command>\n"
-	"<Arguments>--exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%s</Arguments>\n"
+	"<Arguments>--exec-path=\"%s\" %s for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%s</Arguments>\n"
 	"</Exec>\n"
 	"</Actions>\n"
 	"</Task>\n";
-	fprintf(tfile->fp, xml, exec_path, exec_path, frequency);
+	fprintf(tfile->fp, xml, exec_path, exec_path,
+		get_extra_config_parameters(), frequency);
 	strvec_split(&child.args, cmd);
 	strvec_pushl(&child.args, "/create", "/tn", name, "/f", "/xml",
 		     get_tempfile_path(tfile), NULL);
@@ -2246,8 +2307,8 @@ static int crontab_update_schedule(int run_maintenance, int fd)
 		   "# replaced in the future by a Git command.\n\n");

 	strbuf_addf(&line_format,
-		    "%%d %%s * * %%s \"%s/git\" --exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%s\n",
-		    exec_path, exec_path);
+		    "%%d %%s * * %%s \"%s/git\" --exec-path=\"%s\" %s for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%s\n",
+		    exec_path, exec_path, get_extra_config_parameters());
 	fprintf(cron_in, line_format.buf, minute, "1-23", "*", "hourly");
 	fprintf(cron_in, line_format.buf, minute, "0", "1-6", "daily");
 	fprintf(cron_in, line_format.buf, minute, "0", "0", "weekly");
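The crontab template is subtle: strbuf_addf() consumes the single-escaped %s specifiers (the exec path and, after this patch, the extra "-c" options) while the double-escaped %%d/%%s survive as %d/%s for the later fprintf() calls, one per frequency. A minimal sketch of that two-stage expansion (the exec path here is illustrative):

#include <stdio.h>

int main(void)
{
	char line_format[256];

	/* Stage 1: %s is consumed now; %%d and %%s survive as %d/%s. */
	snprintf(line_format, sizeof(line_format),
		 "%%d %%s * * %%s \"%s/git\" maintenance run --schedule=%%s\n",
		 "/usr/libexec/git-core");

	/* Stage 2: fill in minute, hour spec, day spec, and frequency. */
	printf(line_format, 17, "1-23", "*", "hourly");
	return 0;
}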
@@ -2447,7 +2508,7 @@ static int systemd_timer_write_service_template(const char *exec_path)
 	"\n"
 	"[Service]\n"
 	"Type=oneshot\n"
-	"ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%i\n"
+	"ExecStart=\"%s/git\" --exec-path=\"%s\" %s for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%i\n"
 	"LockPersonality=yes\n"
 	"MemoryDenyWriteExecute=yes\n"
 	"NoNewPrivileges=yes\n"

@@ -2457,7 +2518,7 @@ static int systemd_timer_write_service_template(const char *exec_path)
 	"RestrictSUIDSGID=yes\n"
 	"SystemCallArchitectures=native\n"
 	"SystemCallFilter=@system-service\n";
-	if (fprintf(file, unit, exec_path, exec_path) < 0) {
+	if (fprintf(file, unit, exec_path, exec_path, get_extra_config_parameters()) < 0) {
 		error(_("failed to write to '%s'"), filename);
 		fclose(file);
 		goto error;
@@ -810,7 +810,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
 		read_lock();
 		collision_test_needed =
 			repo_has_object_file_with_flags(the_repository, oid,
-							OBJECT_INFO_QUICK);
+							OBJECT_INFO_FOR_PREFETCH);
 		read_unlock();
 	}

@@ -1744,6 +1744,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
 	unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
 	int report_end_of_input = 0;
 	int hash_algo = 0;
+	int dash_o = 0;

 	/*
 	 * index-pack never needs to fetch missing objects except when

@@ -1837,6 +1838,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
 			if (index_name || (i+1) >= argc)
 				usage(index_pack_usage);
 			index_name = argv[++i];
+			dash_o = 1;
 		} else if (starts_with(arg, "--index-version=")) {
 			char *c;
 			opts.version = strtoul(arg + 16, &c, 10);

@@ -1879,6 +1881,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
 		index_name = derive_filename(pack_name, "pack", "idx", &index_name_buf);

 	opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY);
+	if (rev_index && dash_o && !ends_with(index_name, ".idx"))
+		rev_index = 0;
 	if (rev_index) {
 		opts.flags |= verify ? WRITE_REV_VERIFY : WRITE_REV;
 		if (index_name)
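For context on the sha1_object() change: in git's object-store header, OBJECT_INFO_FOR_PREFETCH is OBJECT_INFO_QUICK combined with OBJECT_INFO_SKIP_FETCH_OBJECT, so the collision test no longer triggers a blocking download of a missing object from a promisor remote. A sketch of the relationship; the bit positions here are illustrative, consult object-store.h in the tree for the authoritative definitions:

/* Illustrative reconstruction of the flag relationship. */
#define OBJECT_INFO_QUICK             (1 << 3) /* skip expensive re-scans */
#define OBJECT_INFO_SKIP_FETCH_OBJECT (1 << 4) /* never fetch on a miss   */
#define OBJECT_INFO_FOR_PREFETCH \
	(OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK)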
@@ -35,6 +35,10 @@
 #include "trace2.h"
 #include "dir.h"
 #include "add-interactive.h"
+#include "strbuf.h"
+#include "quote.h"
+#include "dir.h"
+#include "entry.h"

 #define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)

@@ -43,6 +47,7 @@ static const char * const git_reset_usage[] = {
 	N_("git reset [-q] [<tree-ish>] [--] <pathspec>..."),
 	N_("git reset [-q] [--pathspec-from-file [--pathspec-file-nul]] [<tree-ish>]"),
 	N_("git reset --patch [<tree-ish>] [--] [<pathspec>...]"),
+	N_("DEPRECATED: git reset [-q] [--stdin [-z]] [<tree-ish>]"),
 	NULL
 };

@@ -150,9 +155,47 @@ static void update_index_from_diff(struct diff_queue_struct *q,

 	for (i = 0; i < q->nr; i++) {
 		int pos;
+		int respect_skip_worktree = 1;
 		struct diff_filespec *one = q->queue[i]->one;
+		struct diff_filespec *two = q->queue[i]->two;
 		int is_in_reset_tree = one->mode && !is_null_oid(&one->oid);
+		int is_missing = !(one->mode && !is_null_oid(&one->oid));
+		int was_missing = !two->mode && is_null_oid(&two->oid);
 		struct cache_entry *ce;
+		struct cache_entry *ceBefore;
+		struct checkout state = CHECKOUT_INIT;
+
+		/*
+		 * When using the virtual filesystem feature, the cache entries that are
+		 * added here will not have the skip-worktree bit set.
+		 *
+		 * Without this code there is data that is lost because the files that
+		 * would normally be in the working directory are not there and show as
+		 * deleted for the next status or in the case of added files just disappear.
+		 * We need to create the previous version of the files in the working
+		 * directory so that they will have the right content and the next
+		 * status call will show modified or untracked files correctly.
+		 */
+		if (core_virtualfilesystem && !file_exists(two->path))
+		{
+			pos = index_name_pos(&the_index, two->path, strlen(two->path));
+			if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) &&
+			    (is_missing || !was_missing))
+			{
+				state.force = 1;
+				state.refresh_cache = 1;
+				state.istate = &the_index;
+				ceBefore = make_cache_entry(&the_index, two->mode,
+							    &two->oid, two->path,
+							    0, 0);
+				if (!ceBefore)
+					die(_("make_cache_entry failed for path '%s'"),
+					    two->path);
+
+				checkout_entry(ceBefore, &state, NULL, NULL);
+				respect_skip_worktree = 0;
+			}
+		}

 		if (!is_in_reset_tree && !intent_to_add) {
 			remove_file_from_index(&the_index, one->path);

@@ -171,8 +214,14 @@ static void update_index_from_diff(struct diff_queue_struct *q,
 		 * to properly construct the reset sparse directory.
 		 */
 		pos = index_name_pos(&the_index, one->path, strlen(one->path));
-		if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
-		    (pos < 0 && !path_in_sparse_checkout(one->path, &the_index)))
+
+		/*
+		 * Do not add the SKIP_WORKTREE bit back if we populated the
+		 * file on purpose in a virtual filesystem scenario.
+		 */
+		if (respect_skip_worktree &&
+		    ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
+		     (pos < 0 && !path_in_sparse_checkout(one->path, &the_index))))
 			ce->ce_flags |= CE_SKIP_WORKTREE;

 		if (!ce)

@@ -331,6 +380,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
 	struct object_id oid;
 	struct pathspec pathspec;
 	int intent_to_add = 0;
+	int nul_term_line = 0, read_from_stdin = 0;
 	const struct option options[] = {
 		OPT__QUIET(&quiet, N_("be quiet, only report errors")),
 		OPT_BOOL(0, "no-refresh", &no_refresh,

@@ -359,6 +409,10 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
 			 N_("record only the fact that removed paths will be added later")),
 		OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
 		OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
+		OPT_BOOL('z', NULL, &nul_term_line,
+			 N_("DEPRECATED (use --pathspec-file-nul instead): paths are separated with NUL character")),
+		OPT_BOOL(0, "stdin", &read_from_stdin,
+			 N_("DEPRECATED (use --pathspec-from-file=- instead): read paths from <stdin>")),
 		OPT_END()
 	};

@@ -368,6 +422,14 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
 			PARSE_OPT_KEEP_DASHDASH);
 	parse_args(&pathspec, argv, prefix, patch_mode, &rev);

+	if (read_from_stdin) {
+		warning(_("--stdin is deprecated, please use --pathspec-from-file=- instead"));
+		free(pathspec_from_file);
+		pathspec_from_file = xstrdup("-");
+		if (nul_term_line)
+			pathspec_file_nul = 1;
+	}
+
 	if (pathspec_from_file) {
 		if (patch_mode)
 			die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--patch");
@@ -5,6 +5,7 @@
  */
 #define USE_THE_INDEX_VARIABLE
 #include "builtin.h"
+#include "environment.h"
 #include "advice.h"
 #include "config.h"
 #include "lockfile.h"

@@ -311,7 +312,7 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
 	for (i = 0; i < the_index.cache_nr; i++) {
 		const struct cache_entry *ce = the_index.cache[i];

-		if (!include_sparse &&
+		if (!include_sparse && !core_virtualfilesystem &&
 		    (ce_skip_worktree(ce) ||
 		     !path_in_sparse_checkout(ce->name, &the_index)))
 			continue;

@@ -348,7 +349,11 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
 			    *original ? original : ".");
 	}

-	if (only_match_skip_worktree.nr) {
+	/*
+	 * When using a virtual filesystem, we might re-add a path
+	 * that is currently virtual and we want that to succeed.
+	 */
+	if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
 		advise_on_updating_sparse_paths(&only_match_skip_worktree);
 		ret = 1;
 	}
@@ -107,7 +107,7 @@ static int sparse_checkout_list(int argc, const char **argv, const char *prefix)

 static void clean_tracked_sparse_directories(struct repository *r)
 {
-	int i, was_full = 0;
+	int i, value, was_full = 0;
 	struct strbuf path = STRBUF_INIT;
 	size_t pathlen;
 	struct string_list_item *item;

@@ -123,6 +123,13 @@ static void clean_tracked_sparse_directories(struct repository *r)
 	    !r->index->sparse_checkout_patterns->use_cone_patterns)
 		return;

+	/*
+	 * Users can disable this behavior.
+	 */
+	if (!repo_config_get_bool(r, "index.deletesparsedirectories", &value) &&
+	    !value)
+		return;
+
 	/*
 	 * Use the sparse index as a data structure to assist finding
 	 * directories that are safe to delete. This conversion to a
|
|||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "gvfs.h"
|
||||
#include "bulk-checkin.h"
|
||||
#include "config.h"
|
||||
#include "environment.h"
|
||||
|
@ -1109,7 +1110,13 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
argc = parse_options_end(&ctx);
|
||||
|
||||
getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
|
||||
if (mark_skip_worktree_only && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
|
||||
die(_("modifying the skip worktree bit is not supported on a GVFS repo"));
|
||||
|
||||
if (preferred_index_format) {
|
||||
if (preferred_index_format != 4 && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
|
||||
die(_("changing the index version is not supported on a GVFS repo"));
|
||||
|
||||
if (preferred_index_format < 0) {
|
||||
printf(_("%d\n"), the_index.version);
|
||||
} else if (preferred_index_format < INDEX_FORMAT_LB ||
|
||||
|
@ -1155,6 +1162,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
end_odb_transaction();
|
||||
|
||||
if (split_index > 0) {
|
||||
if (gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
|
||||
die(_("split index is not supported on a GVFS repo"));
|
||||
|
||||
if (git_config_get_split_index() == 0)
|
||||
warning(_("core.splitIndex is set to false; "
|
||||
"remove or change it, if you really want to "
|
||||
|
|
|
@@ -0,0 +1,69 @@
+#include "builtin.h"
+#include "repository.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "strvec.h"
+
+#if defined(GIT_WINDOWS_NATIVE)
+/*
+ * On Windows, run 'git update-git-for-windows' which
+ * is installed by the installer, based on the script
+ * in git-for-windows/build-extra.
+ */
+static int platform_specific_upgrade(void)
+{
+	struct child_process cp = CHILD_PROCESS_INIT;
+
+	strvec_push(&cp.args, "git-update-git-for-windows");
+	return run_command(&cp);
+}
+#elif defined(__APPLE__)
+/*
+ * On macOS, we expect the user to have the microsoft-git
+ * cask installed via Homebrew. We check using these
+ * commands:
+ *
+ * 1. 'brew update' to get latest versions.
+ * 2. 'brew upgrade --cask microsoft-git' to get the
+ *    latest version.
+ */
+static int platform_specific_upgrade(void)
+{
+	int res;
+	struct child_process update = CHILD_PROCESS_INIT;
+	struct child_process upgrade = CHILD_PROCESS_INIT;
+
+	printf("Updating Homebrew with 'brew update'\n");
+
+	strvec_pushl(&update.args, "brew", "update", NULL);
+	res = run_command(&update);
+
+	if (res) {
+		error(_("'brew update' failed; is brew installed?"));
+		return 1;
+	}
+
+	printf("Upgrading microsoft-git with 'brew upgrade --cask microsoft-git'\n");
+	strvec_pushl(&upgrade.args, "brew", "upgrade", "--cask", "microsoft-git", NULL);
+	res = run_command(&upgrade);
+
+	return res;
+}
+#else
+static int platform_specific_upgrade(void)
+{
+	error(_("update-microsoft-git is not supported on this platform"));
+	return 1;
+}
+#endif
+
+static const char builtin_update_microsoft_git_usage[] =
+	N_("git update-microsoft-git");
+
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix)
+{
+	if (argc == 2 && !strcmp(argv[1], "-h"))
+		usage(builtin_update_microsoft_git_usage);
+
+	return platform_specific_upgrade();
+}
@@ -1,6 +1,7 @@
 #include "builtin.h"
 #include "abspath.h"
 #include "advice.h"
+#include "gvfs.h"
 #include "checkout.h"
 #include "config.h"
 #include "copy.h"

@@ -1402,6 +1403,13 @@ int cmd_worktree(int ac, const char **av, const char *prefix)

 	git_config(git_worktree_config, NULL);

+	/*
+	 * git-worktree is special-cased to work in Scalar repositories
+	 * even when they use the GVFS Protocol.
+	 */
+	if (core_gvfs & GVFS_USE_VIRTUAL_FILESYSTEM)
+		die("'git %s' is not supported on a GVFS repo", "worktree");
+
 	if (!prefix)
 		prefix = "";
cache-tree.c
@@ -1,6 +1,7 @@
 #include "git-compat-util.h"
 #include "environment.h"
 #include "hex.h"
+#include "gvfs.h"
 #include "lockfile.h"
 #include "tree.h"
 #include "tree-walk.h"

@@ -229,7 +230,7 @@ static void discard_unused_subtrees(struct cache_tree *it)
 	}
 }

-int cache_tree_fully_valid(struct cache_tree *it)
+static int cache_tree_fully_valid_1(struct cache_tree *it)
 {
 	int i;
 	if (!it)

@@ -237,7 +238,7 @@
 	if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
 		return 0;
 	for (i = 0; i < it->subtree_nr; i++) {
-		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
+		if (!cache_tree_fully_valid_1(it->down[i]->cache_tree))
 			return 0;
 	}
 	return 1;

@@ -248,6 +249,17 @@ static int must_check_existence(const struct cache_entry *ce)
 	return !(repo_has_promisor_remote(the_repository) && ce_skip_worktree(ce));
 }

+int cache_tree_fully_valid(struct cache_tree *it)
+{
+	int result;
+
+	trace2_region_enter("cache_tree", "fully_valid", NULL);
+	result = cache_tree_fully_valid_1(it);
+	trace2_region_leave("cache_tree", "fully_valid", NULL);
+
+	return result;
+}
+
 static int update_one(struct cache_tree *it,
 		      struct cache_entry **cache,
 		      int entries,

@@ -257,7 +269,8 @@ static int update_one(struct cache_tree *it,
 		      int flags)
 {
 	struct strbuf buffer;
-	int missing_ok = flags & WRITE_TREE_MISSING_OK;
+	int missing_ok = gvfs_config_is_set(GVFS_MISSING_OK) ?
+		WRITE_TREE_MISSING_OK : (flags & WRITE_TREE_MISSING_OK);
 	int dryrun = flags & WRITE_TREE_DRY_RUN;
 	int repair = flags & WRITE_TREE_REPAIR;
 	int to_invalidate = 0;

@@ -426,7 +439,29 @@ static int update_one(struct cache_tree *it,
 			continue;

 		strbuf_grow(&buffer, entlen + 100);
-		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+
+		switch (mode) {
+		case 0100644:
+			strbuf_add(&buffer, "100644 ", 7);
+			break;
+		case 0100664:
+			strbuf_add(&buffer, "100664 ", 7);
+			break;
+		case 0100755:
+			strbuf_add(&buffer, "100755 ", 7);
+			break;
+		case 0120000:
+			strbuf_add(&buffer, "120000 ", 7);
+			break;
+		case 0160000:
+			strbuf_add(&buffer, "160000 ", 7);
+			break;
+		default:
+			strbuf_addf(&buffer, "%o ", mode);
+			break;
+		}
+		strbuf_add(&buffer, path + baselen, entlen);
+		strbuf_addch(&buffer, '\0');
 		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

 #if DEBUG_CACHE_TREE
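The update_one() hunk replaces a printf-style "%o" conversion with literal strings for the handful of mode values a tree entry can legally carry, avoiding formatting work on a hot path. A standalone sketch of the same dispatch (buffer sizes and the fallback are illustrative):

#include <stdio.h>
#include <string.h>

/* Literal prefixes for the common tree-entry modes, as in the hunk. */
static const char *mode_prefix(unsigned mode)
{
	switch (mode) {
	case 0100644: return "100644 ";
	case 0100755: return "100755 ";
	case 0120000: return "120000 ";
	case 0160000: return "160000 ";
	default:      return NULL; /* rare modes fall back to "%o " */
	}
}

int main(void)
{
	char buf[16];
	unsigned mode = 0100644;
	const char *p = mode_prefix(mode);

	if (p)
		memcpy(buf, p, strlen(p) + 1);
	else
		snprintf(buf, sizeof(buf), "%o ", mode);
	printf("%s\n", buf);
	return 0;
}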
@@ -224,6 +224,12 @@ then

 	GIT_TEST_OPTS="--write-junit-xml"
 	JOBS=10
+	case "$CI_OS_NAME" in
+	linux) runs_on_pool=ubuntu-latest;;
+	macos|osx) runs_on_pool=macos-latest;;
+	windows_nt) runs_on_pool=windows-latest;;
+	*) echo "Unhandled OS: $CI_OS_NAME" >&2; exit 1;;
+	esac
 elif test true = "$GITHUB_ACTIONS"
 then
 	CI_TYPE=github-actions
@@ -5,11 +5,6 @@

 . ${0%/*}/lib.sh

-case "$CI_OS_NAME" in
-windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";;
-*) ln -s "$cache_dir/.prove" t/.prove;;
-esac
-
 run_tests=t

 case "$jobname" in

@@ -55,4 +50,8 @@ then
 fi
 check_unignored_build_artifacts

+case " $MAKE_TARGETS " in
+*" all "*) make -C contrib/subtree test;;
+esac
+
 save_good_tree
@@ -5,11 +5,6 @@

 . ${0%/*}/lib.sh

-case "$CI_OS_NAME" in
-windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";;
-*) ln -s "$cache_dir/.prove" t/.prove;;
-esac
-
 group "Run tests" make --quiet -C t T="$(cd t &&
 	./helper/test-tool path-utils slice-tests "$1" "$2" t[0-9]*.sh |
 	tr '\n' ' ')" ||

@@ -20,4 +15,7 @@ if [ "$1" == "0" ] ; then
 group "Run unit tests" make --quiet -C t unit-tests-prove
 fi

+# Run the git subtree tests only if main tests succeeded
+test 0 != "$1" || make -C contrib/subtree test
+
 check_unignored_build_artifacts
commit.c
@@ -1,4 +1,5 @@
 #include "git-compat-util.h"
+#include "gvfs.h"
 #include "tag.h"
 #include "commit.h"
 #include "commit-graph.h"

@@ -559,13 +560,17 @@ int repo_parse_commit_internal(struct repository *r,
 		.sizep = &size,
 		.contentp = &buffer,
 	};
-	int ret;
 	/*
 	 * Git does not support partial clones that exclude commits, so set
 	 * OBJECT_INFO_SKIP_FETCH_OBJECT to fail fast when an object is missing.
 	 */
 	int flags = OBJECT_INFO_LOOKUP_REPLACE | OBJECT_INFO_SKIP_FETCH_OBJECT |
-		    OBJECT_INFO_DIE_IF_CORRUPT;
+		OBJECT_INFO_DIE_IF_CORRUPT;
+	int ret;
+
+	/* But the GVFS Protocol _does_ support missing commits! */
+	if (gvfs_config_is_set(GVFS_MISSING_OK))
+		flags ^= OBJECT_INFO_SKIP_FETCH_OBJECT;

 	if (!item)
 		return -1;
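One subtlety in the hunk above: flags ^= OBJECT_INFO_SKIP_FETCH_OBJECT clears the bit only because the initializer a few lines earlier is guaranteed to have set it; XOR would turn the bit back on if it were ever absent. A small illustration of the difference from the idempotent mask-and-clear form (the flag value is illustrative):

#include <assert.h>

#define SKIP_FETCH (1 << 4)

int main(void)
{
	int flags = SKIP_FETCH | 1; /* bit known to be set, as in the patch */

	flags ^= SKIP_FETCH;        /* XOR clears a bit only if it was set */
	assert(!(flags & SKIP_FETCH));

	flags &= ~SKIP_FETCH;       /* the idempotent way to clear a bit */
	assert(!(flags & SKIP_FETCH));
	return 0;
}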
@@ -35,7 +35,19 @@ static inline uint64_t default_bswap64(uint64_t val)
 #undef bswap32
 #undef bswap64

-#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+/**
+ * __has_builtin is available since Clang 10 and GCC 10.
+ * Below is a fallback for older compilers.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if __has_builtin(__builtin_bswap32) && __has_builtin(__builtin_bswap64)
+#define bswap32(x) __builtin_bswap32((x))
+#define bswap64(x) __builtin_bswap64((x))
+
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

 #define bswap32 git_bswap32
 static inline uint32_t git_bswap32(uint32_t x)
@@ -34,7 +34,7 @@ struct fsm_health_data

 	struct wt_moved
 	{
-		wchar_t wpath[MAX_PATH + 1];
+		wchar_t wpath[MAX_LONG_PATH + 1];
 		BY_HANDLE_FILE_INFORMATION bhfi;
 	} wt_moved;
 };

@@ -143,8 +143,8 @@ static int has_worktree_moved(struct fsmonitor_daemon_state *state,
 		return 0;

 	case CTX_INIT:
-		if (xutftowcs_path(data->wt_moved.wpath,
-				   state->path_worktree_watch.buf) < 0) {
+		if (xutftowcs_long_path(data->wt_moved.wpath,
+					state->path_worktree_watch.buf) < 0) {
 			error(_("could not convert to wide characters: '%s'"),
 			      state->path_worktree_watch.buf);
 			return -1;
@@ -28,7 +28,7 @@ struct one_watch
 	DWORD count;

 	struct strbuf path;
-	wchar_t wpath_longname[MAX_PATH + 1];
+	wchar_t wpath_longname[MAX_LONG_PATH + 1];
 	DWORD wpath_longname_len;

 	HANDLE hDir;

@@ -131,8 +131,8 @@ normalize:
  */
 static void check_for_shortnames(struct one_watch *watch)
 {
-	wchar_t buf_in[MAX_PATH + 1];
-	wchar_t buf_out[MAX_PATH + 1];
+	wchar_t buf_in[MAX_LONG_PATH + 1];
+	wchar_t buf_out[MAX_LONG_PATH + 1];
 	wchar_t *last;
 	wchar_t *p;

@@ -197,8 +197,8 @@ static enum get_relative_result get_relative_longname(
 	const wchar_t *wpath, DWORD wpath_len,
 	wchar_t *wpath_longname, size_t bufsize_wpath_longname)
 {
-	wchar_t buf_in[2 * MAX_PATH + 1];
-	wchar_t buf_out[MAX_PATH + 1];
+	wchar_t buf_in[2 * MAX_LONG_PATH + 1];
+	wchar_t buf_out[MAX_LONG_PATH + 1];
 	DWORD root_len;
 	DWORD out_len;

@@ -298,10 +298,10 @@ static struct one_watch *create_watch(const char *path)
 		FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE;
 	HANDLE hDir;
 	DWORD len_longname;
-	wchar_t wpath[MAX_PATH + 1];
-	wchar_t wpath_longname[MAX_PATH + 1];
+	wchar_t wpath[MAX_LONG_PATH + 1];
+	wchar_t wpath_longname[MAX_LONG_PATH + 1];

-	if (xutftowcs_path(wpath, path) < 0) {
+	if (xutftowcs_long_path(wpath, path) < 0) {
 		error(_("could not convert to wide characters: '%s'"), path);
 		return NULL;
 	}

@@ -545,7 +545,7 @@ static int process_worktree_events(struct fsmonitor_daemon_state *state)
 	struct string_list cookie_list = STRING_LIST_INIT_DUP;
 	struct fsmonitor_batch *batch = NULL;
 	const char *p = watch->buffer;
-	wchar_t wpath_longname[MAX_PATH + 1];
+	wchar_t wpath_longname[MAX_LONG_PATH + 1];

 	/*
 	 * If the kernel gets more events than will fit in the kernel
@@ -69,8 +69,8 @@ static int check_remote_protocol(wchar_t *wpath)
  */
 int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
 {
-	wchar_t wpath[MAX_PATH];
-	wchar_t wfullpath[MAX_PATH];
+	wchar_t wpath[MAX_LONG_PATH];
+	wchar_t wfullpath[MAX_LONG_PATH];
 	size_t wlen;
 	UINT driveType;

@@ -78,7 +78,7 @@ int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
 	 * Do everything in wide chars because the drive letter might be
 	 * a multi-byte sequence. See win32_has_dos_drive_prefix().
 	 */
-	if (xutftowcs_path(wpath, path) < 0) {
+	if (xutftowcs_long_path(wpath, path) < 0) {
 		return -1;
 	}

@@ -97,7 +97,7 @@ int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
 	 * slashes to backslashes. This is essential to get GetDriveTypeW()
 	 * correctly handle some UNC "\\server\share\..." paths.
 	 */
-	if (!GetFullPathNameW(wpath, MAX_PATH, wfullpath, NULL)) {
+	if (!GetFullPathNameW(wpath, MAX_LONG_PATH, wfullpath, NULL)) {
 		return -1;
 	}
@@ -0,0 +1,408 @@
+#include "../git-compat-util.h"
+#include "../git-curl-compat.h"
+#ifndef WIN32
+#include <dlfcn.h>
+#endif
+
+/*
+ * The ABI version of libcurl is encoded in its shared libraries' file names.
+ * This ABI version has not changed since October 2006 and is unlikely to be
+ * changed in the future. See https://curl.se/libcurl/abi.html for details.
+ */
+#define LIBCURL_ABI_VERSION "4"
+
+typedef void (*func_t)(void);
+
+#ifndef WIN32
+#ifdef __APPLE__
+#define LIBCURL_FILE_NAME(base) base "." LIBCURL_ABI_VERSION ".dylib"
+#else
+#define LIBCURL_FILE_NAME(base) base ".so." LIBCURL_ABI_VERSION
+#endif
+
+static void *load_library(const char *name)
+{
+	return dlopen(name, RTLD_LAZY);
+}
+
+static func_t load_function(void *handle, const char *name)
+{
+	/*
+	 * Casting the return value of `dlsym()` to a function pointer is
+	 * explicitly allowed in recent POSIX standards, but GCC complains
+	 * about this in pedantic mode nevertheless. For more about this issue,
+	 * see https://stackoverflow.com/q/31526876/1860823 and
+	 * http://stackoverflow.com/a/36385690/1905491.
+	 */
+	func_t f;
+	*(void **)&f = dlsym(handle, name);
+	return f;
+}
+#else
+#define LIBCURL_FILE_NAME(base) base "-" LIBCURL_ABI_VERSION ".dll"
+
+static void *load_library(const char *name)
+{
+	size_t name_size = strlen(name) + 1;
+	const char *path = getenv("PATH");
+	char dll_path[MAX_PATH];
+
+	while (path && *path) {
+		const char *sep = strchrnul(path, ';');
+		size_t len = sep - path;
+
+		if (len && len + name_size < sizeof(dll_path)) {
+			memcpy(dll_path, path, len);
+			dll_path[len] = '/';
+			memcpy(dll_path + len + 1, name, name_size);
+
+			if (!access(dll_path, R_OK)) {
+				wchar_t wpath[MAX_PATH];
+				int wlen = MultiByteToWideChar(CP_UTF8, 0, dll_path, -1, wpath, ARRAY_SIZE(wpath));
+				void *res = wlen ? (void *)LoadLibraryExW(wpath, NULL, 0) : NULL;
+				if (!res) {
+					DWORD err = GetLastError();
+					char buf[1024];
+
+					if (!FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+							    FORMAT_MESSAGE_ARGUMENT_ARRAY |
+							    FORMAT_MESSAGE_IGNORE_INSERTS,
+							    NULL, err, LANG_NEUTRAL,
+							    buf, sizeof(buf) - 1, NULL))
+						xsnprintf(buf, sizeof(buf), "last error: %ld", err);
+					error("LoadLibraryExW() failed with: %s", buf);
+				}
+				return res;
+			}
+		}
+
+		path = *sep ? sep + 1 : NULL;
+	}
+
+	return NULL;
+}
+
+static func_t load_function(void *handle, const char *name)
+{
+	return (func_t)GetProcAddress((HANDLE)handle, name);
+}
+#endif
+
+typedef char *(*curl_easy_escape_type)(CURL *handle, const char *string, int length);
+static curl_easy_escape_type curl_easy_escape_func;
+
+typedef void (*curl_free_type)(void *p);
+static curl_free_type curl_free_func;
+
+typedef CURLcode (*curl_global_init_type)(long flags);
+static curl_global_init_type curl_global_init_func;
+
+typedef CURLsslset (*curl_global_sslset_type)(curl_sslbackend id, const char *name, const curl_ssl_backend ***avail);
+static curl_global_sslset_type curl_global_sslset_func;
+
+typedef void (*curl_global_cleanup_type)(void);
+static curl_global_cleanup_type curl_global_cleanup_func;
+
+typedef struct curl_slist *(*curl_slist_append_type)(struct curl_slist *list, const char *data);
+static curl_slist_append_type curl_slist_append_func;
+
+typedef void (*curl_slist_free_all_type)(struct curl_slist *list);
+static curl_slist_free_all_type curl_slist_free_all_func;
+
+typedef const char *(*curl_easy_strerror_type)(CURLcode error);
+static curl_easy_strerror_type curl_easy_strerror_func;
+
+typedef CURLM *(*curl_multi_init_type)(void);
+static curl_multi_init_type curl_multi_init_func;
+
+typedef CURLMcode (*curl_multi_add_handle_type)(CURLM *multi_handle, CURL *curl_handle);
+static curl_multi_add_handle_type curl_multi_add_handle_func;
+
+typedef CURLMcode (*curl_multi_remove_handle_type)(CURLM *multi_handle, CURL *curl_handle);
+static curl_multi_remove_handle_type curl_multi_remove_handle_func;
+
+typedef CURLMcode (*curl_multi_fdset_type)(CURLM *multi_handle, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd);
+static curl_multi_fdset_type curl_multi_fdset_func;
+
+typedef CURLMcode (*curl_multi_perform_type)(CURLM *multi_handle, int *running_handles);
+static curl_multi_perform_type curl_multi_perform_func;
+
+typedef CURLMcode (*curl_multi_cleanup_type)(CURLM *multi_handle);
+static curl_multi_cleanup_type curl_multi_cleanup_func;
+
+typedef CURLMsg *(*curl_multi_info_read_type)(CURLM *multi_handle, int *msgs_in_queue);
+static curl_multi_info_read_type curl_multi_info_read_func;
+
+typedef const char *(*curl_multi_strerror_type)(CURLMcode error);
+static curl_multi_strerror_type curl_multi_strerror_func;
+
+typedef CURLMcode (*curl_multi_timeout_type)(CURLM *multi_handle, long *milliseconds);
+static curl_multi_timeout_type curl_multi_timeout_func;
+
+typedef CURL *(*curl_easy_init_type)(void);
+static curl_easy_init_type curl_easy_init_func;
+
+typedef CURLcode (*curl_easy_perform_type)(CURL *curl);
+static curl_easy_perform_type curl_easy_perform_func;
+
+typedef void (*curl_easy_cleanup_type)(CURL *curl);
+static curl_easy_cleanup_type curl_easy_cleanup_func;
+
+typedef CURL *(*curl_easy_duphandle_type)(CURL *curl);
+static curl_easy_duphandle_type curl_easy_duphandle_func;
+
+typedef CURLcode (*curl_easy_getinfo_long_type)(CURL *curl, CURLINFO info, long *value);
+static curl_easy_getinfo_long_type curl_easy_getinfo_long_func;
+
+typedef CURLcode (*curl_easy_getinfo_pointer_type)(CURL *curl, CURLINFO info, void **value);
+static curl_easy_getinfo_pointer_type curl_easy_getinfo_pointer_func;
+
+typedef CURLcode (*curl_easy_getinfo_off_t_type)(CURL *curl, CURLINFO info, curl_off_t *value);
+static curl_easy_getinfo_off_t_type curl_easy_getinfo_off_t_func;
+
+typedef CURLcode (*curl_easy_setopt_long_type)(CURL *curl, CURLoption opt, long value);
+static curl_easy_setopt_long_type curl_easy_setopt_long_func;
+
+typedef CURLcode (*curl_easy_setopt_pointer_type)(CURL *curl, CURLoption opt, void *value);
+static curl_easy_setopt_pointer_type curl_easy_setopt_pointer_func;
+
+typedef CURLcode (*curl_easy_setopt_off_t_type)(CURL *curl, CURLoption opt, curl_off_t value);
+static curl_easy_setopt_off_t_type curl_easy_setopt_off_t_func;
+
+static char ssl_backend[64];
+
+static void lazy_load_curl(void)
+{
+	static int initialized;
+	void *libcurl = NULL;
+	func_t curl_easy_getinfo_func, curl_easy_setopt_func;
+
+	if (initialized)
+		return;
+
+	initialized = 1;
+	if (ssl_backend[0]) {
+		char dll_name[64 + 16];
+		snprintf(dll_name, sizeof(dll_name) - 1,
+			 LIBCURL_FILE_NAME("libcurl-%s"), ssl_backend);
+		libcurl = load_library(dll_name);
+	}
+	if (!libcurl)
+		libcurl = load_library(LIBCURL_FILE_NAME("libcurl"));
+	if (!libcurl)
+		die("failed to load library '%s'", LIBCURL_FILE_NAME("libcurl"));
+
+	curl_easy_escape_func = (curl_easy_escape_type)load_function(libcurl, "curl_easy_escape");
+	curl_free_func = (curl_free_type)load_function(libcurl, "curl_free");
+	curl_global_init_func = (curl_global_init_type)load_function(libcurl, "curl_global_init");
+	curl_global_sslset_func = (curl_global_sslset_type)load_function(libcurl, "curl_global_sslset");
+	curl_global_cleanup_func = (curl_global_cleanup_type)load_function(libcurl, "curl_global_cleanup");
+	curl_slist_append_func = (curl_slist_append_type)load_function(libcurl, "curl_slist_append");
+	curl_slist_free_all_func = (curl_slist_free_all_type)load_function(libcurl, "curl_slist_free_all");
+	curl_easy_strerror_func = (curl_easy_strerror_type)load_function(libcurl, "curl_easy_strerror");
+	curl_multi_init_func = (curl_multi_init_type)load_function(libcurl, "curl_multi_init");
+	curl_multi_add_handle_func = (curl_multi_add_handle_type)load_function(libcurl, "curl_multi_add_handle");
+	curl_multi_remove_handle_func = (curl_multi_remove_handle_type)load_function(libcurl, "curl_multi_remove_handle");
+	curl_multi_fdset_func = (curl_multi_fdset_type)load_function(libcurl, "curl_multi_fdset");
+	curl_multi_perform_func = (curl_multi_perform_type)load_function(libcurl, "curl_multi_perform");
+	curl_multi_cleanup_func = (curl_multi_cleanup_type)load_function(libcurl, "curl_multi_cleanup");
+	curl_multi_info_read_func = (curl_multi_info_read_type)load_function(libcurl, "curl_multi_info_read");
+	curl_multi_strerror_func = (curl_multi_strerror_type)load_function(libcurl, "curl_multi_strerror");
+	curl_multi_timeout_func = (curl_multi_timeout_type)load_function(libcurl, "curl_multi_timeout");
+	curl_easy_init_func = (curl_easy_init_type)load_function(libcurl, "curl_easy_init");
+	curl_easy_perform_func = (curl_easy_perform_type)load_function(libcurl, "curl_easy_perform");
+	curl_easy_cleanup_func = (curl_easy_cleanup_type)load_function(libcurl, "curl_easy_cleanup");
+	curl_easy_duphandle_func = (curl_easy_duphandle_type)load_function(libcurl, "curl_easy_duphandle");
+
+	curl_easy_getinfo_func = load_function(libcurl, "curl_easy_getinfo");
+	curl_easy_getinfo_long_func = (curl_easy_getinfo_long_type)curl_easy_getinfo_func;
+	curl_easy_getinfo_pointer_func = (curl_easy_getinfo_pointer_type)curl_easy_getinfo_func;
+	curl_easy_getinfo_off_t_func = (curl_easy_getinfo_off_t_type)curl_easy_getinfo_func;
+
+	curl_easy_setopt_func = load_function(libcurl, "curl_easy_setopt");
+	curl_easy_setopt_long_func = (curl_easy_setopt_long_type)curl_easy_setopt_func;
+	curl_easy_setopt_pointer_func = (curl_easy_setopt_pointer_type)curl_easy_setopt_func;
+	curl_easy_setopt_off_t_func = (curl_easy_setopt_off_t_type)curl_easy_setopt_func;
+}
+
+char *curl_easy_escape(CURL *handle, const char *string, int length)
+{
+	lazy_load_curl();
+	return curl_easy_escape_func(handle, string, length);
+}
+
+void curl_free(void *p)
+{
+	lazy_load_curl();
+	curl_free_func(p);
+}
+
+CURLcode curl_global_init(long flags)
+{
+	lazy_load_curl();
+	return curl_global_init_func(flags);
+}
+
+CURLsslset curl_global_sslset(curl_sslbackend id, const char *name, const curl_ssl_backend ***avail)
+{
+	if (name && strlen(name) < sizeof(ssl_backend))
+		strlcpy(ssl_backend, name, sizeof(ssl_backend));
+
+	lazy_load_curl();
+	return curl_global_sslset_func(id, name, avail);
+}
+
+void curl_global_cleanup(void)
+{
+	lazy_load_curl();
+	curl_global_cleanup_func();
+}
+
+struct curl_slist *curl_slist_append(struct curl_slist *list, const char *data)
+{
+	lazy_load_curl();
+	return curl_slist_append_func(list, data);
+}
+
+void curl_slist_free_all(struct curl_slist *list)
+{
+	lazy_load_curl();
+	curl_slist_free_all_func(list);
+}
+
+const char *curl_easy_strerror(CURLcode error)
+{
+	lazy_load_curl();
+	return curl_easy_strerror_func(error);
+}
+
+CURLM *curl_multi_init(void)
+{
+	lazy_load_curl();
+	return curl_multi_init_func();
+}
+
+CURLMcode curl_multi_add_handle(CURLM *multi_handle, CURL *curl_handle)
+{
+	lazy_load_curl();
+	return curl_multi_add_handle_func(multi_handle, curl_handle);
+}
+
+CURLMcode curl_multi_remove_handle(CURLM *multi_handle, CURL *curl_handle)
+{
+	lazy_load_curl();
+	return curl_multi_remove_handle_func(multi_handle, curl_handle);
+}
+
+CURLMcode curl_multi_fdset(CURLM *multi_handle, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd)
+{
+	lazy_load_curl();
+	return curl_multi_fdset_func(multi_handle, read_fd_set, write_fd_set, exc_fd_set, max_fd);
+}
+
+CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
+{
+	lazy_load_curl();
+	return curl_multi_perform_func(multi_handle, running_handles);
+}
+
+CURLMcode curl_multi_cleanup(CURLM *multi_handle)
+{
+	lazy_load_curl();
+	return curl_multi_cleanup_func(multi_handle);
+}
+
+CURLMsg *curl_multi_info_read(CURLM *multi_handle, int *msgs_in_queue)
+{
+	lazy_load_curl();
+	return curl_multi_info_read_func(multi_handle, msgs_in_queue);
+}
+
+const char *curl_multi_strerror(CURLMcode error)
+{
+	lazy_load_curl();
+	return curl_multi_strerror_func(error);
+}
+
+CURLMcode curl_multi_timeout(CURLM *multi_handle, long *milliseconds)
+{
+	lazy_load_curl();
+	return curl_multi_timeout_func(multi_handle, milliseconds);
+}
+
+CURL *curl_easy_init(void)
+{
+	lazy_load_curl();
+	return curl_easy_init_func();
+}
+
+CURLcode curl_easy_perform(CURL *curl)
+{
+	lazy_load_curl();
+	return curl_easy_perform_func(curl);
+}
+
+void curl_easy_cleanup(CURL *curl)
+{
+	lazy_load_curl();
+	curl_easy_cleanup_func(curl);
+}
+
+CURL *curl_easy_duphandle(CURL *curl)
+{
+	lazy_load_curl();
+	return curl_easy_duphandle_func(curl);
+}
+
+#ifndef CURL_IGNORE_DEPRECATION
+#define CURL_IGNORE_DEPRECATION(x) x
+#endif
+
+#ifndef CURLOPTTYPE_BLOB
+#define CURLOPTTYPE_BLOB 40000
+#endif
+
+#undef curl_easy_getinfo
+CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...)
+{
+	va_list ap;
+	CURLcode res;
+
+	va_start(ap, info);
+	lazy_load_curl();
+	CURL_IGNORE_DEPRECATION(
+	if (info >= CURLINFO_LONG && info < CURLINFO_DOUBLE)
+		res = curl_easy_getinfo_long_func(curl, info, va_arg(ap, long *));
+	else if ((info >= CURLINFO_STRING && info < CURLINFO_LONG) ||
+		 (info >= CURLINFO_SLIST && info < CURLINFO_SOCKET))
+		res = curl_easy_getinfo_pointer_func(curl, info, va_arg(ap, void **));
+	else if (info >= CURLINFO_OFF_T)
+		res = curl_easy_getinfo_off_t_func(curl, info, va_arg(ap, curl_off_t *));
+	else
+		die("%s:%d: TODO (info: %d)!", __FILE__, __LINE__, info);
+	)
+	va_end(ap);
+	return res;
+}
+
+#undef curl_easy_setopt
+CURLcode curl_easy_setopt(CURL *curl, CURLoption opt, ...)
+{
+	va_list ap;
+	CURLcode res;
+
+	va_start(ap, opt);
+	lazy_load_curl();
+	CURL_IGNORE_DEPRECATION(
+	if (opt >= CURLOPTTYPE_LONG && opt < CURLOPTTYPE_OBJECTPOINT)
+		res = curl_easy_setopt_long_func(curl, opt, va_arg(ap, long));
+	else if (opt >= CURLOPTTYPE_OBJECTPOINT && opt < CURLOPTTYPE_OFF_T)
+		res = curl_easy_setopt_pointer_func(curl, opt, va_arg(ap, void *));
+	else if (opt >= CURLOPTTYPE_OFF_T && opt < CURLOPTTYPE_BLOB)
+		res = curl_easy_setopt_off_t_func(curl, opt, va_arg(ap, curl_off_t));
+	else
+		die("%s:%d: TODO (opt: %d)!", __FILE__, __LINE__, opt);
+	)
+	va_end(ap);
+	return res;
+}
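For readers unfamiliar with the pattern in this new compatibility file: every libcurl symbol is resolved at first use via dlopen()/dlsym() (or LoadLibraryExW()/GetProcAddress() on Windows), using the object-pointer cast the comment in load_function() describes. A minimal standalone demo of the same technique, assuming a glibc system where the math library is available as libm.so.6 (build with: cc demo.c -ldl):

#include <dlfcn.h>
#include <stdio.h>

typedef double (*cos_type)(double);

int main(void)
{
	void *handle = dlopen("libm.so.6", RTLD_LAZY);
	cos_type cos_func;

	if (!handle)
		return 1;
	/* Cast through an object pointer to silence pedantic warnings,
	 * exactly as load_function() does above. */
	*(void **)&cos_func = dlsym(handle, "cos");
	if (cos_func)
		printf("cos(0) = %f\n", cos_func(0.0));
	dlclose(handle);
	return 0;
}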
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
@@ -0,0 +1,298 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"  // mi_prim_get_default_heap
+
+#include <string.h>  // memset
+
+// ------------------------------------------------------
+// Aligned Allocation
+// ------------------------------------------------------
+
+// Fallback primitive aligned allocation -- split out for better codegen
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+{
+  mi_assert_internal(size <= PTRDIFF_MAX);
+  mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
+
+  const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
+  const size_t padsize = size + MI_PADDING_SIZE;
+
+  // use regular allocation if it is guaranteed to fit the alignment constraints
+  if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
+    void* p = _mi_heap_malloc_zero(heap, size, zero);
+    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
+    return p;
+  }
+
+  void* p;
+  size_t oversize;
+  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
+    // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+    // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
+    // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
+    if mi_unlikely(offset != 0) {
+      // todo: cannot support offset alignment for very large alignments yet
+      #if MI_DEBUG > 0
+      _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
+      #endif
+      return NULL;
+    }
+    oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
+    p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
+    // zero afterwards as only the area from the aligned_p may be committed!
+    if (p == NULL) return NULL;
+  }
+  else {
+    // otherwise over-allocate
+    oversize = size + alignment - 1;
+    p = _mi_heap_malloc_zero(heap, oversize, zero);
+    if (p == NULL) return NULL;
+  }
+
+  // .. and align within the allocation
+  const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
+  const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
+  mi_assert_internal(adjust < alignment);
+  void* aligned_p = (void*)((uintptr_t)p + adjust);
+  if (aligned_p != p) {
+    mi_page_t* page = _mi_ptr_page(p);
+    mi_page_set_has_aligned(page, true);
+    _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
+  }
+  // todo: expand padding if overallocated ?
+
+  mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
+  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
+  mi_assert_internal(mi_usable_size(aligned_p)>=size);
+  mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+
+  // now zero the block if needed
+  if (alignment > MI_ALIGNMENT_MAX) {
+    // for the tracker, on huge aligned allocations only from the start of the large block is defined
+    mi_track_mem_undefined(aligned_p, size);
+    if (zero) {
+      _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
+    }
+  }
+
+  if (p != aligned_p) {
+    mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
+  }
+  return aligned_p;
+}
+
+// Primitive aligned allocation
+static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+{
+  // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
+  if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) {  // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
+    #if MI_DEBUG > 0
+    _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
+    #endif
+    return NULL;
+  }
+
+  if mi_unlikely(size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+    #if MI_DEBUG > 0
+    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
+    #endif
+    return NULL;
+  }
+  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
+  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check
+
+  // try first if there happens to be a small block available with just the right alignment
+  if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
+    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
+    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
+    if mi_likely(page->free != NULL && is_aligned)
+    {
+      #if MI_STAT>1
+      mi_heap_stat_increase(heap, malloc, size);
+      #endif
+      void* p = _mi_page_malloc(heap, page, padsize, zero);  // TODO: inline _mi_page_malloc
+      mi_assert_internal(p != NULL);
+      mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
+      mi_track_malloc(p,size,zero);
+      return p;
+    }
+  }
+  // fallback
+  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
+}
+
+
+// ------------------------------------------------------
+// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+  if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) return NULL;
+  #if !MI_PADDING
+  // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
+  if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
+  #else
+  // with padding, we can only guarantee this for fixed alignments
+  if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
+               && size <= MI_SMALL_SIZE_MAX)
+  #endif
+  {
+    // fast path for common alignment and size
+    return mi_heap_malloc_small(heap, size);
+  }
+  else {
+    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
+  }
+}
+
+// ensure a definition is emitted
+#if defined(__cplusplus)
+static void* _mi_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
+#endif
+
+// ------------------------------------------------------
+// Aligned Allocation
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  size_t total;
+  if (mi_count_size_overflow(count, size, &total)) return NULL;
+  return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset);
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment);
+}
+
+
+// ------------------------------------------------------
+// Aligned re-allocation
+// ------------------------------------------------------
+
+static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
+  mi_assert(alignment > 0);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
+  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
+  size_t size = mi_usable_size(p);
+  if (newsize <= size && newsize >= (size - (size / 2))
+      && (((uintptr_t)p + offset) % alignment) == 0) {
+    return p;  // reallocation still fits, is aligned and not more than 50% waste
+  }
+  else {
+    // note: we don't zero allocate upfront so we only zero initialize the expanded part
+    void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
+    if (newp != NULL) {
+      if (zero && newsize > size) {
+        // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+        size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+        _mi_memzero((uint8_t*)newp + start, newsize - start);
+      }
+      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
+      mi_free(p);  // only free if successful
+    }
+    return newp;
+  }
+}
+
+static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
+  mi_assert(alignment > 0);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
+  size_t offset = ((uintptr_t)p % alignment);  // use offset of previous allocation (p can be NULL)
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
+}
+
+mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
+}
+
+mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
+}
+
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
+}
+
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
+}
+
+mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  size_t total;
+  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
+  return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+  size_t total;
+  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
+  return mi_heap_rezalloc_aligned(heap, p, total, alignment);
+}
+
+mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+  return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
+}
+
+mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+  return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
+}
+
+mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+  return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset);
+}
+
+mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+  return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
+}
The diff between the files is not shown because of its large size.
@ -0,0 +1,935 @@

/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
  "Arenas" are fixed areas of OS memory from which we can allocate
  large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
  In contrast to the rest of mimalloc, the arenas are shared between
  threads and need to be accessed using atomic operations.

  Arenas are used for huge OS page (1GiB) reservations or for reserving
  OS memory upfront, which can improve performance or is sometimes needed
  on embedded devices. We can also employ this with WASI or `sbrk` systems
  to reserve large arenas upfront and be able to reuse the memory more effectively.

  The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"

#include <string.h>   // memset
#include <errno.h>    // ENOMEM

#include "bitmap.h"   // atomic bitmap

/* -----------------------------------------------------------
  Arena allocation
----------------------------------------------------------- */

// Block info: bit 0 contains the `in_use` bit, the upper bits the
// size in count of arena blocks.
typedef uintptr_t mi_block_info_t;
#define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 64MiB (must be at least MI_SEGMENT_ALIGN)
#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 32MiB
#define MI_MAX_ARENAS         (112)                    // not more than 126 (since we use 7 bits in the memid and an arena index + 1)

// A memory arena descriptor
typedef struct mi_arena_s {
  mi_arena_id_t id;                     // arena id; 0 for non-specific
  mi_memid_t memid;                     // memid of the memory area
  _Atomic(uint8_t*) start;              // the start of the memory area
  size_t   block_count;                 // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
  size_t   field_count;                 // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
  size_t   meta_size;                   // size of the arena structure itself (including its bitmaps)
  mi_memid_t meta_memid;                // memid of the arena structure itself (OS or static allocation)
  int      numa_node;                   // associated NUMA node
  bool     exclusive;                   // only allow allocations if specifically for this arena
  bool     is_large;                    // memory area consists of large- or huge OS pages (always committed)
  _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
  _Atomic(mi_msecs_t) purge_expire;     // expiration time when blocks in `blocks_purge` should be decommitted
  mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
  mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
  mi_bitmap_field_t* blocks_purge;      // blocks that can be (reset) decommitted (can be NULL for memory that cannot be (reset) decommitted)
  mi_bitmap_field_t  blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
} mi_arena_t;


// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0


//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept;

/* -----------------------------------------------------------
  Arena ids
  id = arena_index + 1
----------------------------------------------------------- */

static size_t mi_arena_id_index(mi_arena_id_t id) {
  return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
}

static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
  mi_assert_internal(arena_index < MI_MAX_ARENAS);
  return (int)arena_index + 1;
}

mi_arena_id_t _mi_arena_id_none(void) {
  return 0;
}
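
// Illustrative note (added; not in the original source): arena ids are the
// arena's index plus one so that 0 can mean "no specific arena". For example,
// mi_arena_id_create(0) == 1 and mi_arena_id_index(1) == 0, while
// mi_arena_id_index(_mi_arena_id_none()) maps to MI_MAX_ARENAS, an index
// that is deliberately out of range for any valid arena.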

static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
  return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
          (arena_id == req_arena_id));
}

bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
  if (memid.memkind == MI_MEM_ARENA) {
    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
  }
  else {
    return mi_arena_id_is_suitable(0, false, request_arena_id);
  }
}

bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
  return (memid.memkind == MI_MEM_OS);
}

/* -----------------------------------------------------------
  Arena allocations get a (currently) 16-bit memory id where the
  lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */

static size_t mi_block_count_of_size(size_t size) {
  return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
}

static size_t mi_arena_block_size(size_t bcount) {
  return (bcount * MI_ARENA_BLOCK_SIZE);
}

static size_t mi_arena_size(mi_arena_t* arena) {
  return mi_arena_block_size(arena->block_count);
}
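
// Worked example (illustrative, not part of the original source): with
// MI_ARENA_BLOCK_SIZE == 64MiB, mi_block_count_of_size(100 * MI_MiB) rounds
// up to 2 blocks, and mi_arena_block_size(2) == 128MiB; arena sizes are
// always accounted in whole blocks.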

static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
  mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
  memid.mem.arena.id = id;
  memid.mem.arena.block_index = bitmap_index;
  memid.mem.arena.is_exclusive = is_exclusive;
  return memid;
}

static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
  *arena_index = mi_arena_id_index(memid.mem.arena.id);
  *bitmap_index = memid.mem.arena.block_index;
  return memid.mem.arena.is_exclusive;
}


/* -----------------------------------------------------------
  Special static area for mimalloc internal structures
  to avoid OS calls (for example, for the arena metadata)
----------------------------------------------------------- */

#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*MI_KiB)  // 8 KiB on 64-bit

static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
static _Atomic(size_t) mi_arena_static_top;

static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
  *memid = _mi_memid_none();
  if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
  if ((mi_atomic_load_relaxed(&mi_arena_static_top) + size) > MI_ARENA_STATIC_MAX) return NULL;

  // try to claim space
  if (alignment == 0) { alignment = 1; }
  const size_t oversize = size + alignment - 1;
  if (oversize > MI_ARENA_STATIC_MAX) return NULL;
  const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
  size_t top = oldtop + oversize;
  if (top > MI_ARENA_STATIC_MAX) {
    // try to roll back, ok if this fails
    mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
    return NULL;
  }

  // success
  *memid = _mi_memid_create(MI_MEM_STATIC);
  const size_t start = _mi_align_up(oldtop, alignment);
  uint8_t* const p = &mi_arena_static[start];
  _mi_memzero(p, size);
  return p;
}
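
// Illustrative trace (added; not in the original source): this is a lock-free
// bump allocator. For size == 100 and alignment == 16, oversize == 115 bytes
// are claimed atomically; if oldtop was 8, the returned pointer starts at
// _mi_align_up(8, 16) == 16, which always fits because the claim over-reserves
// by alignment - 1 bytes.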

static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
  *memid = _mi_memid_none();

  // try static
  void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
  if (p != NULL) return p;

  // or fall back to the OS
  return _mi_os_alloc(size, memid, stats);
}

static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
  if (mi_memkind_is_os(memid.memkind)) {
    _mi_os_free(p, size, memid, stats);
  }
  else {
    mi_assert(memid.memkind == MI_MEM_STATIC);
  }
}

static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
  return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
}


/* -----------------------------------------------------------
  Thread safe allocation in an arena
----------------------------------------------------------- */

// claim the `blocks_inuse` bits
static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
  size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx));  // start search from found location next time around
    return true;
  }
  return false;
}


/* -----------------------------------------------------------
  Arena Allocation
----------------------------------------------------------- */

static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
                                                    bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
{
  MI_UNUSED(arena_index);
  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);

  mi_bitmap_index_t bitmap_index;
  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;

  // claimed it!
  void* p = mi_arena_block_start(arena, bitmap_index);
  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
  memid->is_pinned = arena->memid.is_pinned;

  // none of the claimed blocks should be scheduled for a decommit
  if (arena->blocks_purge != NULL) {
    // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
  }

  // set the dirty bits (todo: no need for an atomic op here?)
  if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
  }

  // set commit state
  if (arena->blocks_committed == NULL) {
    // always committed
    memid->initially_committed = true;
  }
  else if (commit) {
    // commit requested, but the range may not be committed as a whole: ensure it is committed now
    memid->initially_committed = true;
    bool any_uncommitted;
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
    if (any_uncommitted) {
      bool commit_zero = false;
      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
        memid->initially_committed = false;
      }
      else {
        if (commit_zero) { memid->initially_zero = true; }
      }
    }
  }
  else {
    // no need to commit, but check if already fully committed
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
  }

  return p;
}
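
// Illustrative note (added; not in the original source): the three commit
// cases above mirror the bitmap invariants. For a pinned or large-page arena
// (blocks_committed == NULL) memory is always committed; otherwise a claim
// of, say, 2 blocks first marks both bits in `blocks_committed` and only
// calls _mi_os_commit when at least one of them was not yet set.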

// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                      bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
  MI_UNUSED_RELEASE(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t bcount = mi_block_count_of_size(size);
  const size_t arena_index = mi_arena_id_index(arena_id);
  mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
  mi_assert_internal(size <= mi_arena_block_size(bcount));

  // Check arena suitability
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
  if (arena == NULL) return NULL;
  if (!allow_large && arena->is_large) return NULL;
  if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
  if (req_arena_id == _mi_arena_id_none()) {  // if not specific, check numa affinity
    const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
    if (match_numa_node) { if (!numa_suitable) return NULL; }
    else                 { if (numa_suitable) return NULL; }
  }

  // try to allocate
  void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
  mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
  return p;
}


// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
                                                 bool commit, bool allow_large,
                                                 mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
  MI_UNUSED(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  if mi_likely(max_arena == 0) return NULL;

  if (req_arena_id != _mi_arena_id_none()) {
    // try a specific arena if requested
    if (mi_arena_id_index(req_arena_id) < max_arena) {
      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
    }
  }
  else {
    // try numa affine allocation
    for (size_t i = 0; i < max_arena; i++) {
      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
    }

    // try from another numa node instead..
    if (numa_node >= 0) {  // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
      for (size_t i = 0; i < max_arena; i++) {
        void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
        if (p != NULL) return p;
      }
    }
  }
  return NULL;
}
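
// Illustrative note (added; not in the original source): with two arenas where
// arena 0 is on NUMA node 0 and arena 1 on node 1, a request from node 1 first
// scans both arenas with match_numa_node == true (only arena 1 qualifies) and,
// only if that fails, rescans with match_numa_node == false so arena 0 can be
// used as a non-local fallback.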

// try to reserve a fresh arena space
static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
{
  if (_mi_preloading()) return false;  // use OS only while preloading
  if (req_arena_id != _mi_arena_id_none()) return false;

  const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
  if (arena_count > (MI_MAX_ARENAS - 4)) return false;

  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
  if (arena_reserve == 0) return false;

  if (!_mi_os_has_virtual_reserve()) {
    arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
  }
  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
  if (arena_count >= 8 && arena_count <= 128) {
    arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
  }
  if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size

  // commit eagerly?
  bool arena_commit = false;
  if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }

  return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
}
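
// Worked example (illustrative, not part of the original source): assuming a
// configured arena reserve of 256MiB, the exponential scaling kicks in once
// 8 arenas exist: at arena_count == 8 the factor is 1 << (8/8) == 2 (512MiB)
// and at arena_count == 16 it is 1 << 2 == 4 (1GiB), so later arenas grow
// geometrically instead of multiplying the number of small reservations.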


void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
  mi_assert_internal(memid != NULL && tld != NULL);
  mi_assert_internal(size > 0);
  *memid = _mi_memid_none();

  const int numa_node = _mi_os_numa_node(tld);  // current numa node

  // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
    void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
    if (p != NULL) return p;

    // otherwise, try to first eagerly reserve a new arena
    if (req_arena_id == _mi_arena_id_none()) {
      mi_arena_id_t arena_id = 0;
      if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
        // and try allocate in there
        mi_assert_internal(req_arena_id == _mi_arena_id_none());
        p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
        if (p != NULL) return p;
      }
    }
  }

  // if we cannot use OS allocation, return NULL
  if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
    errno = ENOMEM;
    return NULL;
  }

  // finally, fall back to the OS
  if (align_offset > 0) {
    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
  }
  else {
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
  }
}
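
// Illustrative summary (added; not in the original source): the fallback order
// is (1) an existing arena, (2) a freshly reserved arena, and only then (3) a
// direct OS allocation, which is skipped entirely when a specific arena was
// requested or mi_option_limit_os_alloc is enabled.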

void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
}


void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
  if (size != NULL) *size = 0;
  size_t arena_index = mi_arena_id_index(arena_id);
  if (arena_index >= MI_MAX_ARENAS) return NULL;
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
  if (arena == NULL) return NULL;
  if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
  return arena->start;
}


/* -----------------------------------------------------------
  Arena purge
----------------------------------------------------------- */

static long mi_arena_purge_delay(void) {
  // <0 = no purging allowed, 0 = immediate purging, >0 = milli-second delay
  return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
}
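
// Worked example (illustrative, not part of the original source): with
// mi_option_purge_delay == 10 and mi_option_arena_purge_mult == 10, arena
// purges are delayed by 100ms; a negative purge_delay disables arena purging
// altogether since the product stays negative.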

// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
  mi_assert_internal(arena->blocks_committed != NULL);
  mi_assert_internal(arena->blocks_purge != NULL);
  mi_assert_internal(!arena->memid.is_pinned);
  const size_t size = mi_arena_block_size(blocks);
  void* const p = mi_arena_block_start(arena, bitmap_idx);
  bool needs_recommit;
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
    // all blocks are committed, we can purge freely
    needs_recommit = _mi_os_purge(p, size, stats);
  }
  else {
    // some blocks are not committed -- this can happen when a partially committed block is freed
    // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
    // and also undo the decommit stats (as it was already adjusted)
    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
    _mi_stat_increase(&stats->committed, size);
  }

  // clear the purged blocks
  _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
  // update committed bitmap
  if (needs_recommit) {
    _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
  }
}

// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
  mi_assert_internal(arena->blocks_purge != NULL);
  const long delay = mi_arena_purge_delay();
  if (delay < 0) return;  // is purging allowed at all?

  if (_mi_preloading() || delay == 0) {
    // decommit directly
    mi_arena_purge(arena, bitmap_idx, blocks, stats);
  }
  else {
    // schedule decommit
    mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
    if (expire != 0) {
      mi_atomic_addi64_acq_rel(&arena->purge_expire, delay/10);  // add smallish extra delay
    }
    else {
      mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
    }
    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
  }
}

// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
  const size_t endidx = startidx + bitlen;
  size_t bitidx = startidx;
  bool all_purged = false;
  while (bitidx < endidx) {
    // count consecutive ones in the purge mask
    size_t count = 0;
    while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
      count++;
    }
    if (count > 0) {
      // found range to be purged
      const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
      mi_arena_purge(arena, range_idx, count, stats);
      if (count == bitlen) {
        all_purged = true;
      }
    }
    bitidx += (count+1);  // +1 to skip the zero bit (or end)
  }
  return all_purged;
}
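
// Worked example (illustrative, not part of the original source): for a purge
// mask 0b0110'1100 with startidx == 2 and bitlen == 6, the scan purges the
// run at bits 2..3, skips the zero at bit 4, then purges bits 5..6;
// all_purged stays false because no single run covered the whole 6-bit range.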

// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
  if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
  mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
  if (expire == 0) return false;
  if (!force && expire > now) return false;

  // reset expire (if not already set concurrently)
  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);

  // potential purges scheduled, walk through the bitmap
  bool any_purged = false;
  bool full_purge = true;
  for (size_t i = 0; i < arena->field_count; i++) {
    size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
    if (purge != 0) {
      size_t bitidx = 0;
      while (bitidx < MI_BITMAP_FIELD_BITS) {
        // find consecutive range of ones in the purge mask
        size_t bitlen = 0;
        while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
          bitlen++;
        }
        // try to claim the longest range of corresponding in_use bits
        const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
        while( bitlen > 0 ) {
          if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
            break;
          }
          bitlen--;
        }
        // actual claimed bits at `in_use`
        if (bitlen > 0) {
          // read purge again now that we have the in_use bits
          purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
          if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
            full_purge = false;
          }
          any_purged = true;
          // release the claimed `in_use` bits again
          _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
        }
        bitidx += (bitlen+1);  // +1 to skip the zero (or end)
      } // while bitidx
    } // purge != 0
  }
  // if not fully purged, make sure to purge again in the future
  if (!full_purge) {
    const long delay = mi_arena_purge_delay();
    mi_msecs_t expected = 0;
    mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expected, _mi_clock_now() + delay);
  }
  return any_purged;
}

static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
  if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled

  const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
  if (max_arena == 0) return;

  // allow only one thread to purge at a time
  static mi_atomic_guard_t purge_guard;
  mi_atomic_guard(&purge_guard)
  {
    mi_msecs_t now = _mi_clock_now();
    size_t max_purge_count = (visit_all ? max_arena : 1);
    for (size_t i = 0; i < max_arena; i++) {
      mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
      if (arena != NULL) {
        if (mi_arena_try_purge(arena, now, force, stats)) {
          if (max_purge_count <= 1) break;
          max_purge_count--;
        }
      }
    }
  }
}


/* -----------------------------------------------------------
  Arena free
----------------------------------------------------------- */

void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
  mi_assert_internal(size > 0 && stats != NULL);
  mi_assert_internal(committed_size <= size);
  if (p==NULL) return;
  if (size==0) return;
  const bool all_committed = (committed_size == size);

  if (mi_memkind_is_os(memid.memkind)) {
    // was a direct OS allocation, pass through
    if (!all_committed && committed_size > 0) {
      // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
      _mi_stat_decrease(&stats->committed, committed_size);
    }
    _mi_os_free(p, size, memid, stats);
  }
  else if (memid.memkind == MI_MEM_ARENA) {
    // allocated in an arena
    size_t arena_idx;
    size_t bitmap_idx;
    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
    mi_assert_internal(arena_idx < MI_MAX_ARENAS);
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
    mi_assert_internal(arena != NULL);
    const size_t blocks = mi_block_count_of_size(size);

    // checks
    if (arena == NULL) {
      _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
    if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
      _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }

    // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
    mi_track_mem_undefined(p,size);

    // potentially decommit
    if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
      mi_assert_internal(all_committed);
    }
    else {
      mi_assert_internal(arena->blocks_committed != NULL);
      mi_assert_internal(arena->blocks_purge != NULL);

      if (!all_committed) {
        // mark the entire range as no longer committed (so we recommit the full range when re-using)
        _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
        mi_track_mem_noaccess(p,size);
        if (committed_size > 0) {
          // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
          // in the delayed purge, we now need to not count a decommit if the range is not marked as committed.
          _mi_stat_decrease(&stats->committed, committed_size);
        }
        // note: if not all committed, it may be that the purge will reset/decommit the entire range
        // that contains already decommitted parts. Since purge consistently uses reset or decommit that
        // works (as we should never reset decommitted parts).
      }
      // (delay) purge the entire range
      mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
    }

    // and make it available to others again
    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
    if (!all_inuse) {
      _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
      return;
    }
  }
  else {
    // arena was none, external, or static; nothing to do
    mi_assert_internal(memid.memkind < MI_MEM_OS);
  }

  // purge expired decommits
  mi_arenas_try_purge(false, false, stats);
}

// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
static void mi_arenas_unsafe_destroy(void) {
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  size_t new_max_arena = 0;
  for (size_t i = 0; i < max_arena; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
    if (arena != NULL) {
      if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
        mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
      }
      else {
        new_max_arena = i;
      }
      mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
    }
  }

  // try to lower the max arena.
  size_t expected = max_arena;
  mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
}

// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
  mi_arenas_try_purge(force_purge, true /* visit all */, stats);
}

// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
  mi_arenas_unsafe_destroy();
  _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
}

// Is a pointer inside any of our arenas?
bool _mi_arena_contains(const void* p) {
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  for (size_t i = 0; i < max_arena; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
    if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
      return true;
    }
  }
  return false;
}


/* -----------------------------------------------------------
  Add an arena.
----------------------------------------------------------- */

static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
  mi_assert_internal(arena != NULL);
  mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
  mi_assert_internal(arena->block_count > 0);
  if (arena_id != NULL) { *arena_id = -1; }

  size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
  if (i >= MI_MAX_ARENAS) {
    mi_atomic_decrement_acq_rel(&mi_arena_count);
    return false;
  }
  arena->id = mi_arena_id_create(i);
  mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
  if (arena_id != NULL) { *arena_id = arena->id; }
  return true;
}

static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  if (size < MI_ARENA_BLOCK_SIZE) return false;

  if (is_large) {
    mi_assert_internal(memid.initially_committed && memid.is_pinned);
  }

  const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
  const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
  const size_t bitmaps = (memid.is_pinned ? 2 : 4);
  const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
  mi_memid_t meta_memid;
  mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main);  // TODO: can we avoid allocating from the OS?
  if (arena == NULL) return false;

  // already zero'd due to os_alloc
  // _mi_memzero(arena, asize);
  arena->id = _mi_arena_id_none();
  arena->memid = memid;
  arena->exclusive = exclusive;
  arena->meta_size = asize;
  arena->meta_memid = meta_memid;
  arena->block_count = bcount;
  arena->field_count = fields;
  arena->start = (uint8_t*)start;
  arena->numa_node = numa_node;  // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
  arena->is_large = is_large;
  arena->purge_expire = 0;
  arena->search_idx = 0;
  arena->blocks_dirty = &arena->blocks_inuse[fields];  // just after inuse bitmap
  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]);  // just after dirty bitmap
  arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]);  // just after committed bitmap
  // initialize committed bitmap?
  if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
    memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t));  // cast to void* to avoid atomic warning
  }

  // and claim leftover blocks if needed (so we never allocate there)
  ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
  mi_assert_internal(post >= 0);
  if (post > 0) {
    // don't use leftover bits at the end
    mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
    _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
  }
  return mi_arena_add(arena, arena_id);
}
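
// Worked example (illustrative, not part of the original source): managing a
// 1GiB region with 64MiB blocks gives bcount == 16 and, with 64-bit fields,
// fields == 1. A pinned region needs only 2 bitmaps (inuse, dirty) while a
// purgeable one needs 4, laid out back-to-back after the struct as
// [inuse | dirty | committed | purge]; the 48 leftover bits of the single
// field are pre-claimed so they are never handed out.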

bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
  memid.initially_committed = is_committed;
  memid.initially_zero = is_zero;
  memid.is_pinned = is_large;
  return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
}

// Reserve a range of regular OS memory
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE);  // at least one block
  mi_memid_t memid;
  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
  if (start == NULL) return ENOMEM;
  const bool is_large = memid.is_pinned;  // todo: use separate is_large field?
  if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
    _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
    _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
    return ENOMEM;
  }
  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
  return 0;
}
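
// Usage sketch (illustrative, not part of the original source): a host
// application can pre-reserve an exclusive 512MiB arena and later direct
// allocations to it via the returned id, e.g.
//   mi_arena_id_t id;
//   if (mi_reserve_os_memory_ex(512 * 1024 * 1024, false /* commit lazily */,
//                               true /* allow large pages */, true /* exclusive */, &id) == 0) {
//     mi_heap_t* heap = mi_heap_new_in_arena(id);  // assumed public API of this mimalloc version
//   }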


// Manage a range of regular OS memory
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
}

// Reserve a range of regular OS memory
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
}


/* -----------------------------------------------------------
  Debugging
----------------------------------------------------------- */

static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
  size_t inuse_count = 0;
  for (size_t i = 0; i < field_count; i++) {
    char buf[MI_BITMAP_FIELD_BITS + 1];
    uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
    for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
      bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
      if (inuse) inuse_count++;
      buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
    }
    buf[MI_BITMAP_FIELD_BITS] = 0;
    _mi_verbose_message("%s%s\n", prefix, buf);
  }
  return inuse_count;
}

void mi_debug_show_arenas(void) mi_attr_noexcept {
  size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
  for (size_t i = 0; i < max_arenas; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
    if (arena == NULL) break;
    size_t inuse_count = 0;
    _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
    inuse_count += mi_debug_show_bitmap("  ", arena->blocks_inuse, arena->field_count);
    _mi_verbose_message("  blocks in use ('x'): %zu\n", inuse_count);
  }
}


/* -----------------------------------------------------------
  Reserve a huge page arena.
----------------------------------------------------------- */
// reserve at a specific numa node
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = -1;
  if (pages==0) return 0;
  if (numa_node < -1) numa_node = -1;
  if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
  size_t hsize = 0;
  size_t pages_reserved = 0;
  mi_memid_t memid;
  void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
  if (p==NULL || pages_reserved==0) {
    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
    return ENOMEM;
  }
  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);

  if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
    _mi_os_free(p, hsize, memid, &_mi_stats_main);
    return ENOMEM;
  }
  return 0;
}

int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
}

// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
  if (pages == 0) return 0;

  // pages per numa node
  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
  if (numa_count <= 0) numa_count = 1;
  const size_t pages_per = pages / numa_count;
  const size_t pages_mod = pages % numa_count;
  const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);

  // reserve evenly among numa nodes
  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
    size_t node_pages = pages_per;  // can be 0
    if (numa_node < pages_mod) node_pages++;
    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
    if (err) return err;
    if (pages < node_pages) {
      pages = 0;
    }
    else {
      pages -= node_pages;
    }
  }

  return 0;
}
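
// Worked example (illustrative, not part of the original source): reserving
// pages == 10 across numa_count == 4 nodes gives pages_per == 2 and
// pages_mod == 2, so nodes 0 and 1 reserve 3 pages each and nodes 2 and 3
// reserve 2 each; the per-node timeout adds 50ms of slack on top of the
// evenly divided budget.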

int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
  MI_UNUSED(max_secs);
  _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
  if (pages_reserved != NULL) *pages_reserved = 0;
  int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
  if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
  return err;
}
@ -0,0 +1,432 @@

/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
Concurrent bitmap that can set/reset sequences of bits atomically,
represented as an array of fields where each field is a machine word (`size_t`)

There are two APIs; the standard one cannot have sequences that cross
between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).

The `_across` postfixed functions do allow sequences that can cross over
between the fields. (This is used in arena allocation)
---------------------------------------------------------------------------- */

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "bitmap.h"

/* -----------------------------------------------------------
  Bitmap definition
----------------------------------------------------------- */

// The bit mask for a given number of blocks at a specified bit index.
static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
  mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
  mi_assert_internal(count > 0);
  if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
  if (count == 0) return 0;
  return ((((size_t)1 << count) - 1) << bitidx);
}
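
// Worked example (illustrative, not part of the original source):
// mi_bitmap_mask_(3, 4) == ((1 << 3) - 1) << 4 == 0b0111 << 4 == 0b0111'0000,
// i.e. three consecutive bits starting at bit index 4; a full-width count
// short-circuits to MI_BITMAP_FIELD_FULL to avoid an undefined 64-bit shift.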


/* -----------------------------------------------------------
  Claim a bit sequence atomically
----------------------------------------------------------- */

// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
{
  mi_assert_internal(bitmap_idx != NULL);
  mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
  mi_assert_internal(count > 0);
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t map = mi_atomic_load_relaxed(field);
  if (map==MI_BITMAP_FIELD_FULL) return false;  // shortcut

  // search for 0-bit sequence of length count
  const size_t mask = mi_bitmap_mask_(count, 0);
  const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;

#ifdef MI_HAVE_FAST_BITSCAN
  size_t bitidx = mi_ctz(~map);  // quickly find the first zero bit if possible
#else
  size_t bitidx = 0;             // otherwise start at 0
#endif
  size_t m = (mask << bitidx);   // invariant: m == mask shifted by bitidx

  // scan linearly for a free range of zero bits
  while (bitidx <= bitidx_max) {
    const size_t mapm = (map & m);
    if (mapm == 0) {  // are the mask bits free at bitidx?
      mi_assert_internal((m >> bitidx) == mask);  // no overflow?
      const size_t newmap = (map | m);
      mi_assert_internal((newmap^map) >> bitidx == mask);
      if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) {  // TODO: use weak cas here?
        // no success, another thread claimed concurrently.. keep going (with updated `map`)
        continue;
      }
      else {
        // success, we claimed the bits!
        *bitmap_idx = mi_bitmap_index_create(idx, bitidx);
        return true;
      }
    }
    else {
      // on to the next bit range
#ifdef MI_HAVE_FAST_BITSCAN
      mi_assert_internal(mapm != 0);
      const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx));
      mi_assert_internal(shift > 0 && shift <= count);
#else
      const size_t shift = 1;
#endif
      bitidx += shift;
      m <<= shift;
    }
  }
  // no bits found
  return false;
}
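
// Illustrative trace (added; not in the original source): on a 64-bit field
// with map == 0b0110 and count == 2, the scan starts at bitidx == ctz(~map)
// == 0 where mapm == 0b10 is non-zero, skips past the blocking run to reach
// bitidx == 3, finds bits 3..4 free, and publishes the claim with a single
// compare-and-swap; a CAS failure simply rescans with the freshly loaded map.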

// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
  size_t idx = start_field_idx;
  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
    if (idx >= bitmap_fields) { idx = 0; }  // wrap
    if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
      return true;
    }
  }
  return false;
}

// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
                                         const size_t start_field_idx, const size_t count,
                                         mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
                                         mi_bitmap_index_t* bitmap_idx) {
  size_t idx = start_field_idx;
  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
    if (idx >= bitmap_fields) idx = 0;  // wrap
    if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
        return true;
      }
      // predicate returned false, unclaim and look further
      _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
    }
  }
  return false;
}

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  const size_t idx = mi_bitmap_index_field(bitmap_idx);
  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
  const size_t mask = mi_bitmap_mask_(count, bitidx);
  mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
  // mi_assert_internal((bitmap[idx] & mask) == mask);
  const size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
  return ((prev & mask) == mask);
}


// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
  const size_t idx = mi_bitmap_index_field(bitmap_idx);
  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
  const size_t mask = mi_bitmap_mask_(count, bitidx);
  mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
  //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
  size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
  if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); }
  return ((prev & mask) == 0);
}
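
// Illustrative note (added; not in the original source): both helpers derive
// their result from the pre-update value returned by the atomic and/or. For
// example, claiming mask == 0b1100 when prev == 0b0100 returns false (bit 2
// was already set) and reports *any_zero == true because bit 3 was still zero.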

// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
  const size_t idx = mi_bitmap_index_field(bitmap_idx);
  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
  const size_t mask = mi_bitmap_mask_(count, bitidx);
  mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
  const size_t field = mi_atomic_load_relaxed(&bitmap[idx]);
  if (any_ones != NULL) { *any_ones = ((field & mask) != 0); }
  return ((field & mask) == mask);
}

// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` if successful when all previous `count` bits were 0.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  const size_t idx = mi_bitmap_index_field(bitmap_idx);
  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
  const size_t mask = mi_bitmap_mask_(count, bitidx);
  mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
  size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
  do {
    if ((expected & mask) != 0) return false;
  }
  while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
  mi_assert_internal((expected & mask) == 0);
  return true;
}


bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}

bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  bool any_ones;
  mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
  return any_ones;
}


//--------------------------------------------------------------------------
// the `_across` functions work on bitmaps where sequences can cross over
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------

// Try to atomically claim a sequence of `count` bits starting from the field
// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
{
  mi_assert_internal(bitmap_idx != NULL);

  // check initial trailing zeros
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t map = mi_atomic_load_relaxed(field);
  const size_t initial = mi_clz(map);  // count of initial zeros starting at idx
  mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
  if (initial == 0) return false;
  if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx);  // no need to cross fields (this case won't happen for us)
  if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false;  // not enough entries

  // scan ahead
  size_t found = initial;
  size_t mask = 0;  // mask bits for the final field
  while(found < count) {
    field++;
    map = mi_atomic_load_relaxed(field);
    const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
    mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS);
    mask = mi_bitmap_mask_(mask_bits, 0);
    if ((map & mask) != 0) return false;  // some part is already claimed
    found += mask_bits;
  }
  mi_assert_internal(field < &bitmap[bitmap_fields]);

  // we found a range of contiguous zeros up to the final field; mask contains mask in the final field
  // now try to claim the range atomically
  mi_bitmap_field_t* const final_field = field;
  const size_t final_mask = mask;
  mi_bitmap_field_t* const initial_field = &bitmap[idx];
  const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial;
  const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx);

  // initial field
  size_t newmap;
  field = initial_field;
  map = mi_atomic_load_relaxed(field);
  do {
    newmap = (map | initial_mask);
    if ((map & initial_mask) != 0) { goto rollback; }
  } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));

  // intermediate fields
  while (++field < final_field) {
    newmap = MI_BITMAP_FIELD_FULL;
    map = 0;
    if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
  }

  // final field
  mi_assert_internal(field == final_field);
  map = mi_atomic_load_relaxed(field);
  do {
    newmap = (map | final_mask);
    if ((map & final_mask) != 0) { goto rollback; }
  } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));

  // claimed!
  *bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
  return true;

rollback:
  // roll back intermediate fields
  // (we just failed to claim `field` so decrement first)
  while (--field > initial_field) {
    newmap = 0;
    map = MI_BITMAP_FIELD_FULL;
    mi_assert_internal(mi_atomic_load_relaxed(field) == map);
    mi_atomic_store_release(field, newmap);
  }
  if (field == initial_field) {  // (if we failed on the initial field, `field + 1 == initial_field`)
    map = mi_atomic_load_relaxed(field);
    do {
      mi_assert_internal((map & initial_mask) == initial_mask);
      newmap = (map & ~initial_mask);
    } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
  }
  // retry? (we make a recursive call instead of goto to be able to use const declarations)
  if (retries <= 2) {
    return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
  }
  else {
    return false;
  }
}


// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
  mi_assert_internal(count > 0);
  if (count <= 2) {
    // we don't bother with crossover fields for small counts
    return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx);
  }

  // visit the fields
  size_t idx = start_field_idx;
  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
    if (idx >= bitmap_fields) { idx = 0; }  // wrap
    // first try to claim inside a field
    if (count <= MI_BITMAP_FIELD_BITS) {
      if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
        return true;
      }
    }
    // if that fails, then try to claim across fields
    if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) {
      return true;
    }
  }
  return false;
}
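// A minimal sketch (not in the upstream source) of how an arena-style caller
// might use the `_across` search: find 100 free bits anywhere in a 4-field
// bitmap (256 bits on a 64-bit machine), then release them. Sizes are
// illustrative only.
#if 0
static void example_claim_across(void) {
  static mi_bitmap_field_t fields[4];  // zero-initialized: all bits free
  mi_bitmap_index_t where;
  if (_mi_bitmap_try_find_from_claim_across(fields, 4, 0, 100, &where)) {
    // 100 contiguous bits (possibly spanning fields) are now set to 1
    _mi_bitmap_unclaim_across(fields, 4, 100, where);
  }
}
#endif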

// Helper for masks across fields; returns the mid count, post_mask may be 0
static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
  MI_UNUSED(bitmap_fields);
  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
  if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
    *pre_mask = mi_bitmap_mask_(count, bitidx);
    *mid_mask = 0;
    *post_mask = 0;
    mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields);
    return 0;
  }
  else {
    const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx;
    mi_assert_internal(pre_bits < count);
    *pre_mask = mi_bitmap_mask_(pre_bits, bitidx);
    count -= pre_bits;
    const size_t mid_count = (count / MI_BITMAP_FIELD_BITS);
    *mid_mask = MI_BITMAP_FIELD_FULL;
    count %= MI_BITMAP_FIELD_BITS;
    *post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0));
    mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 0 : 1) < bitmap_fields);
    return mid_count;
  }
}
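// Worked example (illustrative, assuming 64-bit fields): for bitidx = 60 and
// count = 140 the range decomposes as
//   pre_mask : bits 60..63 of the first field   (4 bits)
//   mid_mask : 2 completely full fields         (128 bits, mid_count == 2)
//   post_mask: bits 0..7 of the last field      (8 bits)
// since 4 + 2*64 + 8 == 140.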

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  size_t idx = mi_bitmap_index_field(bitmap_idx);
  size_t pre_mask;
  size_t mid_mask;
  size_t post_mask;
  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
  bool all_one = true;
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask);  // clear first part
  if ((prev & pre_mask) != pre_mask) all_one = false;
  while (mid_count-- > 0) {
    prev = mi_atomic_and_acq_rel(field++, ~mid_mask);  // clear mid part
    if ((prev & mid_mask) != mid_mask) all_one = false;
  }
  if (post_mask!=0) {
    prev = mi_atomic_and_acq_rel(field, ~post_mask);  // clear end part
    if ((prev & post_mask) != post_mask) all_one = false;
  }
  return all_one;
}

// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `*pany_zero` is set to `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
  size_t idx = mi_bitmap_index_field(bitmap_idx);
  size_t pre_mask;
  size_t mid_mask;
  size_t post_mask;
  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
  bool all_zero = true;
  bool any_zero = false;
  _Atomic(size_t)* field = &bitmap[idx];
  size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
  if ((prev & pre_mask) != 0) all_zero = false;
  if ((prev & pre_mask) != pre_mask) any_zero = true;
  while (mid_count-- > 0) {
    prev = mi_atomic_or_acq_rel(field++, mid_mask);
    if ((prev & mid_mask) != 0) all_zero = false;
    if ((prev & mid_mask) != mid_mask) any_zero = true;
  }
  if (post_mask!=0) {
    prev = mi_atomic_or_acq_rel(field, post_mask);
    if ((prev & post_mask) != 0) all_zero = false;
    if ((prev & post_mask) != post_mask) any_zero = true;
  }
  if (pany_zero != NULL) { *pany_zero = any_zero; }
  return all_zero;
}


// Returns `true` if all `count` bits were 1.
// `*pany_ones` is set to `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
  size_t idx = mi_bitmap_index_field(bitmap_idx);
  size_t pre_mask;
  size_t mid_mask;
  size_t post_mask;
  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
  bool all_ones = true;
  bool any_ones = false;
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t prev = mi_atomic_load_relaxed(field++);
  if ((prev & pre_mask) != pre_mask) all_ones = false;
  if ((prev & pre_mask) != 0) any_ones = true;
  while (mid_count-- > 0) {
    prev = mi_atomic_load_relaxed(field++);
    if ((prev & mid_mask) != mid_mask) all_ones = false;
    if ((prev & mid_mask) != 0) any_ones = true;
  }
  if (post_mask!=0) {
    prev = mi_atomic_load_relaxed(field);
    if ((prev & post_mask) != post_mask) all_ones = false;
    if ((prev & post_mask) != 0) any_ones = true;
  }
  if (pany_ones != NULL) { *pany_ones = any_ones; }
  return all_ones;
}

bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}

bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  bool any_ones;
  mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
  return any_ones;
}
@ -0,0 +1,115 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
Concurrent bitmap that can set/reset sequences of bits atomically,
represented as an array of fields where each field is a machine word (`size_t`)

There are two APIs: the standard one cannot have sequences that cross
between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
(This is used in region allocation.)

The `_across` postfixed functions do allow sequences that can cross over
between the fields. (This is used in arena allocation.)
---------------------------------------------------------------------------- */
#pragma once
#ifndef MI_BITMAP_H
#define MI_BITMAP_H

/* -----------------------------------------------------------
  Bitmap definition
----------------------------------------------------------- */

#define MI_BITMAP_FIELD_BITS   (8*MI_SIZE_SIZE)
#define MI_BITMAP_FIELD_FULL   (~((size_t)0))   // all bits set

// An atomic bitmap of `size_t` fields
typedef _Atomic(size_t)  mi_bitmap_field_t;
typedef mi_bitmap_field_t*  mi_bitmap_t;

// A bitmap index is the index of the bit in a bitmap.
typedef size_t mi_bitmap_index_t;

// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
  mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
  return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
}

// Create a bit index from a full bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
  return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
}

// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
  return (bitmap_idx / MI_BITMAP_FIELD_BITS);
}

// Get the bit index in a bitmap field
static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
  return (bitmap_idx % MI_BITMAP_FIELD_BITS);
}

// Get the full bit index
static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
  return bitmap_idx;
}
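// Worked example (illustrative, assuming 64-bit fields): the bit index for
// field 2, bit 5 is 2*64 + 5 == 133, and the helpers invert this:
//   mi_bitmap_index_field(133)        == 2
//   mi_bitmap_index_bit_in_field(133) == 5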

/* -----------------------------------------------------------
  Claim a bit sequence atomically
----------------------------------------------------------- */

// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` on success, i.e. when all `count` bits were 0 previously.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `*any_zero` is set to `true` if there was at least one zero bit.
bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);

bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);


//--------------------------------------------------------------------------
// the `_across` functions work on bitmaps where sequences can cross over
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------

// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `*pany_zero` is set to `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);

bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

#endif
@ -0,0 +1,626 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"  // mi_prim_get_default_heap

#include <string.h>  // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)  // non-constant aggregate initializer
#endif

/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return false;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while(page != NULL) {
      mi_page_t* next = page->next;  // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      #if MI_DEBUG>1
      count++;
      #endif
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next;  // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}
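// A minimal sketch (not in the upstream source) of a page visitor that counts
// the pages in a heap through `mi_heap_visit_pages`.
#if 0
static bool example_count_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(heap); MI_UNUSED(pq); MI_UNUSED(page); MI_UNUSED(arg2);
  size_t* n = (size_t*)arg1;
  (*n)++;
  return true;  // `true` keeps the visit going; `false` would break
}

static size_t example_count_pages(mi_heap_t* heap) {
  size_t n = 0;
  mi_heap_visit_pages(heap, &example_count_page, &n, NULL);
  return n;
}
#endif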


#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif


/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    _mi_page_free(page, pq, collect >= MI_FORCE);
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true;  // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true;  // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = collect >= MI_FORCE;
  _mi_deferred_free(heap, force);

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // collect segment local caches
  if (force) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // collect regions on program-exit (or shared library unload)
  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
  }
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_prim_get_default_heap(), force);
}
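// A minimal usage sketch (not in the upstream source): `mi_collect` is part of
// the public mimalloc API and collects deferred and retired memory of the
// calling thread; `force == true` additionally purges caches, which is slower.
#if 0
static void example_collect(void) {
  void* p = mi_malloc(64);
  mi_free(p);
  mi_collect(false);  // cheap collection of the default heap
  mi_collect(true);   // forced: also collects segment and thread caches
}
#endif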


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}

static bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_prim_get_default_heap());
}


mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = bheap->tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  _mi_random_split(&bheap->random, &heap->random);
  heap->cookie  = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  heap->no_reclaim = true;  // don't reclaim abandoned pages, as otherwise `mi_heap_destroy` would be unsafe
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}
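// A minimal usage sketch (not in the upstream source): first-class heaps from
// the public API. Allocations stay bound to the heap they came from.
#if 0
static void example_heap_new(void) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return;
  char* s = (char*)mi_heap_malloc(heap, 32);  // allocate in this heap
  mi_free(s);                                 // blocks are freed individually...
  mi_heap_delete(heap);                       // ...or migrate to the default heap on delete
}
#endif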

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return;  // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}


/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true;  // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

#if MI_TRACK_HEAP_DESTROY
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block, mi_usable_size(block));
  return true;
}
#endif

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}

// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* curr = bheap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;
    if (curr->no_reclaim) {
      mi_heap_destroy(curr);
    }
    else {
      _mi_heap_destroy_pages(curr);
    }
    curr = next;
  }
}

/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900)  // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}
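// A short sketch (not in the upstream source): `mi_heap_delete` keeps live
// blocks valid by absorbing them into the backing heap, while
// `mi_heap_destroy` frees everything in the heap at once and must only be
// used when no block from that heap is referenced anymore.
#if 0
static void example_delete_vs_destroy(void) {
  mi_heap_t* h1 = mi_heap_new();
  void* keep = mi_heap_malloc(h1, 16);
  mi_heap_delete(h1);   // `keep` stays valid; its page now belongs to the backing heap
  mi_free(keep);

  mi_heap_t* h2 = mi_heap_new();
  (void)mi_heap_malloc(h2, 16);
  mi_heap_destroy(h2);  // frees all blocks of h2 in one go
}
#endif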

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}




/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment,p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  mi_segment_t* segment = _mi_page_segment(page);
  void* start = _mi_page_start(segment, page, NULL);
  void* end   = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found);  // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}

/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t* page;
} mi_heap_area_ex_t;

static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(xarea != NULL);
  if (xarea==NULL) return true;
  const mi_heap_area_t* area = &xarea->area;
  mi_page_t* page = xarea->page;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  _mi_page_free_collect(page,true);
  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);  // without padding
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS   (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
  memset(free_map, 0, sizeof(free_map));

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = offset / bsize;  // Todo: avoid division?
    mi_assert_internal( blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / sizeof(uintptr_t));
    size_t bit = blockidx - (bitidx * sizeof(uintptr_t));
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  for (size_t i = 0; i < page->capacity; i++) {
    size_t bitidx = (i / sizeof(uintptr_t));
    size_t bit = i - (bitidx * sizeof(uintptr_t));
    uintptr_t m = free_map[bitidx];
    if (bit == 0 && m == UINTPTR_MAX) {
      i += (sizeof(uintptr_t) - 1);  // skip a run of free blocks
    }
    else if ((m & ((uintptr_t)1 << bit)) == 0) {
      #if MI_DEBUG>1
      used_count++;
      #endif
      uint8_t* block = pstart + (i * bsize);
      if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);


static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  xarea.page = page;
  xarea.area.reserved = page->reserved * bsize;
  xarea.area.committed = page->capacity * bsize;
  xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  xarea.area.used = page->used;  // number of blocks in use (#553)
  xarea.area.block_size = ubsize;
  xarea.area.full_block_size = bsize;
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg);  // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
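// A minimal sketch (not in the upstream source) of a block visitor that sums
// the usable size of all live blocks in a heap via the public
// `mi_heap_visit_blocks` API. The visitor is called once per area with
// `block == NULL` and then once per live block.
#if 0
static bool example_sum_visitor(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area);
  if (block != NULL) { *(size_t*)arg += block_size; }
  return true;  // keep visiting
}

static size_t example_heap_live_bytes(mi_heap_t* heap) {
  size_t total = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &example_sum_visitor, &total);
  return total;
}
#endif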
@ -0,0 +1,713 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"

#include <string.h>  // memcpy, memset
#include <stdlib.h>  // atexit


// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
  0, false, false, false,
  0,       // capacity
  0,       // reserved capacity
  { 0 },   // flags
  false,   // is_zero
  0,       // retire_expire
  NULL,    // free
  0,       // used
  0,       // xblock_size
  NULL,    // local_free
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  { 0, 0 },
  #endif
  MI_ATOMIC_VAR_INIT(0),  // xthread_free
  MI_ATOMIC_VAR_INIT(0),  // xheap
  NULL, NULL
  #if MI_INTPTR_SIZE==8
  , { 0 }  // padding
  #endif
};

#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)

#if (MI_SMALL_WSIZE_MAX==128)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#else
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
#else
#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
#endif

// Empty page queues for every bin
#define QNULL(sz)  { NULL, NULL, (sz)*sizeof(uintptr_t) }
#define MI_PAGE_QUEUES_EMPTY \
  { QNULL(1), \
    QNULL(     1), QNULL(     2), QNULL(     3), QNULL(     4), QNULL(     5), QNULL(     6), QNULL(     7), QNULL(     8), /* 8 */ \
    QNULL(    10), QNULL(    12), QNULL(    14), QNULL(    16), QNULL(    20), QNULL(    24), QNULL(    28), QNULL(    32), /* 16 */ \
    QNULL(    40), QNULL(    48), QNULL(    56), QNULL(    64), QNULL(    80), QNULL(    96), QNULL(   112), QNULL(   128), /* 24 */ \
    QNULL(   160), QNULL(   192), QNULL(   224), QNULL(   256), QNULL(   320), QNULL(   384), QNULL(   448), QNULL(   512), /* 32 */ \
    QNULL(   640), QNULL(   768), QNULL(   896), QNULL(  1024), QNULL(  1280), QNULL(  1536), QNULL(  1792), QNULL(  2048), /* 40 */ \
    QNULL(  2560), QNULL(  3072), QNULL(  3584), QNULL(  4096), QNULL(  5120), QNULL(  6144), QNULL(  7168), QNULL(  8192), /* 48 */ \
    QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
    QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
    QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1  /* 655360, Huge queue */), \
    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }

#define MI_STAT_COUNT_NULL()  {0,0,0,0}

// Empty statistics
#if MI_STAT>1
#define MI_STAT_COUNT_END_NULL()  , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) }
#else
#define MI_STAT_COUNT_END_NULL()
#endif

#define MI_STATS_NULL  \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
  MI_STAT_COUNT_END_NULL()


// Empty slice span queues for every bin
#define SQNULL(sz)  { NULL, NULL, sz }
#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
  { SQNULL(1), \
    SQNULL(   1), SQNULL(   2), SQNULL(   3), SQNULL(   4), SQNULL(   5), SQNULL(   6), SQNULL(   7), SQNULL(  10), /*  8 */ \
    SQNULL(  12), SQNULL(  14), SQNULL(  16), SQNULL(  20), SQNULL(  24), SQNULL(  28), SQNULL(  32), SQNULL(  40), /* 16 */ \
    SQNULL(  48), SQNULL(  56), SQNULL(  64), SQNULL(  80), SQNULL(  96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
    SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
    SQNULL( 768), SQNULL( 896), SQNULL(1024) /* 35 */ }


// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,
// and statically allocate the backing heap for the main
// thread so it can function without doing any allocation
// itself (as accessing a thread local for the first time
// may lead to allocation itself on some platforms)
// --------------------------------------------------------

mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
  NULL,
  MI_SMALL_PAGES_EMPTY,
  MI_PAGE_QUEUES_EMPTY,
  MI_ATOMIC_VAR_INIT(NULL),
  0,                // tid
  0,                // cookie
  0,                // arena id
  { 0, 0 },         // keys
  { {0}, {0}, 0, true },  // random
  0,                // page count
  MI_BIN_FULL, 0,   // page retired min/max
  NULL,             // next
  false
};

#define tld_empty_stats  ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
#define tld_empty_os     ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os)))

mi_decl_cache_align static const mi_tld_t tld_empty = {
  0,
  false,
  NULL, NULL,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, tld_empty_stats, tld_empty_os },  // segments
  { 0, tld_empty_stats },  // os
  { MI_STATS_NULL }        // stats
};

mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return _mi_prim_thread_id();
}

// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;

extern mi_heap_t _mi_heap_main;

static mi_tld_t tld_main = {
  0, false,
  &_mi_heap_main, &_mi_heap_main,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, &tld_main.stats, &tld_main.os },  // segments
  { 0, &tld_main.stats },  // os
  { MI_STATS_NULL }        // stats
};

mi_heap_t _mi_heap_main = {
  &tld_main,
  MI_SMALL_PAGES_EMPTY,
  MI_PAGE_QUEUES_EMPTY,
  MI_ATOMIC_VAR_INIT(NULL),
  0,                // thread id
  0,                // initial cookie
  0,                // arena id
  { 0, 0 },         // the key of the main heap can be fixed (unlike page keys that need to be secure!)
  { {0x846ca68b}, {0}, 0, true },  // random
  0,                // page count
  MI_BIN_FULL, 0,   // page retired min/max
  NULL,             // next heap
  false             // can reclaim
};

bool _mi_process_is_initialized = false;  // set to `true` in `mi_process_init`.

mi_stats_t _mi_stats_main = { MI_STATS_NULL };


static void mi_heap_main_init(void) {
  if (_mi_heap_main.cookie == 0) {
    _mi_heap_main.thread_id = _mi_thread_id();
    _mi_heap_main.cookie = 1;
    #if defined(_WIN32) && !defined(MI_SHARED_LIB)
    _mi_random_init_weak(&_mi_heap_main.random);  // prevent allocation failure during bcrypt dll initialization with static linking
    #else
    _mi_random_init(&_mi_heap_main.random);
    #endif
    _mi_heap_main.cookie  = _mi_heap_random_next(&_mi_heap_main);
    _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
    _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
  }
}

mi_heap_t* _mi_heap_main_get(void) {
  mi_heap_main_init();
  return &_mi_heap_main;
}


/* -----------------------------------------------------------
  Initialization and freeing of the thread local heaps
----------------------------------------------------------- */

// note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size).
typedef struct mi_thread_data_s {
  mi_heap_t  heap;   // must come first due to cast in `_mi_heap_done`
  mi_tld_t   tld;
  mi_memid_t memid;
} mi_thread_data_t;


// Thread meta-data is allocated directly from the OS. For
// some programs that do not use thread pools and allocate and
// destroy many OS threads, this may cause too much overhead
// per thread so we maintain a small cache of recently freed metadata.

#define TD_CACHE_SIZE (16)
static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];

static mi_thread_data_t* mi_thread_data_zalloc(void) {
  // try to find thread metadata in the cache
  bool is_zero = false;
  mi_thread_data_t* td = NULL;
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td != NULL) {
      // found a cached allocation, try to use it
      td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
      if (td != NULL) {
        break;
      }
    }
  }

  // if that fails, allocate the meta data directly from the OS
  if (td == NULL) {
    mi_memid_t memid;
    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
    if (td == NULL) {
      // if this fails, try once more. (issue #257)
      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
      if (td == NULL) {
        // really out of memory
        _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
      }
    }
    if (td != NULL) {
      td->memid = memid;
      is_zero = memid.initially_zero;
    }
  }

  if (td != NULL && !is_zero) {
    _mi_memzero_aligned(td, sizeof(*td));
  }
  return td;
}

static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
  // try to add the thread metadata to the cache
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td == NULL) {
      mi_thread_data_t* expected = NULL;
      if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) {
        return;
      }
    }
  }
  // if that fails, just free it directly
  _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
}

void _mi_thread_data_collect(void) {
  // free all thread metadata from the cache
  for (int i = 0; i < TD_CACHE_SIZE; i++) {
    mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
    if (td != NULL) {
      td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
      if (td != NULL) {
        _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
      }
    }
  }
}

// Initialize the thread local default heap, called from `mi_thread_init`
static bool _mi_heap_init(void) {
  if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
  if (_mi_is_main_thread()) {
    // mi_assert_internal(_mi_heap_main.thread_id != 0);  // can happen on freeBSD where alloc is called before any initialization
    // the main heap is statically allocated
    mi_heap_main_init();
    _mi_heap_set_default_direct(&_mi_heap_main);
    //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
  }
  else {
    // use `_mi_os_alloc` to allocate directly from the OS
    mi_thread_data_t* td = mi_thread_data_zalloc();
    if (td == NULL) return false;

    mi_tld_t*  tld = &td->tld;
    mi_heap_t* heap = &td->heap;
    _mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld));
    _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap));
    heap->thread_id = _mi_thread_id();
    #if defined(_WIN32) && !defined(MI_SHARED_LIB)
    _mi_random_init_weak(&heap->random);  // match mi_heap_main_init()
    #else
    _mi_random_init(&heap->random);
    #endif
    heap->cookie  = _mi_heap_random_next(heap) | 1;
    heap->keys[0] = _mi_heap_random_next(heap);
    heap->keys[1] = _mi_heap_random_next(heap);
    heap->tld = tld;
    tld->heap_backing = heap;
    tld->heaps = heap;
    tld->segments.stats = &tld->stats;
    tld->segments.os = &tld->os;
    tld->os.stats = &tld->stats;
    _mi_heap_set_default_direct(heap);
  }
  return false;
}

// Free the thread local default heap (called from `mi_thread_done`)
static bool _mi_heap_done(mi_heap_t* heap) {
  if (!mi_heap_is_initialized(heap)) return true;

  // reset default heap
  _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty);

  // switch to backing heap
  heap = heap->tld->heap_backing;
  if (!mi_heap_is_initialized(heap)) return false;

  // delete all non-backing heaps in this thread
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;  // save `next` as `curr` will be freed
    if (curr != heap) {
      mi_assert_internal(!mi_heap_is_backing(curr));
      mi_heap_delete(curr);
    }
    curr = next;
  }
  mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL);
  mi_assert_internal(mi_heap_is_backing(heap));

  // collect if not the main thread
  if (heap != &_mi_heap_main) {
    _mi_heap_collect_abandon(heap);
  }

  // merge stats
  _mi_stats_done(&heap->tld->stats);

  // free if not the main thread
  if (heap != &_mi_heap_main) {
    // the following assertion does not always hold for huge segments as those are always treated
    // as abandoned: one may allocate in one thread, but deallocate in another, in which case
    // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
    // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
    mi_thread_data_free((mi_thread_data_t*)heap);
  }
  else {
    #if 0
    // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
    // there may still be delete/free calls after the mi_fls_done is called. Issue #207
    _mi_heap_destroy_pages(heap);
    mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
    #endif
  }
  return false;
}



// --------------------------------------------------------
// Try to run `mi_thread_done()` automatically so any memory
// owned by the thread but not yet released can be abandoned
// and re-owned by another thread.
//
// 1. windows dynamic library:
//    call from DllMain on DLL_THREAD_DETACH
// 2. windows static library:
//    use `FlsAlloc` to call a destructor when the thread is done
// 3. unix, pthreads:
//    use a pthread key to call a destructor when a pthread is done
//
// In the last two cases we also need to call `mi_process_init`
// to set up the thread local keys.
// --------------------------------------------------------

// Set up handlers so `mi_thread_done` is called automatically
static void mi_process_setup_auto_thread_done(void) {
  static bool tls_initialized = false;  // fine if it races
  if (tls_initialized) return;
  tls_initialized = true;
  _mi_prim_thread_init_auto_done();
  _mi_heap_set_default_direct(&_mi_heap_main);
}


bool _mi_is_main_thread(void) {
  return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id());
}
|
||||
|
||||
static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1);
|
||||
|
||||
size_t _mi_current_thread_count(void) {
|
||||
return mi_atomic_load_relaxed(&thread_count);
|
||||
}
|
||||
|
||||
// This is called from the `mi_malloc_generic`
|
||||
void mi_thread_init(void) mi_attr_noexcept
|
||||
{
|
||||
// ensure our process has started already
|
||||
mi_process_init();
|
||||
|
||||
// initialize the thread local default heap
|
||||
// (this will call `_mi_heap_set_default_direct` and thus set the
|
||||
// fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
|
||||
if (_mi_heap_init()) return; // returns true if already initialized
|
||||
|
||||
_mi_stat_increase(&_mi_stats_main.threads, 1);
|
||||
mi_atomic_increment_relaxed(&thread_count);
|
||||
//_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
|
||||
}
|
||||
|
||||
void mi_thread_done(void) mi_attr_noexcept {
|
||||
_mi_thread_done(NULL);
|
||||
}
|
||||
|
||||
void _mi_thread_done(mi_heap_t* heap)
|
||||
{
|
||||
// calling with NULL implies using the default heap
|
||||
if (heap == NULL) {
|
||||
heap = mi_prim_get_default_heap();
|
||||
if (heap == NULL) return;
|
||||
}
|
||||
|
||||
// prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
|
||||
if (!mi_heap_is_initialized(heap)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// adjust stats
|
||||
mi_atomic_decrement_relaxed(&thread_count);
|
||||
_mi_stat_decrease(&_mi_stats_main.threads, 1);
|
||||
|
||||
// check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
|
||||
if (heap->thread_id != _mi_thread_id()) return;
|
||||
|
||||
// abandon the thread local heap
|
||||
if (_mi_heap_done(heap)) return; // returns true if already ran
|
||||
}
|
||||
|
||||
void _mi_heap_set_default_direct(mi_heap_t* heap) {
|
||||
mi_assert_internal(heap != NULL);
|
||||
#if defined(MI_TLS_SLOT)
|
||||
mi_prim_tls_slot_set(MI_TLS_SLOT,heap);
|
||||
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
|
||||
*mi_tls_pthread_heap_slot() = heap;
|
||||
#elif defined(MI_TLS_PTHREAD)
|
||||
// we use _mi_heap_default_key
|
||||
#else
|
||||
_mi_heap_default = heap;
|
||||
#endif
|
||||
|
||||
// ensure the default heap is passed to `_mi_thread_done`
|
||||
// setting to a non-NULL value also ensures `mi_thread_done` is called.
|
||||
_mi_prim_thread_associate_default_heap(heap);
|
||||
}


// --------------------------------------------------------
// Run functions on process init/done, and thread init/done
// --------------------------------------------------------
static void mi_cdecl mi_process_done(void);

static bool os_preloading = true;    // true until this module is initialized
static bool mi_redirected = false;   // true if malloc redirects to mi_malloc

// Returns true if this module has not been initialized; don't use C runtime routines until it returns false.
bool mi_decl_noinline _mi_preloading(void) {
  return os_preloading;
}

mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
  return mi_redirected;
}

// Communicate with the redirection module on Windows
#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
#ifdef __cplusplus
extern "C" {
#endif
mi_decl_export void _mi_redirect_entry(DWORD reason) {
  // called on redirection; careful as this may be called before DllMain
  if (reason == DLL_PROCESS_ATTACH) {
    mi_redirected = true;
  }
  else if (reason == DLL_PROCESS_DETACH) {
    mi_redirected = false;
  }
  else if (reason == DLL_THREAD_DETACH) {
    mi_thread_done();
  }
}
__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
__declspec(dllimport) void mi_cdecl mi_allocator_done(void);
#ifdef __cplusplus
}
#endif
#else
static bool mi_allocator_init(const char** message) {
  if (message != NULL) *message = NULL;
  return true;
}
static void mi_allocator_done(void) {
  // nothing to do
}
#endif

// Called once by the process loader
static void mi_process_load(void) {
  mi_heap_main_init();
  #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
  volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
  if (dummy == NULL) return;                    // use dummy or otherwise the access may get optimized away (issue #697)
  #endif
  os_preloading = false;
  mi_assert_internal(_mi_is_main_thread());
  #if !(defined(_WIN32) && defined(MI_SHARED_LIB))  // use Dll process detach (see below) instead of atexit (issue #521)
  atexit(&mi_process_done);
  #endif
  _mi_options_init();
  mi_process_setup_auto_thread_done();
  mi_process_init();
  if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");

  // show message from the redirector (if present)
  const char* msg = NULL;
  mi_allocator_init(&msg);
  if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
    _mi_fputs(NULL,NULL,NULL,msg);
  }

  // reseed random
  _mi_random_reinit_if_weak(&_mi_heap_main.random);
}

#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
mi_decl_cache_align bool _mi_cpu_has_fsrm = false;

static void mi_detect_cpu_features(void) {
  // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
  int32_t cpu_info[4];
  __cpuid(cpu_info, 7);
  _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
}
#else
static void mi_detect_cpu_features(void) {
  // nothing
}
#endif

// Initialize the process; called by thread_init or the process loader
void mi_process_init(void) mi_attr_noexcept {
  // ensure we are called once
  static mi_atomic_once_t process_init;
  #if _MSC_VER < 1920
  mi_heap_main_init(); // vs2017 can dynamically re-initialize _mi_heap_main
  #endif
  if (!mi_atomic_once(&process_init)) return;
  _mi_process_is_initialized = true;
  _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
  mi_process_setup_auto_thread_done();

  mi_detect_cpu_features();
  _mi_os_init();
  mi_heap_main_init();
  #if MI_DEBUG
  _mi_verbose_message("debug level : %d\n", MI_DEBUG);
  #endif
  _mi_verbose_message("secure level: %d\n", MI_SECURE);
  _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL);
  #if MI_TSAN
  _mi_verbose_message("thread sanitizer enabled\n");
  #endif
  mi_thread_init();

  #if defined(_WIN32)
  // On Windows, when building as a static lib the FLS cleanup happens too early for the main thread.
  // To avoid this, set the FLS value for the main thread to NULL so the FLS cleanup
  // will not call _mi_thread_done on the (still executing) main thread. See issue #508.
  _mi_prim_thread_associate_default_heap(NULL);
  #endif

  mi_stats_reset();  // only call stat reset *after* thread init (or the heap tld == NULL)
  mi_track_init();

  if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
    size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024);
    long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at);
    if (reserve_at != -1) {
      mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
    } else {
      mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
    }
  }
  if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
    long ksize = mi_option_get(mi_option_reserve_os_memory);
    if (ksize > 0) {
      mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
    }
  }
}

// Called when the process is done (through `at_exit`)
static void mi_cdecl mi_process_done(void) {
  // only shutdown if we were initialized
  if (!_mi_process_is_initialized) return;
  // ensure we are called once
  static bool process_done = false;
  if (process_done) return;
  process_done = true;

  // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
  _mi_prim_thread_done_auto_done();

  #ifndef MI_SKIP_COLLECT_ON_EXIT
  #if (MI_DEBUG || !defined(MI_SHARED_LIB))
  // free all memory if possible on process exit. This is not needed for a stand-alone process
  // but should be done if mimalloc is statically linked into another shared library which
  // is repeatedly loaded/unloaded, see issue #281.
  mi_collect(true /* force */ );
  #endif
  #endif

  // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free
  // since after process_done there might still be other code running that calls `free` (like at_exit routines,
  // or C-runtime termination code).
  if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
    mi_collect(true /* force */);
    _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
    _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
  }

  if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
    mi_stats_print(NULL);
  }
  mi_allocator_done();
  _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
  os_preloading = true;  // don't call the C runtime anymore
}



#if defined(_WIN32) && defined(MI_SHARED_LIB)
  // Windows DLL: easy to hook into process_init and thread_done
  __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
    MI_UNUSED(reserved);
    MI_UNUSED(inst);
    if (reason==DLL_PROCESS_ATTACH) {
      mi_process_load();
    }
    else if (reason==DLL_PROCESS_DETACH) {
      mi_process_done();
    }
    else if (reason==DLL_THREAD_DETACH) {
      if (!mi_is_redirected()) {
        mi_thread_done();
      }
    }
    return TRUE;
  }

#elif defined(_MSC_VER)
  // MSVC: use data section magic for static libraries
  // See <https://www.codeguru.com/cpp/misc/misc/applicationcontrol/article.php/c6945/Running-Code-Before-and-After-Main.htm>
  static int _mi_process_init(void) {
    mi_process_load();
    return 0;
  }
  typedef int(*_mi_crt_callback_t)(void);
  #if defined(_M_X64) || defined(_M_ARM64)
    __pragma(comment(linker, "/include:" "_mi_msvc_initu"))
    #pragma section(".CRT$XIU", long, read)
  #else
    __pragma(comment(linker, "/include:" "__mi_msvc_initu"))
  #endif
  #pragma data_seg(".CRT$XIU")
  mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init };
  #pragma data_seg()

#elif defined(__cplusplus)
  // C++: use static initialization to detect process start
  static bool _mi_process_init(void) {
    mi_process_load();
    return (_mi_heap_main.thread_id != 0);
  }
  static bool mi_initialized = _mi_process_init();

#elif defined(__GNUC__) || defined(__clang__)
  // GCC, Clang: use the constructor attribute
  static void __attribute__((constructor)) _mi_process_init(void) {
    mi_process_load();
  }

#else
  #pragma message("define a way to call mi_process_load on your platform")
#endif
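
// Illustrative sketch (not part of this file): in the GCC/Clang branch above, a
// constructor-attribute function runs before `main`, which is how
// `mi_process_load` is invoked without any explicit call from the program.
//
//   #include <stdio.h>
//   static void __attribute__((constructor)) early(void) { puts("before main"); }
//   int main(void) { puts("main"); return 0; }   // prints "before main", then "main"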
@@ -0,0 +1,566 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_H
#define MIMALLOC_H

#define MI_MALLOC_VERSION 212   // major + 2 digits minor

// ------------------------------------------------------
// Compiler specific attributes
// ------------------------------------------------------

#ifdef __cplusplus
  #if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
    #define mi_attr_noexcept   noexcept
  #else
    #define mi_attr_noexcept   throw()
  #endif
#else
  #define mi_attr_noexcept
#endif

#if defined(__cplusplus) && (__cplusplus >= 201703)
  #define mi_decl_nodiscard    [[nodiscard]]
#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)  // includes clang, icc, and clang-cl
  #define mi_decl_nodiscard    __attribute__((warn_unused_result))
#elif defined(_HAS_NODISCARD)
  #define mi_decl_nodiscard    _NODISCARD
#elif (_MSC_VER >= 1700)
  #define mi_decl_nodiscard    _Check_return_
#else
  #define mi_decl_nodiscard
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
  #if !defined(MI_SHARED_LIB)
    #define mi_decl_export
  #elif defined(MI_SHARED_LIB_EXPORT)
    #define mi_decl_export     __declspec(dllexport)
  #else
    #define mi_decl_export     __declspec(dllimport)
  #endif
  #if defined(__MINGW32__)
    #define mi_decl_restrict
    #define mi_attr_malloc     __attribute__((malloc))
  #else
    #if (_MSC_VER >= 1900) && !defined(__EDG__)
      #define mi_decl_restrict __declspec(allocator) __declspec(restrict)
    #else
      #define mi_decl_restrict __declspec(restrict)
    #endif
    #define mi_attr_malloc
  #endif
  #define mi_cdecl             __cdecl
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#elif defined(__GNUC__)        // includes clang and icc
  #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
    #define mi_decl_export     __attribute__((visibility("default")))
  #else
    #define mi_decl_export
  #endif
  #define mi_cdecl             // leads to warnings... __attribute__((cdecl))
  #define mi_decl_restrict
  #define mi_attr_malloc       __attribute__((malloc))
  #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
    #define mi_attr_alloc_size(s)
    #define mi_attr_alloc_size2(s1,s2)
    #define mi_attr_alloc_align(p)
  #elif defined(__INTEL_COMPILER)
    #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
    #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
    #define mi_attr_alloc_align(p)
  #else
    #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
    #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
    #define mi_attr_alloc_align(p)      __attribute__((alloc_align(p)))
  #endif
#else
  #define mi_cdecl
  #define mi_decl_export
  #define mi_decl_restrict
  #define mi_attr_malloc
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#endif

// ------------------------------------------------------
// Includes
// ------------------------------------------------------

#include "git-compat-util.h"

#include <stdbool.h>     // bool
#include <stdint.h>      // INTPTR_MAX

#ifdef __cplusplus
extern "C" {
#endif

// ------------------------------------------------------
// Standard malloc interface
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_export void mi_free(void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
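
// Illustrative usage sketch (not part of the original header): the functions
// above mirror the standard C allocation API, including realloc's failure
// contract.
//
//   char* copy   = mi_strdup("hello");          // allocate and copy a string
//   void* buf    = mi_malloc(64);               // 64 heap-allocated bytes
//   void* bigger = mi_realloc(buf, 128);        // like realloc: NULL on failure, `buf` stays valid
//   if (bigger != NULL) { buf = bigger; }
//   mi_free(buf);
//   mi_free(copy);                              // mi_free works for any mi_ allocation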

// ------------------------------------------------------
// Extended functionality
// ------------------------------------------------------
#define MI_SMALL_WSIZE_MAX  (128)
#define MI_SMALL_SIZE_MAX   (MI_SMALL_WSIZE_MAX*sizeof(void*))

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;


// ------------------------------------------------------
// Internals
// ------------------------------------------------------

typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept;

typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg);
mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept;

typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);

mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
mi_decl_export int  mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept;  // backward compatibility: `out` is ignored and should be NULL
mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

mi_decl_export void mi_process_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_done(void) mi_attr_noexcept;
mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
                                    size_t* current_rss, size_t* peak_rss,
                                    size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;

// -------------------------------------------------------------------------------------
// Aligned allocation
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
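
// Illustrative sketch (not part of the original header): as noted above,
// unlike `aligned_alloc` the size comes first and the alignment second.
//
//   void* block = mi_malloc_aligned(1024, 64);  // 1 KiB, 64-byte aligned
//   // aligned_alloc(64, 1024) is the libc equivalent, with swapped arguments
//   mi_free(block);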


// -------------------------------------------------------------------------------------
// Heaps: first-class, but a heap can only allocate from the thread that created it.
// -------------------------------------------------------------------------------------

struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;

mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void       mi_heap_delete(mi_heap_t* heap);
mi_decl_export void       mi_heap_destroy(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void       mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);

mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
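
// Illustrative sketch (not part of the original header): a short-lived heap
// whose allocations are all released in one go by `mi_heap_destroy`.
//
//   mi_heap_t* heap = mi_heap_new();
//   char* s = mi_heap_strdup(heap, "scratch");
//   void* a = mi_heap_zalloc(heap, 256);
//   // ... use s and a; no individual frees needed ...
//   mi_heap_destroy(heap);   // frees every block allocated in `heap` at once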


// --------------------------------------------------------------------------------
// Zero initialized re-allocation.
// Only valid on memory that was originally allocated with zero initialization too,
// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc.
// see <https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992>
// --------------------------------------------------------------------------------

mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);

mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);

mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);

mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
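
// Illustrative sketch (not part of the original header): growing a
// zero-initialized buffer while keeping the newly added tail zeroed.
//
//   int* xs = (int*)mi_zalloc(16*sizeof(int));        // 16 zeroed ints
//   int* ys = (int*)mi_rezalloc(xs, 64*sizeof(int));  // new tail bytes stay zeroed
//   if (ys != NULL) { xs = ys; }                      // rezalloc returns NULL on failure
//   mi_free(xs);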


// ------------------------------------------------------
// Analysis
// ------------------------------------------------------

mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_check_owned(const void* p);

// An area of heap space contains blocks of a single size.
typedef struct mi_heap_area_s {
  void*  blocks;          // start of the area containing heap blocks
  size_t reserved;        // bytes reserved for this area (virtual)
  size_t committed;       // current available bytes for this area
  size_t used;            // number of allocated blocks
  size_t block_size;      // size in bytes of each block
  size_t full_block_size; // size in bytes of a full block including padding and metadata.
} mi_heap_area_t;

typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);

mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
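
// Illustrative sketch (not part of the original header): a visitor matching the
// `mi_block_visit_fun` typedef above that counts live blocks; `arg` carries the
// accumulator.
//
//   static bool count_block(const mi_heap_t* heap, const mi_heap_area_t* area,
//                           void* block, size_t block_size, void* arg) {
//     (void)heap; (void)area; (void)block_size;
//     if (block != NULL) { (*(size_t*)arg)++; }
//     return true;  // keep visiting
//   }
//   // size_t n = 0;
//   // mi_heap_visit_blocks(mi_heap_get_default(), true /* visit all blocks */, &count_block, &n);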

// Experimental
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;

mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;

mi_decl_export int  mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;

mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;

// Experimental: heaps associated with specific memory arenas
typedef int mi_arena_id_t;
mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
mi_decl_export int  mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export int  mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;

#if MI_MALLOC_VERSION >= 182
// Create a heap that only allocates in the specified arena
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif

// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;


// ------------------------------------------------------
// Convenience
// ------------------------------------------------------

#define mi_malloc_tp(tp)                ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp)                ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n)              ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n)             ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n)          ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n)          ((tp*)mi_recalloc(p,n,sizeof(tp)))

#define mi_heap_malloc_tp(hp,tp)        ((tp*)mi_heap_malloc(hp,sizeof(tp)))
#define mi_heap_zalloc_tp(hp,tp)        ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
#define mi_heap_calloc_tp(hp,tp,n)      ((tp*)mi_heap_calloc(hp,n,sizeof(tp)))
#define mi_heap_mallocn_tp(hp,tp,n)     ((tp*)mi_heap_mallocn(hp,n,sizeof(tp)))
#define mi_heap_reallocn_tp(hp,p,tp,n)  ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp)))
#define mi_heap_recalloc_tp(hp,p,tp,n)  ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp)))
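
// Illustrative sketch (not part of the original header): the `_tp` macros just
// add a typed cast around the raw allocators; `point_t` is a hypothetical type.
//
//   typedef struct point_s { int x, y; } point_t;
//   point_t* p  = mi_malloc_tp(point_t);      // one (uninitialized) point_t
//   point_t* ps = mi_calloc_tp(point_t, 16);  // 16 zero-initialized points
//   mi_free(ps);
//   mi_free(p);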


// ------------------------------------------------------
// Options
// ------------------------------------------------------

typedef enum mi_option_e {
  // stable options
  mi_option_show_errors,              // print error messages
  mi_option_show_stats,               // print statistics on termination
  mi_option_verbose,                  // print verbose messages
  // the following options are experimental (see src/options.h)
  mi_option_eager_commit,             // eager commit segments? (after `eager_commit_delay` segments) (=1)
  mi_option_arena_eager_commit,       // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
  mi_option_purge_decommits,          // should a memory purge decommit (or only reset)? (=1)
  mi_option_allow_large_os_pages,     // allow large (2MiB) OS pages, implies eager commit
  mi_option_reserve_huge_os_pages,    // reserve N huge OS pages (1GiB/page) at startup
  mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
  mi_option_reserve_os_memory,        // reserve specified amount of OS memory in an arena at startup
  mi_option_deprecated_segment_cache,
  mi_option_deprecated_page_reset,
  mi_option_abandoned_page_purge,     // immediately purge delayed purges on thread termination
  mi_option_deprecated_segment_reset,
  mi_option_eager_commit_delay,
  mi_option_purge_delay,              // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
  mi_option_use_numa_nodes,           // 0 = use all available numa nodes, otherwise use at most N nodes.
  mi_option_limit_os_alloc,           // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
  mi_option_os_tag,                   // tag used for OS logging (macOS only for now)
  mi_option_max_errors,               // issue at most N error messages
  mi_option_max_warnings,             // issue at most N warning messages
  mi_option_max_segment_reclaim,
  mi_option_destroy_on_exit,          // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
  mi_option_arena_reserve,            // initial memory size in KiB for arena reservation (1GiB on 64-bit)
  mi_option_arena_purge_mult,
  mi_option_purge_extend_delay,
  _mi_option_last,
  // legacy option names
  mi_option_large_os_pages = mi_option_allow_large_os_pages,
  mi_option_eager_region_commit = mi_option_arena_eager_commit,
  mi_option_reset_decommits = mi_option_purge_decommits,
  mi_option_reset_delay = mi_option_purge_delay,
  mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;


mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);

mi_decl_nodiscard mi_decl_export long   mi_option_get(mi_option_t option);
mi_decl_nodiscard mi_decl_export long   mi_option_get_clamp(mi_option_t option, long min, long max);
mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
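
// Illustrative sketch (not part of the original header): options can be set
// programmatically before first use; they can generally also be set through
// `MIMALLOC_`-prefixed environment variables.
//
//   mi_option_enable(mi_option_show_stats);             // print stats at exit
//   mi_option_set(mi_option_reserve_huge_os_pages, 4);  // reserve 4 x 1GiB pages
//   long delay = mi_option_get(mi_option_purge_delay);  // read the current value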


// -------------------------------------------------------------------------------------------------------
// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before freeing.
// -------------------------------------------------------------------------------------------------------

mi_decl_export void  mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;

mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);

mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int   mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;

mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char*  mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;

mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;

// The `mi_new` wrappers implement C++ semantics on out-of-memory: instead of directly returning `NULL`
// they call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);

#ifdef __cplusplus
}
#endif

// ---------------------------------------------------------------------------------------------
// Implement the C++ std::allocator interface for use in STL containers.
// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
// ---------------------------------------------------------------------------------------------
#ifdef __cplusplus

#include <cstddef>      // std::size_t
#include <cstdint>      // PTRDIFF_MAX
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
#include <type_traits>  // std::true_type
#include <utility>      // std::forward
#endif

template<class T> struct _mi_stl_allocator_common {
  typedef T                 value_type;
  typedef std::size_t       size_type;
  typedef std::ptrdiff_t    difference_type;
  typedef value_type&       reference;
  typedef value_type const& const_reference;
  typedef value_type*       pointer;
  typedef value_type const* const_pointer;

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using propagate_on_container_copy_assignment = std::true_type;
  using propagate_on_container_move_assignment = std::true_type;
  using propagate_on_container_swap            = std::true_type;
  template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
  template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
  #else
  void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
  void destroy(pointer p) { p->~value_type(); }
  #endif

  size_type     max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
  pointer       address(reference x) const        { return &x; }
  const_pointer address(const_reference x) const  { return &x; }
};

template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;
  template <class U> struct rebind { typedef mi_stl_allocator<U> other; };

  mi_stl_allocator()                                             mi_attr_noexcept = default;
  mi_stl_allocator(const mi_stl_allocator&)                      mi_attr_noexcept = default;
  template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
  mi_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T* p, size_type) { mi_free(p); }

  #if (__cplusplus >= 201703L)  // C++17
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using is_always_equal = std::true_type;
  #endif
};

template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
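
// Illustrative sketch (not part of the original header): plugging the allocator
// into an STL container so element storage comes from mimalloc.
//
//   #include <vector>
//   std::vector<int, mi_stl_allocator<int>> v;
//   v.push_back(42);   // backing storage allocated via mi_new_n / freed via mi_free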


#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)  // C++11
#define MI_HAS_HEAP_STL_ALLOCATOR 1

#include <memory>      // std::shared_ptr

// Common base class for STL allocators in a specific heap
template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;

  _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { }  /* will not delete nor destroy the passed in heap */

  #if (__cplusplus >= 201703L)  // C++17
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using is_always_equal = std::false_type;
  #endif

  void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
  template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }

protected:
  std::shared_ptr<mi_heap_t> heap;
  template<class U, bool D> friend struct _mi_heap_stl_allocator_common;

  _mi_heap_stl_allocator_common() {
    mi_heap_t* hp = mi_heap_new();
    this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */
  }
  _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
  template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }

private:
  static void heap_delete(mi_heap_t* hp)  { if (hp != NULL) { mi_heap_delete(hp); } }
  static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
};

// STL allocator allocation in a specific heap
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
  using typename _mi_heap_stl_allocator_common<T, false>::size_type;
  mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }                 // creates a fresh heap that is deleted when the destructor is called
  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
  template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

  mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T* p, size_type) { mi_free(p); }
  template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
};

template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }


// STL allocator allocation in a specific heap, where `free` does nothing and
// the heap is destroyed in one go on destruction -- use with care!
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
  using typename _mi_heap_stl_allocator_common<T, true>::size_type;
  mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { }                 // creates a fresh heap that is destroyed when the destructor is called
  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
  template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

  mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
  template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
};

template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }

#endif // C++11

#endif // __cplusplus

#endif
@@ -0,0 +1,385 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_ATOMIC_H
#define MIMALLOC_ATOMIC_H

// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
// This is why we try to use only `uintptr_t` and `<type>*` as atomic types.
// To gain better insight into the range of used atomics, we use explicitly named memory order operations
// instead of passing the memory order as a parameter.
// -----------------------------------------------------------------------------------------------

#if defined(__cplusplus)
// Use C++ atomics
#include <atomic>
#define  _Atomic(tp)            std::atomic<tp>
#define  mi_atomic(name)        std::atomic_##name
#define  mi_memory_order(name)  std::memory_order_##name
#if !defined(ATOMIC_VAR_INIT) || (__cplusplus >= 202002L) // c++20, see issue #571
#define MI_ATOMIC_VAR_INIT(x)  x
#else
#define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
#endif
#elif defined(_MSC_VER)
// Use MSVC C wrapper for C11 atomics
#define  _Atomic(tp)            tp
#define  MI_ATOMIC_VAR_INIT(x)  x
#define  mi_atomic(name)        mi_atomic_##name
#define  mi_memory_order(name)  mi_memory_order_##name
#else
// Use C11 atomics
#include <stdatomic.h>
#define  mi_atomic(name)        atomic_##name
#define  mi_memory_order(name)  memory_order_##name
#if !defined(ATOMIC_VAR_INIT) || (__STDC_VERSION__ >= 201710L) // c17, see issue #735
#define MI_ATOMIC_VAR_INIT(x)  x
#else
#define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
#endif
#endif

// Various defines for all used memory orders in mimalloc
#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)

#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)

#define mi_atomic_load_acquire(p)                mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_load_relaxed(p)                mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_store_release(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_store_relaxed(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_exchange_release(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_exchange_acq_rel(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_cas_weak_release(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_weak_acq_rel(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
#define mi_atomic_cas_strong_release(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_strong_acq_rel(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))

#define mi_atomic_add_relaxed(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_sub_relaxed(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_add_acq_rel(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_sub_acq_rel(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_and_acq_rel(p,x)               mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_or_acq_rel(p,x)                mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))

#define mi_atomic_increment_relaxed(p)           mi_atomic_add_relaxed(p,(uintptr_t)1)
#define mi_atomic_decrement_relaxed(p)           mi_atomic_sub_relaxed(p,(uintptr_t)1)
#define mi_atomic_increment_acq_rel(p)           mi_atomic_add_acq_rel(p,(uintptr_t)1)
#define mi_atomic_decrement_acq_rel(p)           mi_atomic_sub_acq_rel(p,(uintptr_t)1)
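
// Illustrative sketch (not part of the original header): a relaxed statistics
// counter, mirroring how `thread_count` is maintained in init.c.
//
//   static _Atomic(uintptr_t) events = MI_ATOMIC_VAR_INIT(0);
//   mi_atomic_increment_relaxed(&events);              // count an event
//   uintptr_t seen = mi_atomic_load_relaxed(&events);  // a racy read is fine for stats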
|
||||

static inline void mi_atomic_yield(void);
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);


#if defined(__cplusplus) || !defined(_MSC_VER)

// In C++/C11 atomics we have polymorphic atomics so we can use the typed `ptr` variants (where `tp` is the type of the atomic value)
// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p)
#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p)

// In C++ we need to add casts to help resolve templates if NULL is passed
#if defined(__cplusplus)
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,(tp*)x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,(tp*)x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,(tp*)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,(tp*)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,(tp*)x)
#else
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,x)
#endif

// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
  return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
  int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
  while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
}

// Used by timers
#define mi_atomic_loadi64_acquire(p)    mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)    mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))

#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
#define mi_atomic_addi64_acq_rel(p,i)          mi_atomic_add_acq_rel(p,i)


#elif defined(_MSC_VER)

// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <intrin.h>
#ifdef _WIN64
typedef LONG64   msc_intptr_t;
#define MI_64(f) f##64
#else
typedef LONG     msc_intptr_t;
#define MI_64(f) f
#endif

typedef enum mi_memory_order_e {
  mi_memory_order_relaxed,
  mi_memory_order_consume,
  mi_memory_order_acquire,
  mi_memory_order_release,
  mi_memory_order_acq_rel,
  mi_memory_order_seq_cst
} mi_memory_order;

static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
}
static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  (void)(mo1); (void)(mo2);
  uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
  if (read == *expected) {
    return true;
  }
  else {
    *expected = read;
    return false;
  }
}
static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
}
static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
}
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
  (void)(mo);
  _Atomic(uintptr_t) x = 0;
  mi_atomic_exchange_explicit(&x, 1, mo);
}
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
  return *p;
#else
  uintptr_t x = *p;
  if (mo > mi_memory_order_relaxed) {
    while (!mi_atomic_compare_exchange_weak_explicit(p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
  }
  return x;
#endif
}
static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
  *p = x;
#else
  mi_atomic_exchange_explicit(p, x, mo);
#endif
}
static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_X64)
  return *p;
#else
  int64_t old = *p;
  int64_t x = old;
  while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
    x = old;
  }
  return x;
#endif
}
static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_X64)   // a plain 64-bit store is only atomic on x64; 32-bit x86 must use InterlockedExchange64
  *p = x;
#else
  InterlockedExchange64(p, x);
#endif
}

// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
#ifdef _WIN64
  return (int64_t)mi_atomic_addi((int64_t*)p, add);
#else
  int64_t current;
  int64_t sum;
  do {
    current = *p;
    sum = current + add;
  } while (_InterlockedCompareExchange64(p, sum, current) != current);
  return current;
#endif
}
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
  int64_t current;
  do {
    current = *p;
  } while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}

static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t)*p, int64_t i) {
  mi_atomic_addi64_relaxed(p, i);
}

static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t)*p, int64_t* exp, int64_t des) {
  int64_t read = _InterlockedCompareExchange64(p, des, *exp);
  if (read == *exp) {
    return true;
  }
  else {
    *exp = read;
    return false;
  }
}

// The pointer macros cast to `uintptr_t`.
#define mi_atomic_load_ptr_acquire(tp,p)                (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p)                (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)

#define mi_atomic_loadi64_acquire(p)    mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)    mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))


#endif


// Atomically add a signed value; returns the previous value.
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
  return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
}

// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
  return (intptr_t)mi_atomic_addi(p, -sub);
}

typedef _Atomic(uintptr_t) mi_atomic_once_t;

// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
  if (mi_atomic_load_relaxed(once) != 0) return false;     // quick test
  uintptr_t expected = 0;
  return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
}
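
// Editor's example (hypothetical, for illustration): one-time initialization
// with mi_atomic_once; the guarded body runs in exactly one thread, exactly once.
//   static mi_atomic_once_t mi_example_once;   // zero-initialized
//   static void mi_example_init(void) {
//     if (mi_atomic_once(&mi_example_once)) {
//       /* ... perform one-time setup here ... */
//     }
//   }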

typedef _Atomic(uintptr_t) mi_atomic_guard_t;

// Allows only one thread to execute at a time
#define mi_atomic_guard(guard) \
  uintptr_t _mi_guard_expected = 0; \
  for(bool _mi_guard_once = true; \
      _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
      (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
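
// Editor's example (hypothetical): `mi_atomic_guard` expands to a declaration
// plus a `for` header, so the critical section is written as the statement that
// follows; if another thread already holds the guard, the block is skipped
// rather than blocked on.
//   static mi_atomic_guard_t mi_example_guard;   // zero-initialized
//   void mi_example_critical(void) {
//     mi_atomic_guard(&mi_example_guard) {
//       /* executed by at most one thread at a time */
//     }
//   }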


// Yield
#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
  std::this_thread::yield();
}
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline void mi_atomic_yield(void) {
  YieldProcessor();
}
#elif defined(__SSE2__)
#include <emmintrin.h>
static inline void mi_atomic_yield(void) {
  _mm_pause();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
      (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
       defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("pause" ::: "memory");
}
#elif defined(__aarch64__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile("wfe");
}
#elif (defined(__arm__) && __ARM_ARCH >= 7)
static inline void mi_atomic_yield(void) {
  __asm__ volatile("yield" ::: "memory");
}
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
#ifdef __APPLE__
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("or r27,r27,r27" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
  __asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
#elif defined(__armel__) || defined(__ARMEL__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__sun)
// Solaris: use smt_pause
#include <synch.h>
static inline void mi_atomic_yield(void) {
  smt_pause();
}
#elif defined(__wasi__)
#include <sched.h>
static inline void mi_atomic_yield(void) {
  sched_yield();
}
#else
// Fallback for other platforms
#include <unistd.h>
static inline void mi_atomic_yield(void) {
  sleep(0);
}
#endif


#endif // __MIMALLOC_ATOMIC_H
@ -0,0 +1,979 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H


// --------------------------------------------------------------------------
// This file contains the internal APIs of mimalloc and various utility
// functions and macros.
// --------------------------------------------------------------------------

#include "mimalloc/types.h"
#include "mimalloc/track.h"

#if (MI_DEBUG>0)
#define mi_trace_message(...)  _mi_trace_message(__VA_ARGS__)
#else
#define mi_trace_message(...)
#endif

#define MI_CACHE_LINE          64
#if defined(_MSC_VER)
#pragma warning(disable:4127)   // suppress constant conditional warning (due to MI_SECURE paths)
#pragma warning(disable:26812)  // unscoped enum warning
#define mi_decl_noinline        __declspec(noinline)
#define mi_decl_thread          __declspec(thread)
#define mi_decl_cache_align     __declspec(align(MI_CACHE_LINE))
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline        __attribute__((noinline))
#define mi_decl_thread          __thread
#define mi_decl_cache_align     __attribute__((aligned(MI_CACHE_LINE)))
#else
#define mi_decl_noinline
#define mi_decl_thread          __thread  // hope for the best :-)
#define mi_decl_cache_align
#endif

#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
#define __wasi__
#endif

#if defined(__cplusplus)
#define mi_decl_externc extern "C"
#else
#define mi_decl_externc
#endif

// pthreads
#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif

// "options.c"
void       _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void       _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
void       _mi_warning_message(const char* fmt, ...);
void       _mi_verbose_message(const char* fmt, ...);
void       _mi_trace_message(const char* fmt, ...);
void       _mi_options_init(void);
void       _mi_error_message(int err, const char* fmt, ...);

// random.c
void       _mi_random_init(mi_random_ctx_t* ctx);
void       _mi_random_init_weak(mi_random_ctx_t* ctx);
void       _mi_random_reinit_if_weak(mi_random_ctx_t* ctx);
void       _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t  _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t  _mi_heap_random_next(mi_heap_t* heap);
uintptr_t  _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);

// init.c
extern mi_decl_cache_align mi_stats_t       _mi_stats_main;
extern mi_decl_cache_align const mi_page_t  _mi_page_empty;
bool       _mi_is_main_thread(void);
size_t     _mi_current_thread_count(void);
bool       _mi_preloading(void);             // true while the C runtime is not initialized yet
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void);          // statically allocated main backing heap
void       _mi_thread_done(mi_heap_t* heap);
void       _mi_thread_data_collect(void);

// os.c
void       _mi_os_init(void);                // called from process init
void*      _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void       _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void       _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);

size_t     _mi_os_page_size(void);
size_t     _mi_os_good_alloc_size(size_t size);
bool       _mi_os_has_overcommit(void);
bool       _mi_os_has_virtual_reserve(void);

bool       _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
bool       _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool       _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool       _mi_os_protect(void* addr, size_t size);
bool       _mi_os_unprotect(void* addr, size_t size);
bool       _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
bool       _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);

void*      _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
void*      _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);

void*      _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
bool       _mi_os_use_large_page(size_t size, size_t alignment);
size_t     _mi_os_large_page_size(void);

void*      _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);

// arena.c
mi_arena_id_t _mi_arena_id_none(void);
void       _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
void*      _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
void*      _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool       _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
bool       _mi_arena_contains(const void* p);
void       _mi_arena_collect(bool force_purge, mi_stats_t* stats);
void       _mi_arena_unsafe_destroy_all(mi_stats_t* stats);

// "segment-map.c"
|
||||
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
|
||||
void _mi_segment_map_freed_at(const mi_segment_t* segment);
|
||||
|
||||
// "segment.c"
|
||||
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
|
||||
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
|
||||
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
|
||||
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
|
||||
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
|
||||
|
||||
#if MI_HUGE_PAGE_ABANDON
|
||||
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
|
||||
#else
|
||||
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
|
||||
#endif
|
||||
|
||||
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
|
||||
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
|
||||
void _mi_abandoned_await_readers(void);
|
||||
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
|
||||
|
||||
// "page.c"
|
||||
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
|
||||
|
||||
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
|
||||
void _mi_page_unfull(mi_page_t* page);
|
||||
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
|
||||
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
|
||||
void _mi_heap_delayed_free_all(mi_heap_t* heap);
|
||||
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
|
||||
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
|
||||
|
||||
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
|
||||
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
|
||||
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
|
||||
void _mi_deferred_free(mi_heap_t* heap, bool force);
|
||||
|
||||
void _mi_page_free_collect(mi_page_t* page,bool force);
|
||||
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
|
||||
|
||||
size_t _mi_bin_size(uint8_t bin); // for stats
|
||||
uint8_t _mi_bin(size_t size); // for stats
|
||||
|
||||
// "heap.c"
|
||||
void _mi_heap_destroy_pages(mi_heap_t* heap);
|
||||
void _mi_heap_collect_abandon(mi_heap_t* heap);
|
||||
void _mi_heap_set_default_direct(mi_heap_t* heap);
|
||||
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
|
||||
void _mi_heap_unsafe_destroy_all(void);
|
||||
|
||||
// "stats.c"
|
||||
void _mi_stats_done(mi_stats_t* stats);
|
||||
mi_msecs_t _mi_clock_now(void);
|
||||
mi_msecs_t _mi_clock_end(mi_msecs_t start);
|
||||
mi_msecs_t _mi_clock_start(void);
|
||||
|
||||
// "alloc.c"
|
||||
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
|
||||
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
|
||||
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
|
||||
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
|
||||
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
|
||||
bool _mi_free_delayed_block(mi_block_t* block);
|
||||
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
|
||||
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
|
||||
|
||||
// option.c, c primitives
|
||||
char _mi_toupper(char c);
|
||||
int _mi_strnicmp(const char* s, const char* t, size_t n);
|
||||
void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
|
||||
void _mi_strlcat(char* dest, const char* src, size_t dest_size);
|
||||
size_t _mi_strlen(const char* s);
|
||||
size_t _mi_strnlen(const char* s, size_t max_len);
|
||||
|
||||
|
||||
#if MI_DEBUG>1
|
||||
bool _mi_page_is_valid(mi_page_t* page);
|
||||
#endif
|
||||
|
||||
|
||||
// ------------------------------------------------------
// Branches
// ------------------------------------------------------

#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x)     (__builtin_expect(!!(x),false))
#define mi_likely(x)       (__builtin_expect(!!(x),true))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define mi_unlikely(x)     (x) [[unlikely]]
#define mi_likely(x)       (x) [[likely]]
#else
#define mi_unlikely(x)     (x)
#define mi_likely(x)       (x)
#endif

#ifndef __has_builtin
#define __has_builtin(x)  0
#endif


/* -----------------------------------------------------------
  Error codes passed to `_mi_fatal_error`
  All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
  For portability define undefined error codes using common Unix codes:
  <https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
----------------------------------------------------------- */
#include <errno.h>
#ifndef EAGAIN         // double free
#define EAGAIN (11)
#endif
#ifndef ENOMEM         // out of memory
#define ENOMEM (12)
#endif
#ifndef EFAULT         // corrupted free-list or meta-data
#define EFAULT (14)
#endif
#ifndef EINVAL         // trying to free an invalid pointer
#define EINVAL (22)
#endif
#ifndef EOVERFLOW      // count*size overflow
#define EOVERFLOW (75)
#endif


/* -----------------------------------------------------------
  Inlined definitions
----------------------------------------------------------- */
#define MI_UNUSED(x)     (void)(x)
#if (MI_DEBUG>0)
#define MI_UNUSED_RELEASE(x)
#else
#define MI_UNUSED_RELEASE(x)  MI_UNUSED(x)
#endif

#define MI_INIT4(x)   x(),x(),x(),x()
#define MI_INIT8(x)   MI_INIT4(x),MI_INIT4(x)
#define MI_INIT16(x)  MI_INIT8(x),MI_INIT8(x)
#define MI_INIT32(x)  MI_INIT16(x),MI_INIT16(x)
#define MI_INIT64(x)  MI_INIT32(x),MI_INIT32(x)
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)


#include <string.h>
// initialize a local variable to zero; use memset as compilers optimize constant-sized memsets
#define _mi_memzero_var(x)  memset(&x,0,sizeof(x))

// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
  return ((x & (x - 1)) == 0);
}

// Is a pointer aligned?
static inline bool _mi_is_aligned(void* p, size_t alignment) {
  mi_assert_internal(alignment != 0);
  return (((uintptr_t)p % alignment) == 0);
}

// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) {  // power of two?
    return ((sz + mask) & ~mask);
  }
  else {
    return (((sz + mask)/alignment)*alignment);
  }
}

// Align downwards
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) {  // power of two?
    return (sz & ~mask);
  }
  else {
    return ((sz / alignment) * alignment);
  }
}
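
// Editor's note: for a power-of-two alignment the mask path applies, e.g.
// _mi_align_up(13,8) == 16 and _mi_align_down(13,8) == 8; for a non-power-of-two
// alignment the division path applies, e.g. _mi_align_up(50,24) == 72.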

// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
  mi_assert_internal(divider != 0);
  return (divider == 0 ? size : ((size + divider - 1) / divider));
}

// Is memory zero initialized?
static inline bool mi_mem_is_zero(const void* p, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (((uint8_t*)p)[i] != 0) return false;
  }
  return true;
}


// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
  mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
  return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}

// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h>      // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T)    // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #if (SIZE_MAX == ULONG_MAX)
  return __builtin_umull_overflow(count, size, (unsigned long *)total);
  #elif (SIZE_MAX == UINT_MAX)
  return __builtin_umul_overflow(count, size, (unsigned int *)total);
  #else
  return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
  #endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
  *total = count * size;
  // note: gcc/clang optimize this to directly check the overflow flag
  return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif

// Safe multiply `count*size` into `total`; return `true` on overflow.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
  if (count==1) {  // quick check for the case where count is one (common for C++ allocators)
    *total = size;
    return false;
  }
  else if mi_unlikely(mi_mul_overflow(count, size, total)) {
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
    #endif
    *total = SIZE_MAX;
    return true;
  }
  else return false;
}
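
// Editor's example (hypothetical): typical use in a calloc-style entry point,
// where an overflowing `count*size` must fail cleanly instead of wrapping.
// On 64-bit, the portable fallback's cheap check passes whenever both operands
// are below 2^32 (i.e. MI_MUL_NO_OVERFLOW), since then the product fits in 64 bits.
//   static void* mi_example_calloc(size_t count, size_t size) {
//     size_t total;
//     if (mi_count_size_overflow(count, size, &total)) return NULL;
//     return mi_zalloc(total);
//   }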


/* -----------------------------------------------------------
  Heap functions
----------------------------------------------------------- */

extern const mi_heap_t _mi_heap_empty;  // read-only empty heap, initial value of the thread local default heap

static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
  return (heap->tld->heap_backing == heap);
}

static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  return (heap != &_mi_heap_empty);
}

static inline uintptr_t _mi_ptr_cookie(const void* p) {
  extern mi_heap_t _mi_heap_main;
  mi_assert_internal(_mi_heap_main.cookie != 0);
  return ((uintptr_t)p ^ _mi_heap_main.cookie);
}

/* -----------------------------------------------------------
  Pages
----------------------------------------------------------- */

static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  const size_t idx = _mi_wsize_from_size(size);
  mi_assert_internal(idx < MI_PAGES_DIRECT);
  return heap->pages_free_direct[idx];
}

// Segment that contains the pointer
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
// and we need to align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
// therefore we align one byte before `p`.
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
  mi_assert_internal(p != NULL);
  return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
}

static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
  mi_assert_internal(s->slice_offset == 0 && s->slice_count > 0);
  return (mi_page_t*)(s);
}

static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
  mi_assert_internal(p->slice_offset == 0 && p->slice_count > 0);
  return (mi_slice_t*)(p);
}

// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
  mi_segment_t* segment = _mi_ptr_segment(page);
  mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
  return segment;
}

static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
  mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
  mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
  mi_assert_internal(start->slice_offset == 0);
  mi_assert_internal(start + start->slice_count > slice);
  return start;
}

// Get the page containing the pointer (performance critical as it is called in mi_free)
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
  mi_assert_internal(p > (void*)segment);
  ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
  mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
  size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
  mi_assert_internal(idx <= segment->slice_entries);
  mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
  mi_slice_t* slice = mi_slice_first(slice0);  // adjust to the block that holds the page data
  mi_assert_internal(slice->slice_offset == 0);
  mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
  return mi_slice_to_page(slice);
}

// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
  return _mi_segment_page_start(segment, page, page_size);
}

// Get the page containing the pointer
static inline mi_page_t* _mi_ptr_page(void* p) {
  return _mi_segment_page_of(_mi_ptr_segment(p), p);
}

// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
  const size_t bsize = page->xblock_size;
  mi_assert_internal(bsize > 0);
  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
    return bsize;
  }
  else {
    size_t psize;
    _mi_segment_page_start(_mi_page_segment(page), page, &psize);
    return psize;
  }
}

static inline bool mi_page_is_huge(const mi_page_t* page) {
  return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
}

// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
  return mi_page_block_size(page) - MI_PADDING_SIZE;
}

// size of a segment
static inline size_t mi_segment_size(mi_segment_t* segment) {
  return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
}

static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
  return (uint8_t*)segment + mi_segment_size(segment);
}

// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
  return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
}

static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
  return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}

// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
}

static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
  mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
}

// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
  return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
  return mi_tf_make(mi_tf_block(tf),delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
  return mi_tf_make(block, mi_tf_delayed(tf));
}

// are all blocks in a page freed?
// note: needs an up-to-date `used` count (as the `xthread_free` list may not be empty); see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->used == 0);
}

// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL && page->reserved > 0);
  return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
}

// are there immediately available blocks, i.e. blocks on the free list?
static inline bool mi_page_immediate_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->free != NULL);
}

// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
  if (page==NULL) return true;
  uint16_t frac = page->reserved / 8U;
  return (page->reserved - page->used <= frac);
}

static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
  return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
}



//-----------------------------------------------------------
// Page flags
//-----------------------------------------------------------
static inline bool mi_page_is_in_full(const mi_page_t* page) {
  return page->flags.x.in_full;
}

static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
  page->flags.x.in_full = in_full;
}

static inline bool mi_page_has_aligned(const mi_page_t* page) {
  return page->flags.x.has_aligned;
}

static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}


/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers

This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might still be too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).

Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well any more even if `p` can be guesstimated. For example,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.

We also pass a separate `null` value to be used as `NULL` or otherwise
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
------------------------------------------------------------------- */

static inline bool mi_is_in_same_segment(const void* p, const void* q) {
  return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}

static inline bool mi_is_in_same_page(const void* p, const void* q) {
  mi_segment_t* segment = _mi_ptr_segment(p);
  if (_mi_ptr_segment(q) != segment) return false;
  // assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
  mi_page_t* page = _mi_segment_page_of(segment, p);
  size_t psize;
  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
  return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}

static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
}

static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
  void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
  return (p==null ? NULL : p);
}

static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
  uintptr_t x = (uintptr_t)(p==NULL ? null : p);
  return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
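
// Editor's example (hypothetical): the encoding round-trips, i.e.
// mi_ptr_decode(null, mi_ptr_encode(null, p, keys), keys) == p (for any `p`
// other than the sentinel itself), with NULL mapped through the per-page
// `null` sentinel so the bare value `(k2<<<k1)+k1` never appears on the list.
static inline bool mi_example_roundtrip(const void* null, const void* p, const uintptr_t* keys) {
  const mi_encoded_t enc = mi_ptr_encode(null, p, keys);
  return (mi_ptr_decode(null, enc, keys) == p);
}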

static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
  mi_track_mem_defined(block,sizeof(mi_block_t));
  mi_block_t* next;
  #ifdef MI_ENCODE_FREELIST
  next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  next = (mi_block_t*)block->next;
  #endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
  return next;
}

static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
  mi_track_mem_undefined(block,sizeof(mi_block_t));
  #ifdef MI_ENCODE_FREELIST
  block->next = mi_ptr_encode(null, next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  block->next = (mi_encoded_t)next;
  #endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
}

static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_t* next = mi_block_nextx(page,block,page->keys);
  // check for free list corruption: is `next` at least in the same page?
  // TODO: check if `next` is `page->block_size` aligned?
  if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
    _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    next = NULL;
  }
  return next;
  #else
  MI_UNUSED(page);
  return mi_block_nextx(page,block,NULL);
  #endif
}

static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_set_nextx(page,block,next, page->keys);
  #else
  MI_UNUSED(page);
  mi_block_set_nextx(page,block,next,NULL);
  #endif
}


// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------

static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    cm->mask[i] = 0;
  }
}

static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    cm->mask[i] = ~((size_t)0);
  }
}

static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != 0) return false;
  }
  return true;
}

static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != ~((size_t)0)) return false;
  }
  return true;
}

// defined in `segment.c`:
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);

#define mi_commit_mask_foreach(cm,idx,count) \
  idx = 0; \
  while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {

#define mi_commit_mask_foreach_end() \
  idx += count; \
  }
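
// Editor's example (hypothetical): iterating the committed runs of a mask;
// the foreach/foreach_end pair expands to a single `while` loop, which is why
// the two macros must always be used together.
static inline size_t mi_example_committed_count(const mi_commit_mask_t* cm) {
  size_t idx;
  size_t count;
  size_t total = 0;
  mi_commit_mask_foreach(cm, idx, count)
    total += count;          // a committed run covers [idx, idx+count)
  mi_commit_mask_foreach_end()
  return total;
}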



/* -----------------------------------------------------------
  memory id's
----------------------------------------------------------- */

static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
  mi_memid_t memid;
  _mi_memzero_var(memid);
  memid.memkind = memkind;
  return memid;
}

static inline mi_memid_t _mi_memid_none(void) {
  return _mi_memid_create(MI_MEM_NONE);
}

static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
  mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
  memid.initially_committed = committed;
  memid.initially_zero = is_zero;
  memid.is_pinned = is_large;
  return memid;
}


// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------

static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
  if (x==0) { x = 17; }   // ensure we don't get stuck in generating zeros
#if (MI_INTPTR_SIZE==8)
  // by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
  x ^= x >> 30;
  x *= 0xbf58476d1ce4e5b9UL;
  x ^= x >> 27;
  x *= 0x94d049bb133111ebUL;
  x ^= x >> 31;
#elif (MI_INTPTR_SIZE==4)
  // by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
  x ^= x >> 16;
  x *= 0x7feb352dUL;
  x ^= x >> 15;
  x *= 0x846ca68bUL;
  x ^= x >> 16;
#endif
  return x;
}
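
// Editor's example (hypothetical): iterating the shuffle yields a cheap,
// non-cryptographic pseudo-random sequence from any non-zero seed.
static inline uintptr_t mi_example_next_random(uintptr_t* state) {
  *state = _mi_random_shuffle(*state);
  return *state;
}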

// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------

int    _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);

extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
  else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
  const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
  if mi_likely(count > 0) { return count; }
  else return _mi_os_numa_node_count_get();
}



// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------

#if defined(__GNUC__)

#include <limits.h>       // LONG_MAX
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_clzl(x);
#else
  return __builtin_clzll(x);
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_ctzl(x);
#else
  return __builtin_ctzll(x);
#endif
}

#elif defined(_MSC_VER)

#include <limits.h>       // LONG_MAX
#include <intrin.h>       // BitScanReverse64
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanReverse(&idx, x);
#else
  _BitScanReverse64(&idx, x);
#endif
  return ((MI_INTPTR_BITS - 1) - idx);
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanForward(&idx, x);
#else
  _BitScanForward64(&idx, x);
#endif
  return idx;
}

#else
static inline size_t mi_ctz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const unsigned char debruijn[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
  };
  if (x==0) return 32;
  return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
}
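
// Editor's note: `x & -x` isolates the lowest set bit, so the multiplication
// shifts the de Bruijn constant left by that bit's index and the top 5 bits of
// the product index the table. Worked example: for x == 0xA0, x & -x == 0x20
// (bit 5); (0x20 * 0x077CB531) >> 27 == 29, and debruijn[29] == 5.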
static inline size_t mi_clz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const uint8_t debruijn[32] = {
    31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
    23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
  };
  if (x==0) return 32;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
}

static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_clz32((uint32_t)x);
#else
  size_t count = mi_clz32((uint32_t)(x >> 32));
  if (count < 32) return count;
  return (32 + mi_clz32((uint32_t)x));
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_ctz32((uint32_t)x);
#else
  size_t count = mi_ctz32((uint32_t)x);
  if (count < 32) return count;
  return (32 + mi_ctz32((uint32_t)(x>>32)));
#endif
}

#endif

// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
static inline size_t mi_bsr(uintptr_t x) {
  return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
}


// ---------------------------------------------------------------------------------
// Provide our own `_mi_memcpy` for potential performance optimizations.
//
// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if
// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)). See also issue #201 and pr #253.
// ---------------------------------------------------------------------------------

#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __movsb((unsigned char*)dst, (const unsigned char*)src, n);
  }
  else {
    memcpy(dst, src, n);
  }
}
static inline void _mi_memzero(void* dst, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __stosb((unsigned char*)dst, 0, n);
  }
  else {
    memset(dst, 0, n);
  }
}
#else
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  memcpy(dst, src, n);
}
static inline void _mi_memzero(void* dst, size_t n) {
  memset(dst, 0, n);
}
#endif

// -------------------------------------------------------------------------------
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
// This is used for example in `mi_realloc`.
// -------------------------------------------------------------------------------

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
// On GCC/Clang we provide a hint that the pointers are word aligned.
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
  _mi_memcpy(adst, asrc, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  _mi_memzero(adst, n);
}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  _mi_memcpy(dst, src, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  _mi_memzero(dst, n);
}
#endif


#endif  // MIMALLOC_INTERNAL_H
@ -0,0 +1,323 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_PRIM_H
#define MIMALLOC_PRIM_H


// --------------------------------------------------------------------------
// This file specifies the primitive portability API.
// Each OS/host needs to implement these primitives, see `src/prim`
// for implementations on Windows, macOS, WASI, and Linux/Unix.
//
// note: on all primitive functions, we always have result parameters != NULL, and:
//  addr != NULL and page aligned
//  size > 0 and page aligned
//  the return value is an error code: an `int` where 0 means success
// --------------------------------------------------------------------------

// OS memory configuration
|
||||
typedef struct mi_os_mem_config_s {
|
||||
size_t page_size; // 4KiB
|
||||
size_t large_page_size; // 2MiB
|
||||
size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
|
||||
bool has_overcommit; // can we reserve more memory than can be actually committed?
|
||||
bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
|
||||
bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
|
||||
} mi_os_mem_config_t;
|
||||
|
||||
// Initialize
|
||||
void _mi_prim_mem_init( mi_os_mem_config_t* config );
|
||||
|
||||
// Free OS memory
|
||||
int _mi_prim_free(void* addr, size_t size );
|
||||
|
||||
// Allocate OS memory. Return NULL on error.
|
||||
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
|
||||
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
|
||||
// which will later be committed explicitly using `_mi_prim_commit`.
|
||||
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
|
||||
// pre: !commit => !allow_large
|
||||
// try_alignment >= _mi_os_page_size() and a power of 2
|
||||
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
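
// As an illustration of the contract above, a minimal POSIX sketch (an
// assumption for exposition, not the shipped `src/prim/unix` implementation;
// the hypothetical MI_PRIM_DOC_EXAMPLE guard keeps it out of real builds).
// It maps anonymous memory, reserving without access when `commit` is false,
// and ignores the alignment hint since `try_alignment` is only advisory.
#ifdef MI_PRIM_DOC_EXAMPLE
#include <sys/mman.h>
#include <errno.h>
static int mi_prim_alloc_posix_sketch(size_t size, size_t try_alignment, bool commit,
                                      bool allow_large, bool* is_large, bool* is_zero, void** addr) {
  (void)try_alignment; (void)allow_large;        // both are only hints in this sketch
  const int prot = (commit ? PROT_READ | PROT_WRITE : PROT_NONE);  // reserve-only when !commit
  void* p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { *addr = NULL; return errno; }
  *is_large = false;                             // no explicit large-page request was made
  *is_zero  = true;                              // fresh anonymous pages are zero-filled
  *addr = p;
  return 0;                                      // 0 means success per the contract above
}
#endif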

// Commit memory. Returns error code or 0 on success.
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
int _mi_prim_commit(void* addr, size_t size, bool* is_zero);

// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
// if the memory would need to be re-committed. For example, on Windows this is always true,
// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
// pre: needs_recommit != NULL
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
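
// Sketch of the Linux behavior described above (again guarded by the
// hypothetical MI_PRIM_DOC_EXAMPLE macro; the real code lives in
// `src/prim/unix`): MADV_DONTNEED keeps the range accessible and lazily
// re-zeroed, so no explicit recommit is needed afterwards.
#ifdef MI_PRIM_DOC_EXAMPLE
#include <sys/mman.h>
#include <errno.h>
static int mi_prim_decommit_linux_sketch(void* addr, size_t size, bool* needs_recommit) {
  *needs_recommit = false;   // pages fault back in on demand, filled with zeros
  return (madvise(addr, size, MADV_DONTNEED) == 0 ? 0 : errno);
}
#endif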

// Reset memory. The range keeps being accessible but the content might be reset.
// Returns error code or 0 on success.
int _mi_prim_reset(void* addr, size_t size);

// Protect memory. Returns error code or 0 on success.
int _mi_prim_protect(void* addr, size_t size, bool protect);

// Allocate huge (1GiB) pages possibly associated with a NUMA node.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: size > 0 and a multiple of 1GiB.
//      numa_node is either negative (don't care), or a numa node number.
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);

// Return the current NUMA node
size_t _mi_prim_numa_node(void);

// Return the number of logical NUMA nodes
size_t _mi_prim_numa_node_count(void);

// Clock ticks
mi_msecs_t _mi_prim_clock_now(void);

// Return process information (only for statistics)
typedef struct mi_process_info_s {
  mi_msecs_t  elapsed;
  mi_msecs_t  utime;
  mi_msecs_t  stime;
  size_t      current_rss;
  size_t      peak_rss;
  size_t      current_commit;
  size_t      peak_commit;
  size_t      page_faults;
} mi_process_info_t;

void _mi_prim_process_info(mi_process_info_t* pinfo);

// Default stderr output. (only for warnings etc. with verbose enabled)
// msg != NULL && _mi_strlen(msg) > 0
void _mi_prim_out_stderr( const char* msg );

// Get an environment variable. (only for options)
// name != NULL, result != NULL, result_size >= 64
bool _mi_prim_getenv(const char* name, char* result, size_t result_size);


// Fill a buffer with strong randomness; return `false` on error or if
// there is no strong randomization available.
bool _mi_prim_random_buf(void* buf, size_t buf_len);

// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
void _mi_prim_thread_init_auto_done(void);

// Called on process exit and may take action to clean up resources associated with the thread auto done.
void _mi_prim_thread_done_auto_done(void);

// Called when the default heap for a thread changes
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);


//-------------------------------------------------------------------
// Thread id: `_mi_prim_thread_id()`
//
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms as
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
// We only require _mi_prim_thread_id() to return a unique id
// for each thread (unequal to zero).
//-------------------------------------------------------------------

// defined in `init.c`; do not use these directly
extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from
extern bool _mi_process_is_initialized;             // has mi_process_init been called?

static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;

#if defined(_WIN32)

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  // Windows: works on Intel and ARM in both 32- and 64-bit
  return (uintptr_t)NtCurrentTeb();
}

// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
// both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
#elif defined(__GNUC__) && ( \
           (defined(__GLIBC__)   && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
        || (defined(__APPLE__)   && (defined(__x86_64__) || defined(__aarch64__))) \
        || (defined(__BIONIC__)  && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
        || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
        || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
      )

static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
  void* res;
  const size_t ofs = (slot*sizeof(void*));
  #if defined(__i386__)
    __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
    __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 macOS uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
    __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x32 ABI
  #elif defined(__x86_64__)
    __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
    void** tcb; MI_UNUSED(ofs);
    __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
    res = tcb[slot];
  #elif defined(__aarch64__)
    void** tcb; MI_UNUSED(ofs);
    #if defined(__APPLE__) // M1, issue #343
    __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
    #else
    __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
    #endif
    res = tcb[slot];
  #endif
  return res;
}

// setting a tls slot is only used on macOS for now
static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
  const size_t ofs = (slot*sizeof(void*));
  #if defined(__i386__)
    __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
    __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 macOS uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
    __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x32 ABI
  #elif defined(__x86_64__)
    __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
    void** tcb; MI_UNUSED(ofs);
    __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
    tcb[slot] = value;
  #elif defined(__aarch64__)
    void** tcb; MI_UNUSED(ofs);
    #if defined(__APPLE__) // M1, issue #343
    __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
    #else
    __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
    #endif
    tcb[slot] = value;
  #endif
}

static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  #if defined(__BIONIC__)
    // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
    // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
    return (uintptr_t)mi_prim_tls_slot(1);
  #else
    // in all our other targets, slot 0 is the thread id
    // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
    // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
    return (uintptr_t)mi_prim_tls_slot(0);
  #endif
}

#else

// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  return (uintptr_t)&_mi_heap_default;
}

#endif



/* ----------------------------------------------------------------------------------------
The thread local default heap: `_mi_prim_get_default_heap()`
This is inlined here as it is on the fast path for allocation functions.

On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
that the storage will always be available (allocated on the thread stacks).

On some platforms though we cannot use that when overriding `malloc` since the underlying
TLS implementation (or the loader) will itself call `malloc` on a first access and recurse.
We try to circumvent this in an efficient way:
- macOS    : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On macOS, the
             loader itself calls `malloc` even before the modules are initialized.
- OpenBSD  : we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: the defaults work but seem slow compared to FreeBSD (see PR #323).
------------------------------------------------------------------------------------------- */

static inline mi_heap_t* mi_prim_get_default_heap(void);

#if defined(MI_MALLOC_OVERRIDE)
#if defined(__APPLE__) // macOS
  #define MI_TLS_SLOT  89  // seems unused?
  // #define MI_TLS_RECURSE_GUARD 1
  // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
  // see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
  // use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
  // see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
  #define MI_TLS_PTHREAD_SLOT_OFS   (6*sizeof(int) + 4*sizeof(void*) + 24)
  // #elif defined(__DragonFly__)
  // #warning "mimalloc is not working correctly on DragonFly yet."
  // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*))  // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
  // See issue #381
  #define MI_TLS_PTHREAD
#endif
#endif


#if defined(MI_TLS_SLOT)

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
  if mi_unlikely(heap == NULL) {
    #ifdef __GNUC__
    __asm(""); // prevent conditional load of the address of _mi_heap_empty
    #endif
    heap = (mi_heap_t*)&_mi_heap_empty;
  }
  return heap;
}

#elif defined(MI_TLS_PTHREAD_SLOT_OFS)

static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
  pthread_t self = pthread_self();
  #if defined(__DragonFly__)
  if (self==NULL) return NULL;
  #endif
  return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
  if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
  mi_heap_t* heap = *pheap;
  if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
  return heap;
}

#elif defined(MI_TLS_PTHREAD)

extern pthread_key_t _mi_heap_default_key;
static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
  return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
}

#else // default using a thread local variable; used on most platforms.

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  #if defined(MI_TLS_RECURSE_GUARD)
  if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
  #endif
  return _mi_heap_default;
}

#endif // mi_prim_get_default_heap()



#endif // MIMALLOC_PRIM_H
@@ -0,0 +1,147 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TRACK_H
#define MIMALLOC_TRACK_H

/* ------------------------------------------------------------------------------------------------------
Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
These can be defined for tracking allocation:

  #define mi_track_malloc_size(p,reqsize,size,zero)
  #define mi_track_free_size(p,_size)

The macros are set up such that the size passed to `mi_track_free_size`
always matches the size passed to `mi_track_malloc_size` (currently, `size == mi_usable_size(p)`).
The `reqsize` is what the user requested, and `size >= reqsize`.
The `size` is byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
or otherwise it is the usable block size which may be larger than the original request.
Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc.).
The `zero` parameter is `true` if the allocated block is zero initialized.

Optional:

  #define mi_track_align(p,alignedp,offset,size)
  #define mi_track_resize(p,oldsize,newsize)
  #define mi_track_init()

The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
The `mi_track_resize` is currently unused but could be called on reallocations within a block.
`mi_track_init` is called at program start.

The following macros are for tools like asan and valgrind to track whether memory is
defined, undefined, or not accessible at all:

  #define mi_track_mem_defined(p,size)
  #define mi_track_mem_undefined(p,size)
  #define mi_track_mem_noaccess(p,size)

-------------------------------------------------------------------------------------------------------*/
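
// To make the contract above concrete, a hypothetical "logger" backend
// (purely an illustration; mimalloc ships only the valgrind/asan/ETW backends
// below, and the MI_TRACK_DOC_EXAMPLE guard keeps this out of real builds).
// A tool plugs in by defining the three MI_TRACK_* settings plus the macros:
#ifdef MI_TRACK_DOC_EXAMPLE
#include <stdio.h>
#define MI_TRACK_ENABLED      1
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL         "logger"
#define mi_track_malloc_size(p,reqsize,size,zero) \
  fprintf(stderr, "alloc %p: %zu bytes (requested %zu)\n", (p), (size_t)(size), (size_t)(reqsize))
#define mi_track_free_size(p,_size) \
  fprintf(stderr, "free  %p: %zu bytes\n", (p), (size_t)(_size))
#endif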

#if MI_TRACK_VALGRIND
// valgrind tool

#define MI_TRACK_ENABLED      1
#define MI_TRACK_HEAP_DESTROY 1   // track free of individual blocks on heap_destroy
#define MI_TRACK_TOOL         "valgrind"

#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
#define mi_track_free_size(p,_size)               VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
#define mi_track_resize(p,oldsize,newsize)        VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
#define mi_track_mem_defined(p,size)              VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size)            VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size)             VALGRIND_MAKE_MEM_NOACCESS(p,size)

#elif MI_TRACK_ASAN
// address sanitizer

#define MI_TRACK_ENABLED      1
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL         "asan"

#include <sanitizer/asan_interface.h>

#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_free_size(p,size)                ASAN_POISON_MEMORY_REGION(p,size)
#define mi_track_mem_defined(p,size)              ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_undefined(p,size)            ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_noaccess(p,size)             ASAN_POISON_MEMORY_REGION(p,size)

#elif MI_TRACK_ETW
// Windows event tracing

#define MI_TRACK_ENABLED      1
#define MI_TRACK_HEAP_DESTROY 1
#define MI_TRACK_TOOL         "ETW"

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include "../src/prim/windows/etw.h"

#define mi_track_init()                           EventRegistermicrosoft_windows_mimalloc();
#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
#define mi_track_free_size(p,size)                EventWriteETW_MI_FREE((UINT64)(p), size)

#else
// no tracking

#define MI_TRACK_ENABLED      0
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL         "none"

#define mi_track_malloc_size(p,reqsize,size,zero)
#define mi_track_free_size(p,_size)

#endif

// -------------------
// Utility definitions

#ifndef mi_track_resize
#define mi_track_resize(p,oldsize,newsize)      mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
#endif

#ifndef mi_track_align
#define mi_track_align(p,alignedp,offset,size)  mi_track_mem_noaccess(p,offset)
#endif

#ifndef mi_track_init
#define mi_track_init()
#endif

#ifndef mi_track_mem_defined
#define mi_track_mem_defined(p,size)
#endif

#ifndef mi_track_mem_undefined
#define mi_track_mem_undefined(p,size)
#endif

#ifndef mi_track_mem_noaccess
#define mi_track_mem_noaccess(p,size)
#endif


#if MI_PADDING
#define mi_track_malloc(p,reqsize,zero) \
  if ((p)!=NULL) { \
    mi_assert_internal(mi_usable_size(p)==(reqsize)); \
    mi_track_malloc_size(p,reqsize,reqsize,zero); \
  }
#else
#define mi_track_malloc(p,reqsize,zero) \
  if ((p)!=NULL) { \
    mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
    mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
  }
#endif

#endif
@@ -0,0 +1,670 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H

// --------------------------------------------------------------------------
// This file contains the main type definitions for mimalloc:
// mi_heap_t      : all data for a thread-local heap; contains
//                  lists of all managed heap pages.
// mi_segment_t   : a larger chunk of memory (32MiB) from which pages
//                  are allocated.
// mi_page_t      : a mimalloc page (usually 64KiB or 512KiB) from
//                  which objects are allocated.
// --------------------------------------------------------------------------


#include <stddef.h>   // ptrdiff_t
#include <stdint.h>   // uintptr_t, uint16_t, etc
#include "mimalloc/atomic.h"  // _Atomic

#ifdef _MSC_VER
#pragma warning(disable:4214)  // bitfield is not int
#endif

// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `sizeof(void*)`.
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
#endif

// ------------------------------------------------------
// Variants
// ------------------------------------------------------

// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG

// Define MI_TRACK_<tool> to enable tracking support
// #define MI_TRACK_VALGRIND 1
// #define MI_TRACK_ASAN     1
// #define MI_TRACK_ETW      1

// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1

// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1  // guard page around metadata
// #define MI_SECURE 2  // guard page around each mimalloc page
// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4  // checks for double free. (may be more expensive)

#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2  // + internal assertion checks
// #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING  1
#endif

// Check padding bytes; allows byte-precise buffer overflow detection
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif


// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST  1
#endif


// We used to abandon huge pages and eagerly deallocate them if freed from another thread,
// but that makes it impossible to visit them during a heap walk or include them in a
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
// another thread so most memory is available until it gets properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1


// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
//  p == (void*)((intptr_t)p))
// but we also need:
//  i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------

#if INTPTR_MAX > INT64_MAX
# define MI_INTPTR_SHIFT (4)  // assume 128-bit (as on arm CHERI for example)
#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
#error platform pointers must be 32, 64, or 128 bits
#endif

#if SIZE_MAX == UINT64_MAX
# define MI_SIZE_SHIFT (3)
typedef int64_t  mi_ssize_t;
#elif SIZE_MAX == UINT32_MAX
# define MI_SIZE_SHIFT (2)
typedef int32_t  mi_ssize_t;
#else
#error platform objects must be 32 or 64 bits
#endif

#if (SIZE_MAX/2) > LONG_MAX
# define MI_ZU(x)  x##ULL
# define MI_ZI(x)  x##LL
#else
# define MI_ZU(x)  x##UL
# define MI_ZI(x)  x##L
#endif

#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)

#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)

#define MI_KiB  (MI_ZU(1024))
#define MI_MiB  (MI_KiB*MI_KiB)
#define MI_GiB  (MI_MiB*MI_KiB)


// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
#define MI_SEGMENT_SLICE_SHIFT    (13 + MI_INTPTR_SHIFT)  // 64KiB  (32KiB on 32-bit)

#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT          ( 9 + MI_SEGMENT_SLICE_SHIFT)  // 32MiB
#else
#define MI_SEGMENT_SHIFT          ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 4MiB on 32-bit
#endif

#define MI_SMALL_PAGE_SHIFT       (MI_SEGMENT_SLICE_SHIFT)       // 64KiB
#define MI_MEDIUM_PAGE_SHIFT      ( 3 + MI_SMALL_PAGE_SHIFT)     // 512KiB


// Derived constants
#define MI_SEGMENT_SIZE           (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN          MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK           ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE     (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT     (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE)  // 512 (= 32MiB / 64KiB on 64-bit)

#define MI_SMALL_PAGE_SIZE        (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE       (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)

#define MI_SMALL_OBJ_SIZE_MAX     (MI_SMALL_PAGE_SIZE/4)    // 16KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX    (MI_MEDIUM_PAGE_SIZE/4)   // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX   (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX     (MI_SEGMENT_SIZE/2)       // 16MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX    (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
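
// Worked example of the tuning above (illustrative; the hypothetical
// MI_TYPES_DOC_EXAMPLE guard keeps it out of real builds). On a typical
// 64-bit target: slices are 1<<(13+3) = 64KiB, segments 1<<(9+16) = 32MiB,
// so a segment holds 32MiB/64KiB = 512 slices.
#ifdef MI_TYPES_DOC_EXAMPLE
_Static_assert(MI_SEGMENT_SLICE_SIZE == 64*MI_KiB,  "slice size on 64-bit");
_Static_assert(MI_SEGMENT_SIZE       == 32*MI_MiB,  "segment size on 64-bit");
_Static_assert(MI_SLICES_PER_SEGMENT == 512,        "32MiB / 64KiB = 512 slices");
_Static_assert(MI_LARGE_OBJ_SIZE_MAX == 16*MI_MiB,  "large objects up to half a segment");
#endif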

// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE  (73U)

#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif

// Maximum slice offset (15)
#define MI_MAX_SLICE_OFFSET       ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)

// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE        ((uint32_t)(2*MI_GiB))

// blocks up to this size are always allocated aligned
#define MI_MAX_ALIGN_GUARANTEE    (8*MI_MAX_ALIGN_SIZE)

// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX          (MI_SEGMENT_SIZE >> 1)


// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields
// (only actually encodes when MI_ENCODE_FREELIST is defined)
typedef uintptr_t  mi_encoded_t;
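
// Illustration only: a simplified flavor of free-list encoding (the real
// scheme lives in `mimalloc/internal.h`, see `_mi_block_next`, and differs
// in detail). Two random per-page keys make a corrupted or forged `next`
// link decode to a wildly invalid pointer, which the debug/secure checks
// can then detect. Guarded by the hypothetical MI_TYPES_DOC_EXAMPLE macro.
#ifdef MI_TYPES_DOC_EXAMPLE
static inline mi_encoded_t mi_encode_next_sketch(const void* next, uintptr_t key1, uintptr_t key2) {
  return ((uintptr_t)next ^ key1) + key2;   // decode with: ((enc - key2) ^ key1)
}
#endif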

// thread id's
typedef size_t     mi_threadid_t;

// free lists contain blocks
typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;


// The delayed flags are used for efficient multi-threaded freeing
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky, only resets on page reclaim
} mi_delayed_t;


// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;
  struct {
    uint8_t in_full : 1;
    uint8_t has_aligned : 1;
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif

// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
typedef uintptr_t mi_thread_free_t;
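
// Illustration of that bit-packing (function names here are assumptions for
// exposition; the real accessors along these lines live in
// `mimalloc/internal.h`). Guarded by the hypothetical MI_TYPES_DOC_EXAMPLE macro.
#ifdef MI_TYPES_DOC_EXAMPLE
static inline mi_delayed_t mi_tf_delayed_sketch(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);             // low 2 bits: delayed-free state
}
static inline mi_block_t* mi_tf_block_sketch(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~(uintptr_t)0x03);  // remaining bits: list head pointer
}
#endif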

// A page contains blocks of one specific size (`block_size`).
// Each page has three lists of free blocks:
// `free` for blocks that can be allocated,
// `local_free` for freed blocks that are not yet available to `mi_malloc`,
// `thread_free` for blocks freed by other threads.
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when the `free` list is exhausted. The separate `local_free` list is necessary to
// implement a monotonic heartbeat. The `thread_free` list is needed for
// avoiding atomic operations in the common case.
//
//
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
//   and 12 are still good for address calculation)
// - To limit the structure size, the `xblock_size` is 32-bits only; for
//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as delayed-free flags to optimize
//   concurrent frees where only the first concurrent free adds to the owning
//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
//   The invariant is that no-delayed-free is only set if there is
//   at least one block that will be added, or has already been added, to
//   the owning heap `thread_delayed_free` list. This guarantees that pages
//   will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
  // "owned" by the segment
  uint32_t         slice_count;       // slices in this page (0 if not a page)
  uint32_t         slice_offset;      // distance from the actual page data slice (0 if a page)
  uint8_t          is_committed : 1;  // `true` if the page virtual memory is committed
  uint8_t          is_zero_init : 1;  // `true` if the page was initially zero initialized

  // layout like this to optimize access in `mi_malloc` and `mi_free`
  uint16_t         capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
  uint16_t         reserved;          // number of blocks reserved in memory
  mi_page_flags_t  flags;             // `in_full` and `has_aligned` flags (8 bits)
  uint8_t          free_is_zero : 1;  // `true` if the blocks in the free list are zero initialized
  uint8_t          retire_expire : 7; // expiration count for retired blocks

  mi_block_t*      free;              // list of available free blocks (`malloc` allocates from this list)
  uint32_t         used;              // number of blocks in use (including blocks in `local_free` and `thread_free`)
  uint32_t         xblock_size;       // size available in each block (always `>0`)
  mi_block_t*      local_free;        // list of deferred free blocks by this thread (migrates to `free`)

  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t        keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif

  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t)        xheap;

  struct mi_page_s* next;             // next page owned by this thread with the same `block_size`
  struct mi_page_s* prev;             // previous page owned by this thread with the same `block_size`

  // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
  #if MI_INTPTR_SIZE==8
  uintptr_t padding[1];
  #endif
} mi_page_t;



// ------------------------------------------------------
// Mimalloc segments contain mimalloc pages
// ------------------------------------------------------

typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,   // medium blocks go into medium pages inside a segment
  MI_PAGE_LARGE,    // larger blocks go into a page of just one block
  MI_PAGE_HUGE,     // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;

typedef enum mi_segment_kind_e {
  MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
  MI_SEGMENT_HUGE,   // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;

// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go, which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------

#define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)  // 64KiB
#define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS    MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
#endif
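
// Worked example (illustrative; hypothetical MI_TYPES_DOC_EXAMPLE guard): on
// 64-bit the mask covers 32MiB / 64KiB = 512 commit chunks, i.e. 8 size_t
// fields of 64 bits each.
#ifdef MI_TYPES_DOC_EXAMPLE
_Static_assert(MI_COMMIT_MASK_BITS == 512,      "one bit per 64KiB chunk of a 32MiB segment");
_Static_assert(MI_COMMIT_MASK_FIELD_COUNT == 8, "512 bits / 64 bits per size_t field");
#endif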

typedef struct mi_commit_mask_s {
  size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;

typedef mi_page_t  mi_slice_t;
typedef int64_t    mi_msecs_t;


// Memory can reside in arenas, be direct OS allocated, or statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge OS pages
  MI_MEM_OS_REMAP,  // allocated in a remappable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case)
} mi_memkind_t;

static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
}

typedef struct mi_memid_os_info {
  void*         base;       // actual base address of the block (used for offset aligned allocations)
  size_t        alignment;  // alignment at allocation
} mi_memid_os_info_t;

typedef struct mi_memid_arena_info {
  size_t        block_index;   // index in the arena
  mi_arena_id_t id;            // arena id (>= 1)
  bool          is_exclusive;  // the arena can only be used for specific arena allocations
} mi_memid_arena_info_t;

typedef struct mi_memid_s {
  union {
    mi_memid_os_info_t    os;     // only used for MI_MEM_OS
    mi_memid_arena_info_t arena;  // only used for MI_MEM_ARENA
  } mem;
  bool          is_pinned;           // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
  bool          initially_committed; // `true` if the memory was originally allocated as committed
  bool          initially_zero;      // `true` if the memory was originally zero initialized
  mi_memkind_t  memkind;
} mi_memid_t;


// Segments are large allocated memory blocks (32MiB on 64-bit) from
// the OS. Inside segments we allocate fixed-size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
  // constant fields
  mi_memid_t        memid;            // memory id for arena allocation
  bool              allow_decommit;
  bool              allow_purge;
  size_t            segment_size;

  // segment fields
  mi_msecs_t        purge_expire;
  mi_commit_mask_t  purge_mask;
  mi_commit_mask_t  commit_mask;

  _Atomic(struct mi_segment_s*) abandoned_next;

  // from here is zero initialized
  struct mi_segment_s* next;          // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)

  size_t            abandoned;        // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t            abandoned_visits; // count how often this segment is visited in the abandoned list (to force a reclaim if it is in the list too long)
  size_t            used;             // count of pages in use
  uintptr_t         cookie;           // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

  size_t            segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
  size_t            segment_info_slices; // initial slices used for segment info and possible guard pages.

  // layout like this to optimize access in `mi_free`
  mi_segment_kind_t kind;
  size_t            slice_entries;    // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
  _Atomic(mi_threadid_t) thread_id;   // unique id of the thread owning this segment

  mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
} mi_segment_t;


// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only allocate/reallocate from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------
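
// First-class heaps in practice, using the public API from `mimalloc.h`
// (these entry points are part of the documented interface; shown here only
// as a usage sketch for the description above, behind the hypothetical
// MI_TYPES_DOC_EXAMPLE guard):
#ifdef MI_TYPES_DOC_EXAMPLE
#include <mimalloc.h>
static void heap_usage_sketch(void) {
  mi_heap_t* h = mi_heap_new();      // a fresh heap owned by this thread
  void* p = mi_heap_malloc(h, 64);   // allocate from it (creating thread only)
  mi_free(p);                        // freeing may happen from any thread
  mi_heap_destroy(h);                // or drop every remaining block at once
}
#endif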

// Thread local data
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
  mi_page_t* first;
  mi_page_t* last;
  size_t     block_size;
} mi_page_queue_t;

#define MI_BIN_FULL  (MI_BIN_HUGE+1)

// Random context
typedef struct mi_random_cxt_s {
  uint32_t input[16];
  uint32_t output[16];
  int      output_available;
  bool     weak;
} mi_random_ctx_t;


// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif

#define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)


// A heap owns a set of pages.
struct mi_heap_s {
  mi_tld_t*             tld;
  mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
  _Atomic(mi_block_t*)  thread_delayed_free;
  mi_threadid_t         thread_id;                           // thread this heap belongs to
  mi_arena_id_t         arena_id;                            // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t             cookie;                              // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t             keys[2];                             // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t       random;                              // random number context used for secure allocation
  size_t                page_count;                          // total number of pages in the `pages` queues.
  size_t                page_retired_min;                    // smallest retired index (retired pages are fully free, but still in the page queues)
  size_t                page_retired_max;                    // largest retired index into the `pages` array.
  mi_heap_t*            next;                                // list of heaps per thread
  bool                  no_reclaim;                          // `true` if this heap should not reclaim abandoned pages
};



// ------------------------------------------------------
// Debug
// ------------------------------------------------------

#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT   (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED    (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING  (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
#define mi_assert(expr)   ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif

#if (MI_DEBUG>1)
#define mi_assert_internal    mi_assert
#else
#define mi_assert_internal(x)
#endif

#if (MI_DEBUG>2)
#define mi_assert_expensive   mi_assert
#else
#define mi_assert_expensive(x)
#endif

// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

typedef struct mi_stats_s {
  mi_stat_count_t segments;
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t purged;
  mi_stat_count_t page_committed;
  mi_stat_count_t segments_abandoned;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t large;
  mi_stat_count_t malloc;
  mi_stat_count_t segments_cache;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
  mi_stat_counter_t purge_calls;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t large_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;


void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

#if (MI_STAT)
#define mi_stat_increase(stat,amount)          _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)          _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount)  _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount)          (void)0
#define mi_stat_decrease(stat,amount)          (void)0
#define mi_stat_counter_increase(stat,amount)  (void)0
#endif

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_stat_decrease( (heap)->tld->stats.stat, amount)

// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------

// A "span" is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
  mi_slice_t* first;
  mi_slice_t* last;
  size_t      slice_count;
} mi_span_queue_t;

#define MI_SEGMENT_BIN_MAX (35)  // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)

// OS thread local data
typedef struct mi_os_tld_s {
  size_t      region_idx;  // start point for next allocation
  mi_stats_t* stats;       // points to tld stats
} mi_os_tld_t;


// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
  size_t          count;         // current number of segments
  size_t          peak_count;    // peak number of segments
  size_t          current_size;  // current size of all segments
  size_t          peak_size;     // peak size of all segments
  mi_stats_t*     stats;         // points to tld stats
  mi_os_tld_t*    os;            // points to the os tld
} mi_segments_tld_t;

// Thread local data
struct mi_tld_s {
  unsigned long long heartbeat;     // monotonic heartbeat count
  bool               recurse;       // true if deferred was called; used to prevent infinite recursion.
  mi_heap_t*         heap_backing;  // backing heap of this thread (cannot be deleted)
  mi_heap_t*         heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
  mi_segments_tld_t  segments;      // segment tld
  mi_os_tld_t        os;            // os tld
  mi_stats_t         stats;         // statistics
};

#endif
@@ -0,0 +1,571 @@
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#include "mimalloc.h"
|
||||
#include "mimalloc/internal.h"
|
||||
#include "mimalloc/atomic.h"
|
||||
#include "mimalloc/prim.h" // mi_prim_out_stderr
|
||||
|
||||
#include <stdio.h> // FILE
|
||||
#include <stdlib.h> // abort
|
||||
#include <stdarg.h>
|
||||
|
||||
|
||||
static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit)
|
||||
static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit)
|
||||
|
||||
static void mi_add_stderr_output(void);
|
||||
|
||||
int mi_version(void) mi_attr_noexcept {
|
||||
return MI_MALLOC_VERSION;
|
||||
}
|
||||
|
||||
|
||||
// --------------------------------------------------------
|
||||
// Options
|
||||
// These can be accessed by multiple threads and may be
|
||||
// concurrently initialized, but an initializing data race
|
||||
// is ok since they resolve to the same value.
|
||||
// --------------------------------------------------------
|
||||
typedef enum mi_init_e {
|
||||
UNINIT, // not yet initialized
|
||||
DEFAULTED, // not found in the environment, use default value
|
||||
INITIALIZED // found in environment or set explicitly
|
||||
} mi_init_t;
|
||||
|
||||
typedef struct mi_option_desc_s {
|
||||
long value; // the value
|
||||
mi_init_t init; // is it initialized yet? (from the environment)
|
||||
mi_option_t option; // for debugging: the option index should match the option
|
||||
const char* name; // option name without `mimalloc_` prefix
|
||||
const char* legacy_name; // potential legacy option name
|
||||
} mi_option_desc_t;
|
||||
|
||||
#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
|
||||
#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
|
||||
|
||||
static mi_option_desc_t options[_mi_option_last] =
|
||||
{
|
||||
// stable options
|
||||
#if MI_DEBUG || defined(MI_SHOW_ERRORS)
|
||||
{ 1, UNINIT, MI_OPTION(show_errors) },
|
||||
#else
|
||||
{ 0, UNINIT, MI_OPTION(show_errors) },
|
||||
#endif
|
||||
{ 0, UNINIT, MI_OPTION(show_stats) },
|
||||
{ 0, UNINIT, MI_OPTION(verbose) },
|
||||
|
||||
// the following options are experimental and not all combinations make sense.
|
||||
{ 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
|
||||
{ 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux)
|
||||
{ 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit)
|
||||
{ 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
|
||||
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
|
||||
{-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
|
||||
{ 0, UNINIT, MI_OPTION(reserve_os_memory) },
|
||||
{ 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
|
||||
{ 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
|
||||
{ 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates
|
||||
{ 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit)
|
||||
#if defined(__NetBSD__)
|
||||
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
|
||||
#else
|
||||
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
|
||||
#endif
|
||||
{ 10, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds
|
||||
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
|
||||
{ 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
|
||||
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
|
||||
{ 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
|
||||
{ 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
|
||||
{ 8, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. number of segment reclaims from the abandoned segments per try.
|
||||
{ 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
|
||||
#if (MI_INTPTR_SIZE>4)
|
||||
{ 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time
|
||||
#else
|
||||
{ 128L * 1024L, UNINIT, MI_OPTION(arena_reserve) },
|
||||
#endif
|
||||
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
|
||||
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
|
||||
};
|
||||

static void mi_option_init(mi_option_desc_t* desc);

void _mi_options_init(void) {
  // called on process load; should not be called before the CRT is initialized!
  // (e.g. do not call this from process_init as that may run before CRT initialization)
  mi_add_stderr_output(); // now it is safe to use stderr for output
  for(int i = 0; i < _mi_option_last; i++ ) {
    mi_option_t option = (mi_option_t)i;
    long l = mi_option_get(option); MI_UNUSED(l); // initialize
    // if (option != mi_option_verbose)
    {
      mi_option_desc_t* desc = &options[option];
      _mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
    }
  }
  mi_max_error_count = mi_option_get(mi_option_max_errors);
  mi_max_warning_count = mi_option_get(mi_option_max_warnings);
}

mi_decl_nodiscard long mi_option_get(mi_option_t option) {
  mi_assert(option >= 0 && option < _mi_option_last);
  if (option < 0 || option >= _mi_option_last) return 0;
  mi_option_desc_t* desc = &options[option];
  mi_assert(desc->option == option);  // index should match the option
  if mi_unlikely(desc->init == UNINIT) {
    mi_option_init(desc);
  }
  return desc->value;
}

mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max) {
  long x = mi_option_get(option);
  return (x < min ? min : (x > max ? max : x));
}

mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
  mi_assert_internal(option == mi_option_reserve_os_memory || option == mi_option_arena_reserve);
  long x = mi_option_get(option);
  return (x < 0 ? 0 : (size_t)x * MI_KiB);
}
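// Illustrative note (commentary added here, not upstream): size-valued
// options are stored in KiB, so with the default `arena_reserve` of
// 1024L*1024L the call below would yield 1 GiB in bytes:
//
//   size_t reserve = mi_option_get_size(mi_option_arena_reserve);
//   // reserve == 1024*1024 (KiB) * MI_KiB == 1 GiB (assuming the default)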

void mi_option_set(mi_option_t option, long value) {
  mi_assert(option >= 0 && option < _mi_option_last);
  if (option < 0 || option >= _mi_option_last) return;
  mi_option_desc_t* desc = &options[option];
  mi_assert(desc->option == option);  // index should match the option
  desc->value = value;
  desc->init = INITIALIZED;
}

void mi_option_set_default(mi_option_t option, long value) {
  mi_assert(option >= 0 && option < _mi_option_last);
  if (option < 0 || option >= _mi_option_last) return;
  mi_option_desc_t* desc = &options[option];
  if (desc->init != INITIALIZED) {
    desc->value = value;
  }
}

mi_decl_nodiscard bool mi_option_is_enabled(mi_option_t option) {
  return (mi_option_get(option) != 0);
}

void mi_option_set_enabled(mi_option_t option, bool enable) {
  mi_option_set(option, (enable ? 1 : 0));
}

void mi_option_set_enabled_default(mi_option_t option, bool enable) {
  mi_option_set_default(option, (enable ? 1 : 0));
}

void mi_option_enable(mi_option_t option) {
  mi_option_set_enabled(option, true);
}

void mi_option_disable(mi_option_t option) {
  mi_option_set_enabled(option, false);
}

static void mi_cdecl mi_out_stderr(const char* msg, void* arg) {
  MI_UNUSED(arg);
  if (msg != NULL && msg[0] != 0) {
    _mi_prim_out_stderr(msg);
  }
}

// Since an output function can be registered at the earliest in the `main`
// function, we also buffer output that happens earlier. When
// an output function is registered it is called immediately with
// the output up to that point.
#ifndef MI_MAX_DELAY_OUTPUT
#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024))
#endif
static char out_buf[MI_MAX_DELAY_OUTPUT+1];
static _Atomic(size_t) out_len;

static void mi_cdecl mi_out_buf(const char* msg, void* arg) {
  MI_UNUSED(arg);
  if (msg==NULL) return;
  if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return;
  size_t n = _mi_strlen(msg);
  if (n==0) return;
  // claim space
  size_t start = mi_atomic_add_acq_rel(&out_len, n);
  if (start >= MI_MAX_DELAY_OUTPUT) return;
  // check bound
  if (start+n >= MI_MAX_DELAY_OUTPUT) {
    n = MI_MAX_DELAY_OUTPUT-start-1;
  }
  _mi_memcpy(&out_buf[start], msg, n);
}
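// Illustrative sketch (commentary added here, not upstream): with the default
// MI_MAX_DELAY_OUTPUT of 32 KiB, a 100-byte message arriving when
// out_len == 32*1024 - 40 claims start == 32728; since start+n (32828)
// exceeds the bound, n is clamped to 32*1024 - 32728 - 1 == 39 bytes, so
// the buffer never overruns and the last byte stays free for the
// terminating zero written by mi_out_buf_flush.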

static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
  if (out==NULL) return;
  // claim (if `no_more_buf == true`, no more output will be added after this point)
  size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1));
  // and output the current contents
  if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT;
  out_buf[count] = 0;
  out(out_buf,arg);
  if (!no_more_buf) {
    out_buf[count] = '\n'; // if we continue with the buffer, insert a newline
  }
}


// Once this module is loaded, switch to this routine
// which outputs to stderr and the delayed output buffer.
static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) {
  mi_out_stderr(msg,arg);
  mi_out_buf(msg,arg);
}



// --------------------------------------------------------
// Default output handler
// --------------------------------------------------------

// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t.
// For now, don't register output from multiple threads.
static mi_output_fun* volatile mi_out_default;  // = NULL
static _Atomic(void*) mi_out_arg;               // = NULL

static mi_output_fun* mi_out_get_default(void** parg) {
  if (parg != NULL) { *parg = mi_atomic_load_ptr_acquire(void,&mi_out_arg); }
  mi_output_fun* out = mi_out_default;
  return (out == NULL ? &mi_out_buf : out);
}

void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept {
  mi_out_default = (out == NULL ? &mi_out_stderr : out);  // stop using the delayed output buffer
  mi_atomic_store_ptr_release(void,&mi_out_arg, arg);
  if (out!=NULL) mi_out_buf_flush(out,true,arg);          // output all the delayed output now
}
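// Hedged usage sketch (commentary added here, not upstream): a host
// application could redirect mimalloc's messages to its own logger like
// this; `my_logger`, `log_write` and `my_state` are hypothetical names.
//
//   static void my_logger(const char* msg, void* arg) {
//     log_write((log_state_t*)arg, msg);          // hypothetical logging call
//   }
//   ...
//   mi_register_output(&my_logger, &my_state);    // also flushes buffered output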

// add stderr to the delayed output after the module is loaded
static void mi_add_stderr_output(void) {
  mi_assert_internal(mi_out_default == NULL);
  mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr
  mi_out_default = &mi_out_buf_stderr;           // and add stderr to the delayed output
}

// --------------------------------------------------------
// Messages, all end up calling `_mi_fputs`.
// --------------------------------------------------------
static _Atomic(size_t) error_count;    // = 0;  // when >= max_error_count stop emitting errors
static _Atomic(size_t) warning_count;  // = 0;  // when >= max_warning_count stop emitting warnings

// When overriding malloc, we may recurse into mi_vfprintf if an allocation
// inside the C runtime causes another message.
// In some cases (like on macOS) the loader already allocates which
// calls into mimalloc; if we then access thread locals (like `recurse`)
// this may crash as the access may call _tlv_bootstrap that tries to
// (recursively) invoke malloc again to allocate space for the thread local
// variables on demand. This is why we use a _mi_preloading test on such
// platforms. However, the C code generator may move the initial thread local
// address load before the `if`, so we split it out into a separate function.
static mi_decl_thread bool recurse = false;

static mi_decl_noinline bool mi_recurse_enter_prim(void) {
  if (recurse) return false;
  recurse = true;
  return true;
}

static mi_decl_noinline void mi_recurse_exit_prim(void) {
  recurse = false;
}

static bool mi_recurse_enter(void) {
  #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
  if (_mi_preloading()) return false;
  #endif
  return mi_recurse_enter_prim();
}

static void mi_recurse_exit(void) {
  #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
  if (_mi_preloading()) return;
  #endif
  mi_recurse_exit_prim();
}
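// Illustrative scenario (commentary added here, not upstream): if emitting
// a warning triggers an allocation inside the C runtime that itself fails
// and tries to warn again, the second call finds `recurse == true` in
// mi_recurse_enter and returns silently instead of looping forever.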

void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
  if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr?
    if (!mi_recurse_enter()) return;
    out = mi_out_get_default(&arg);
    if (prefix != NULL) out(prefix, arg);
    out(message, arg);
    mi_recurse_exit();
  }
  else {
    if (prefix != NULL) out(prefix, arg);
    out(message, arg);
  }
}

// Define our own limited `fprintf` that avoids memory allocation.
// We do this using `snprintf` with a limited buffer.
static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) {
  char buf[512];
  if (fmt==NULL) return;
  if (!mi_recurse_enter()) return;
  vsnprintf(buf,sizeof(buf)-1,fmt,args);
  mi_recurse_exit();
  _mi_fputs(out,arg,prefix,buf);
}

void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
  va_list args;
  va_start(args,fmt);
  mi_vfprintf(out,arg,NULL,fmt,args);
  va_end(args);
}

static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
  if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) {
    char tprefix[64];
    snprintf(tprefix, sizeof(tprefix), "%sthread 0x%llx: ", prefix, (unsigned long long)_mi_thread_id());
    mi_vfprintf(out, arg, tprefix, fmt, args);
  }
  else {
    mi_vfprintf(out, arg, prefix, fmt, args);
  }
}

void _mi_trace_message(const char* fmt, ...) {
  if (mi_option_get(mi_option_verbose) <= 1) return;  // only with verbose level 2 or higher
  va_list args;
  va_start(args, fmt);
  mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
  va_end(args);
}

void _mi_verbose_message(const char* fmt, ...) {
  if (!mi_option_is_enabled(mi_option_verbose)) return;
  va_list args;
  va_start(args,fmt);
  mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args);
  va_end(args);
}

static void mi_show_error_message(const char* fmt, va_list args) {
  if (!mi_option_is_enabled(mi_option_verbose)) {
    if (!mi_option_is_enabled(mi_option_show_errors)) return;
    if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
  }
  mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args);
}

void _mi_warning_message(const char* fmt, ...) {
  if (!mi_option_is_enabled(mi_option_verbose)) {
    if (!mi_option_is_enabled(mi_option_show_errors)) return;
    if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
  }
  va_list args;
  va_start(args,fmt);
  mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args);
  va_end(args);
}


#if MI_DEBUG
void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) {
  _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n  assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion);
  abort();
}
#endif

// --------------------------------------------------------
// Errors
// --------------------------------------------------------

static mi_error_fun* volatile mi_error_handler;  // = NULL
static _Atomic(void*) mi_error_arg;              // = NULL

static void mi_error_default(int err) {
  MI_UNUSED(err);
#if (MI_DEBUG>0)
  if (err==EFAULT) {
    #ifdef _MSC_VER
    __debugbreak();
    #endif
    abort();
  }
#endif
#if (MI_SECURE>0)
  if (err==EFAULT) {  // abort on serious errors in secure mode (corrupted meta-data)
    abort();
  }
#endif
#if defined(MI_XMALLOC)
  if (err==ENOMEM || err==EOVERFLOW) {  // abort on memory allocation failures in xmalloc mode
    abort();
  }
#endif
}

void mi_register_error(mi_error_fun* fun, void* arg) {
  mi_error_handler = fun;  // can be NULL
  mi_atomic_store_ptr_release(void,&mi_error_arg, arg);
}

void _mi_error_message(int err, const char* fmt, ...) {
  // show detailed error message
  va_list args;
  va_start(args, fmt);
  mi_show_error_message(fmt, args);
  va_end(args);
  // and call the error handler which may abort (or return normally)
  if (mi_error_handler != NULL) {
    mi_error_handler(err, mi_atomic_load_ptr_acquire(void,&mi_error_arg));
  }
  else {
    mi_error_default(err);
  }
}

// --------------------------------------------------------
// Initialize options by checking the environment
// --------------------------------------------------------
char _mi_toupper(char c) {
  if (c >= 'a' && c <= 'z') return (c - 'a' + 'A');
  else return c;
}

int _mi_strnicmp(const char* s, const char* t, size_t n) {
  if (n == 0) return 0;
  for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
    if (_mi_toupper(*s) != _mi_toupper(*t)) break;
  }
  return (n == 0 ? 0 : *s - *t);
}
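// Worked example (commentary added here, not upstream):
// _mi_strnicmp("TRUE", "true", 4) walks all four pairs without a
// case-insensitive mismatch and returns 0 because n reaches 0, while
// _mi_strnicmp("ON", "OFF", 3) stops at the second character and returns
// 'N' - 'F' (> 0).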

void _mi_strlcpy(char* dest, const char* src, size_t dest_size) {
  if (dest==NULL || src==NULL || dest_size == 0) return;
  // copy until the end of src, or until dest is (almost) full
  while (*src != 0 && dest_size > 1) {
    *dest++ = *src++;
    dest_size--;
  }
  // always zero terminate
  *dest = 0;
}

void _mi_strlcat(char* dest, const char* src, size_t dest_size) {
  if (dest==NULL || src==NULL || dest_size == 0) return;
  // find the end of the string in the dest buffer
  while (*dest != 0 && dest_size > 1) {
    dest++;
    dest_size--;
  }
  // and concatenate
  _mi_strlcpy(dest, src, dest_size);
}

size_t _mi_strlen(const char* s) {
  if (s==NULL) return 0;
  size_t len = 0;
  while(s[len] != 0) { len++; }
  return len;
}

size_t _mi_strnlen(const char* s, size_t max_len) {
  if (s==NULL) return 0;
  size_t len = 0;
  while(s[len] != 0 && len < max_len) { len++; }
  return len;
}

#ifdef MI_NO_GETENV
static bool mi_getenv(const char* name, char* result, size_t result_size) {
  MI_UNUSED(name);
  MI_UNUSED(result);
  MI_UNUSED(result_size);
  return false;
}
#else
static bool mi_getenv(const char* name, char* result, size_t result_size) {
  if (name==NULL || result == NULL || result_size < 64) return false;
  return _mi_prim_getenv(name,result,result_size);
}
#endif

// TODO: implement ourselves to reduce dependencies on the C runtime
#include <stdlib.h> // strtol
#include <string.h> // strstr


static void mi_option_init(mi_option_desc_t* desc) {
  // Read option value from the environment
  char s[64 + 1];
  char buf[64+1];
  _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
  _mi_strlcat(buf, desc->name, sizeof(buf));
  bool found = mi_getenv(buf, s, sizeof(s));
  if (!found && desc->legacy_name != NULL) {
    _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
    _mi_strlcat(buf, desc->legacy_name, sizeof(buf));
    found = mi_getenv(buf, s, sizeof(s));
    if (found) {
      _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name);
    }
  }

  if (found) {
    size_t len = _mi_strnlen(s, sizeof(buf) - 1);
    for (size_t i = 0; i < len; i++) {
      buf[i] = _mi_toupper(s[i]);
    }
    buf[len] = 0;
    if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) {
      desc->value = 1;
      desc->init = INITIALIZED;
    }
    else if (strstr("0;FALSE;NO;OFF", buf) != NULL) {
      desc->value = 0;
      desc->init = INITIALIZED;
    }
    else {
      char* end = buf;
      long value = strtol(buf, &end, 10);
      if (desc->option == mi_option_reserve_os_memory || desc->option == mi_option_arena_reserve) {
        // this option is interpreted in KiB to prevent overflow of `long`
        if (*end == 'K') { end++; }
        else if (*end == 'M') { value *= MI_KiB; end++; }
        else if (*end == 'G') { value *= MI_MiB; end++; }
        else { value = (value + MI_KiB - 1) / MI_KiB; }
        if (end[0] == 'I' && end[1] == 'B') { end += 2; }
        else if (*end == 'B') { end++; }
      }
      if (*end == 0) {
        desc->value = value;
        desc->init = INITIALIZED;
      }
      else {
        // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
        desc->init = DEFAULTED;
        if (desc->option == mi_option_verbose && desc->value == 0) {
          // if the 'mimalloc_verbose' env var has a bogus value we'd never know
          // (since the value defaults to 'off'), so in that case briefly enable verbose
          desc->value = 1;
          _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
          desc->value = 0;
        }
        else {
          _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
        }
      }
    }
    mi_assert_internal(desc->init != UNINIT);
  }
  else if (!_mi_preloading()) {
    desc->init = DEFAULTED;
  }
}
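// Worked example (commentary added here, not upstream): with the
// environment set to MIMALLOC_ARENA_RESERVE=1GiB, strtol yields 1 with
// `end` at 'G'; the suffix branch multiplies by MI_MiB (so value becomes
// 1048576, i.e. 1 GiB expressed in KiB) and skips the trailing "iB",
// leaving *end == 0 so the option is marked INITIALIZED.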
@@ -0,0 +1,689 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"


/* -----------------------------------------------------------
  Initialization.
  On Windows this initializes support for aligned allocation and
  large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
----------------------------------------------------------- */

static mi_os_mem_config_t mi_os_mem_config = {
  4096,   // page size
  0,      // large page size (usually 2MiB)
  4096,   // allocation granularity
  true,   // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
  false,  // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
  true    // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
};

bool _mi_os_has_overcommit(void) {
  return mi_os_mem_config.has_overcommit;
}

bool _mi_os_has_virtual_reserve(void) {
  return mi_os_mem_config.has_virtual_reserve;
}


// OS (small) page size
size_t _mi_os_page_size(void) {
  return mi_os_mem_config.page_size;
}

// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
size_t _mi_os_large_page_size(void) {
  return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
}

bool _mi_os_use_large_page(size_t size, size_t alignment) {
  // if we have access, check the size and alignment requirements
  if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_allow_large_os_pages)) return false;
  return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0);
}

// round to a good OS allocation size (bounded by max 12.5% waste)
size_t _mi_os_good_alloc_size(size_t size) {
  size_t align_size;
  if (size < 512*MI_KiB) align_size = _mi_os_page_size();
  else if (size < 2*MI_MiB) align_size = 64*MI_KiB;
  else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
  else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
  else align_size = 4*MI_MiB;
  if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
  return _mi_align_up(size, align_size);
}
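// Worked example (commentary added here, not upstream): a request of 3 MiB
// falls in the "< 8 MiB" bucket, so it is rounded up to a multiple of
// 256 KiB and stays 3 MiB; a request of 3 MiB + 1 byte rounds up to
// 3.25 MiB, wasting at most 256 KiB (well under the 12.5% bound).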

void _mi_os_init(void) {
  _mi_prim_mem_init(&mi_os_mem_config);
}


/* -----------------------------------------------------------
  Util
-------------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);

static void* mi_align_up_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_up((uintptr_t)p, alignment);
}

static void* mi_align_down_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_down((uintptr_t)p, alignment);
}


/* -----------------------------------------------------------
  aligned hinting
-------------------------------------------------------------- */

// On 64-bit systems, we can do efficient aligned allocation by using
// the 2TiB to 30TiB area to allocate those.
#if (MI_INTPTR_SIZE >= 8)
static mi_decl_cache_align _Atomic(uintptr_t) aligned_base;

// Return a MI_SEGMENT_SIZE aligned address that is probably available.
// If this returns NULL, the OS will determine the address but on some OS's that may not be
// properly aligned which can be more costly as it needs to be adjusted afterwards.
// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
//  in the middle of the 2TiB - 6TiB address range (see issue #372))

#define MI_HINT_BASE ((uintptr_t)2 << 40)  // 2TiB start
#define MI_HINT_AREA ((uintptr_t)4 << 40)  // up to 6TiB (since before win8 there is "only" 8TiB available to processes)
#define MI_HINT_MAX  ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages)

void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
{
  if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
  size = _mi_align_up(size, MI_SEGMENT_SIZE);
  if (size > 1*MI_GiB) return NULL; // guarantee the chance of a fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096.
  #if (MI_SECURE>0)
  size += MI_SEGMENT_SIZE;          // put `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLAs but increases guarded areas.
  #endif

  uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size);
  if (hint == 0 || hint > MI_HINT_MAX) {  // wrap or initialize
    uintptr_t init = MI_HINT_BASE;
    #if (MI_SECURE>0 || MI_DEBUG==0)      // security: randomize start of aligned allocations unless in debug mode
    uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
    init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA);  // (randomly 20 bits)*4MiB == 0 to 4TiB
    #endif
    uintptr_t expected = hint + size;
    mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init);
    hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all
  }
  if (hint%try_alignment != 0) return NULL;
  return (void*)hint;
}
#else
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
  MI_UNUSED(try_alignment); MI_UNUSED(size);
  return NULL;
}
#endif
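// Illustrative note (commentary added here, not upstream): each call bumps
// `aligned_base` by the segment-aligned size, so consecutive one-segment
// allocations receive hints base, base + MI_SEGMENT_SIZE,
// base + 2*MI_SEGMENT_SIZE, ...; every hint stays segment aligned because
// both the (randomized) base and the added sizes are multiples of
// MI_SEGMENT_SIZE.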


/* -----------------------------------------------------------
  Free memory
-------------------------------------------------------------- */

static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);

static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) {
  MI_UNUSED(tld_stats);
  mi_assert_internal((size % _mi_os_page_size()) == 0);
  if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
  int err = _mi_prim_free(addr, size);
  if (err != 0) {
    _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
  }
  mi_stats_t* stats = &_mi_stats_main;
  if (still_committed) { _mi_stat_decrease(&stats->committed, size); }
  _mi_stat_decrease(&stats->reserved, size);
}

void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* tld_stats) {
  if (mi_memkind_is_os(memid.memkind)) {
    size_t csize = _mi_os_good_alloc_size(size);
    void* base = addr;
    // different base? (due to alignment)
    if (memid.mem.os.base != NULL) {
      mi_assert(memid.mem.os.base <= addr);
      mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
      base = memid.mem.os.base;
      csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base);
    }
    // free it
    if (memid.memkind == MI_MEM_OS_HUGE) {
      mi_assert(memid.is_pinned);
      mi_os_free_huge_os_pages(base, csize, tld_stats);
    }
    else {
      mi_os_prim_free(base, csize, still_committed, tld_stats);
    }
  }
  else {
    // nothing to do
    mi_assert(memid.memkind < MI_MEM_OS);
  }
}

void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) {
  _mi_os_free_ex(p, size, true, memid, tld_stats);
}


/* -----------------------------------------------------------
   Primitive allocation from the OS.
-------------------------------------------------------------- */

// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* stats) {
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  mi_assert_internal(is_zero != NULL);
  mi_assert_internal(is_large != NULL);
  if (size == 0) return NULL;
  if (!commit) { allow_large = false; }
  if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning

  *is_zero = false;
  void* p = NULL;
  int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
  if (err != 0) {
    _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
  }
  mi_stat_counter_increase(stats->mmap_calls, 1);
  if (p != NULL) {
    _mi_stat_increase(&stats->reserved, size);
    if (commit) {
      _mi_stat_increase(&stats->committed, size);
      // seems needed for asan (or `mimalloc-test-api` fails)
      #ifdef MI_TRACK_ASAN
      if (*is_zero) { mi_track_mem_defined(p,size); }
               else { mi_track_mem_undefined(p,size); }
      #endif
    }
  }
  return p;
}


// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) {
  mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  mi_assert_internal(is_large != NULL);
  mi_assert_internal(is_zero != NULL);
  mi_assert_internal(base != NULL);
  if (!commit) allow_large = false;
  if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
  size = _mi_align_up(size, _mi_os_page_size());

  // try first with a hint (this will be aligned directly on Win 10+ or BSD)
  void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
  if (p == NULL) return NULL;

  // aligned already?
  if (((uintptr_t)p % alignment) == 0) {
    *base = p;
  }
  else {
    // if not aligned, free it, overallocate, and unmap around it
    _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
    mi_os_prim_free(p, size, commit, stats);
    if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
    const size_t over_size = size + alignment;

    if (mi_os_mem_config.must_free_whole) {  // win32 VirtualAlloc cannot free parts of an allocated block
      // over-allocate uncommitted (virtual) memory
      p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
      if (p == NULL) return NULL;

      // set p to the aligned part in the full region
      // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
      // this is handled though by having the `base` field in the memid's
      *base = p; // remember the base
      p = mi_align_up_ptr(p, alignment);

      // explicitly commit only the aligned part
      if (commit) {
        _mi_os_commit(p, size, NULL, stats);
      }
    }
    else { // mmap can free inside an allocation
      // overallocate...
      p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
      if (p == NULL) return NULL;

      // and selectively unmap parts around the over-allocated area. (noop on sbrk)
      void* aligned_p = mi_align_up_ptr(p, alignment);
      size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
      size_t mid_size = _mi_align_up(size, _mi_os_page_size());
      size_t post_size = over_size - pre_size - mid_size;
      mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
      if (pre_size > 0)  { mi_os_prim_free(p, pre_size, commit, stats); }
      if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
      // we can return the aligned pointer on `mmap` (and sbrk) systems
      p = aligned_p;
      *base = aligned_p; // since we freed the pre part, `*base == p`.
    }
  }

  mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0));
  return p;
}
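// Worked example (commentary added here, not upstream): on an mmap system,
// requesting size = 8 MiB aligned to 4 MiB may return p at 0x3ff00000;
// over_size becomes 12 MiB, aligned_p rounds up to 0x40000000, so
// pre_size = 1 MiB is unmapped before aligned_p and post_size = 3 MiB is
// unmapped after the 8 MiB middle, leaving exactly the aligned range mapped.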


/* -----------------------------------------------------------
  OS API: alloc and alloc_aligned
----------------------------------------------------------- */

void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* tld_stats) {
  MI_UNUSED(tld_stats);
  *memid = _mi_memid_none();
  mi_stats_t* stats = &_mi_stats_main;
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  bool os_is_large = false;
  bool os_is_zero  = false;
  void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
  if (p != NULL) {
    *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
  }
  return p;
}

void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats)
{
  MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
  MI_UNUSED(tld_stats);
  *memid = _mi_memid_none();
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  alignment = _mi_align_up(alignment, _mi_os_page_size());

  bool os_is_large = false;
  bool os_is_zero  = false;
  void* os_base = NULL;
  void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, &_mi_stats_main /*tld->stats*/ );
  if (p != NULL) {
    *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
    memid->mem.os.base = os_base;
    memid->mem.os.alignment = alignment;
  }
  return p;
}

/* -----------------------------------------------------------
  OS aligned allocation with an offset. This is used
  for large alignments > MI_ALIGNMENT_MAX. We use a large mimalloc
  page where the object can be aligned at an offset from the start of the segment.
  As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
  to use the actual start of the memory region.
----------------------------------------------------------- */

void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats) {
  mi_assert(offset <= MI_SEGMENT_SIZE);
  mi_assert(offset <= size);
  mi_assert((alignment % _mi_os_page_size()) == 0);
  *memid = _mi_memid_none();
  if (offset > MI_SEGMENT_SIZE) return NULL;
  if (offset == 0) {
    // regular aligned allocation
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld_stats);
  }
  else {
    // overallocate to align at an offset
    const size_t extra = _mi_align_up(offset, alignment) - offset;
    const size_t oversize = size + extra;
    void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, tld_stats);
    if (start == NULL) return NULL;

    void* const p = (uint8_t*)start + extra;
    mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
    // decommit the overallocation at the start
    if (commit && extra > _mi_os_page_size()) {
      _mi_os_decommit(start, extra, tld_stats);
    }
    return p;
  }
}
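// Worked example (commentary added here, not upstream): with
// alignment = 64 KiB and offset = 16 KiB, extra = 64 - 16 = 48 KiB; the
// returned p = start + 48 KiB, so p + offset lands exactly on the next
// 64 KiB boundary while start itself remains 64 KiB aligned.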

/* -----------------------------------------------------------
  OS memory API: reset, commit, decommit, protect, unprotect.
----------------------------------------------------------- */

// OS page align within a given area, either conservative (pages inside the area only),
// or not (straddling pages outside the area is possible)
static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) {
  mi_assert(addr != NULL && size > 0);
  if (newsize != NULL) *newsize = 0;
  if (size == 0 || addr == NULL) return NULL;

  // page align conservatively within the range
  void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size())
                              : mi_align_down_ptr(addr, _mi_os_page_size()));
  void* end = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size())
                            : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
  ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
  if (diff <= 0) return NULL;

  mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size));
  if (newsize != NULL) *newsize = (size_t)diff;
  return start;
}
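// Worked example (commentary added here, not upstream): with 4 KiB pages,
// an area [0x1800, 0x4800) aligned conservatively becomes [0x2000, 0x4000)
// (8 KiB, fully inside the area), while the non-conservative variant yields
// [0x1000, 0x5000) (16 KiB, straddling the partial pages at both ends).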

static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* newsize) {
  return mi_os_page_align_areax(true, addr, size, newsize);
}

bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
  MI_UNUSED(tld_stats);
  mi_stats_t* stats = &_mi_stats_main;
  if (is_zero != NULL) { *is_zero = false; }
  _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
  _mi_stat_counter_increase(&stats->commit_calls, 1);

  // page align range
  size_t csize;
  void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
  if (csize == 0) return true;

  // commit
  bool os_is_zero = false;
  int err = _mi_prim_commit(start, csize, &os_is_zero);
  if (err != 0) {
    _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
    return false;
  }
  if (os_is_zero && is_zero != NULL) {
    *is_zero = true;
    mi_assert_expensive(mi_mem_is_zero(start, csize));
  }
  // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
  #ifdef MI_TRACK_ASAN
  if (os_is_zero) { mi_track_mem_defined(start,csize); }
             else { mi_track_mem_undefined(start,csize); }
  #endif
  return true;
}

static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) {
  MI_UNUSED(tld_stats);
  mi_stats_t* stats = &_mi_stats_main;
  mi_assert_internal(needs_recommit!=NULL);
  _mi_stat_decrease(&stats->committed, size);

  // page align
  size_t csize;
  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
  if (csize == 0) return true;

  // decommit
  *needs_recommit = true;
  int err = _mi_prim_decommit(start,csize,needs_recommit);
  if (err != 0) {
    _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
  }
  mi_assert_internal(err == 0);
  return (err == 0);
}

bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
  bool needs_recommit;
  return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats);
}


// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
  // page align conservatively within the range
  size_t csize;
  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
  if (csize == 0) return true;  // || _mi_os_is_huge_reserved(addr)
  _mi_stat_increase(&stats->reset, csize);
  _mi_stat_counter_increase(&stats->reset_calls, 1);

  #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
  memset(start, 0, csize); // pretend it is eagerly reset
  #endif

  int err = _mi_prim_reset(start, csize);
  if (err != 0) {
    _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
  }
  return (err == 0);
}


// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
{
  if (mi_option_get(mi_option_purge_delay) < 0) return false;  // is purging allowed?
  _mi_stat_counter_increase(&stats->purge_calls, 1);
  _mi_stat_increase(&stats->purged, size);

  if (mi_option_is_enabled(mi_option_purge_decommits) &&  // should decommit?
      !_mi_preloading())                                  // don't decommit during preloading (unsafe)
  {
    bool needs_recommit = true;
    mi_os_decommit_ex(p, size, &needs_recommit, stats);
    return needs_recommit;
  }
  else {
    if (allow_reset) {  // this is sometimes not allowed if the range is not fully committed
      _mi_os_reset(p, size, stats);
    }
    return false;  // needs no recommit
  }
}

// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
  return _mi_os_purge_ex(p, size, true, stats);
}

// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
  // page align conservatively within the range
  size_t csize = 0;
  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
  if (csize == 0) return false;
  /*
  if (_mi_os_is_huge_reserved(addr)) {
    _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
  }
  */
  int err = _mi_prim_protect(start,csize,protect);
  if (err != 0) {
    _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
  }
  return (err == 0);
}

bool _mi_os_protect(void* addr, size_t size) {
  return mi_os_protectx(addr, size, true);
}

bool _mi_os_unprotect(void* addr, size_t size) {
  return mi_os_protectx(addr, size, false);
}



/* ----------------------------------------------------------------------------
Support for allocating huge OS pages (1GiB) that are reserved up-front
and possibly associated with a specific NUMA node. (use `numa_node>=0`)
-----------------------------------------------------------------------------*/
#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)


#if (MI_INTPTR_SIZE >= 8)
// To ensure proper alignment, use our own area for huge OS pages
static mi_decl_cache_align _Atomic(uintptr_t) mi_huge_start; // = 0

// Claim an aligned address range for huge pages
static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
  if (total_size != NULL) *total_size = 0;
  const size_t size = pages * MI_HUGE_OS_PAGE_SIZE;

  uintptr_t start = 0;
  uintptr_t end = 0;
  uintptr_t huge_start = mi_atomic_load_relaxed(&mi_huge_start);
  do {
    start = huge_start;
    if (start == 0) {
      // Initialize the start address after the 32TiB area
      start = ((uintptr_t)32 << 40);    // 32TiB virtual start address
      #if (MI_SECURE>0 || MI_DEBUG==0)  // security: randomize start of huge pages unless in debug mode
      uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
      start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12 bits)*1GiB == between 0 and 4TiB
      #endif
    }
    end = start + size;
    mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
  } while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end));

  if (total_size != NULL) *total_size = size;
  return (uint8_t*)start;
}
#else
static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
  MI_UNUSED(pages);
  if (total_size != NULL) *total_size = 0;
  return NULL;
}
#endif

// Allocate MI_SEGMENT_SIZE aligned huge pages
void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid) {
  *memid = _mi_memid_none();
  if (psize != NULL) *psize = 0;
  if (pages_reserved != NULL) *pages_reserved = 0;
  size_t size = 0;
  uint8_t* start = mi_os_claim_huge_pages(pages, &size);
  if (start == NULL) return NULL; // or on 32-bit systems

  // Allocate one page at a time, but try to place them contiguously.
  // We allocate one page at a time to be able to abort if it takes too long,
  // or to at least allocate as many pages as are available on the system.
  mi_msecs_t start_t = _mi_clock_start();
  size_t page = 0;
  bool all_zero = true;
  while (page < pages) {
    // allocate a page
    bool is_zero = false;
    void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE);
    void* p = NULL;
    int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p);
    if (!is_zero) { all_zero = false; }
    if (err != 0) {
      _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE);
      break;
    }

    // Did we succeed at a contiguous address?
    if (p != addr) {
      // no success, issue a warning and break
      if (p != NULL) {
        _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
        mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main);
      }
      break;
    }

    // success, record it
    page++;  // increase before timeout check (see issue #711)
    _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
    _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);

    // check for timeout
    if (max_msecs > 0) {
      mi_msecs_t elapsed = _mi_clock_end(start_t);
      if (page >= 1) {
        mi_msecs_t estimate = ((elapsed / (page+1)) * pages);
        if (estimate > 2*max_msecs) { // seems like we are going to timeout, break
          elapsed = max_msecs + 1;
        }
      }
      if (elapsed > max_msecs) {
        _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page);
        break;
      }
    }
  }
  mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size);
  if (pages_reserved != NULL) { *pages_reserved = page; }
  if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; }
  if (page != 0) {
    mi_assert(start != NULL);
    *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */);
    memid->memkind = MI_MEM_OS_HUGE;
    mi_assert(memid->is_pinned);
    #ifdef MI_TRACK_ASAN
    if (all_zero) { mi_track_mem_defined(start,size); }
    #endif
  }
  return (page == 0 ? NULL : start);
}
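// Worked example (commentary added here, not upstream): reserving pages = 8
// one-GiB pages with max_msecs = 1000, if the first two pages took 600 ms
// then estimate = (600/3)*8 = 1600 ms, which does not exceed 2*max_msecs,
// so allocation continues; had the estimate exceeded 2000 ms, elapsed would
// be forced past max_msecs and the loop would stop early with a warning.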

// free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
  if (p==NULL || size==0) return;
  uint8_t* base = (uint8_t*)p;
  while (size >= MI_HUGE_OS_PAGE_SIZE) {
    mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats);
    size -= MI_HUGE_OS_PAGE_SIZE;
    base += MI_HUGE_OS_PAGE_SIZE;
  }
}

/* ----------------------------------------------------------------------------
Support NUMA aware allocation
-----------------------------------------------------------------------------*/

_Atomic(size_t) _mi_numa_node_count; // = 0   // cache the node count

size_t _mi_os_numa_node_count_get(void) {
  size_t count = mi_atomic_load_acquire(&_mi_numa_node_count);
  if (count <= 0) {
    long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
    if (ncount > 0) {
      count = (size_t)ncount;
    }
    else {
      count = _mi_prim_numa_node_count(); // or detect dynamically
      if (count == 0) count = 1;
    }
    mi_atomic_store_release(&_mi_numa_node_count, count); // save it
    _mi_verbose_message("using %zd numa regions\n", count);
  }
  return count;
}

int _mi_os_numa_node_get(mi_os_tld_t* tld) {
  MI_UNUSED(tld);
  size_t numa_count = _mi_os_numa_node_count();
  if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
  // never more than the node count and >= 0
  size_t numa_node = _mi_prim_numa_node();
  if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
  return (int)numa_node;
}
@@ -0,0 +1,332 @@
/*----------------------------------------------------------------------------
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif


/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */


static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given field size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we do not round the first 8 sizes,
    //   which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
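// Worked example (commentary added here, not upstream): on a 64-bit system
// a request of 196 bytes gives wsize = 25 words; after wsize-- (24),
// b = mi_bsr(24) = 4, so bin = (4<<2) + ((24>>2) & 0x03) - 3
//     = 16 + 2 - 3 = 15.
// All sizes mapping to the same bin share one page queue.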



/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size));
  }
  else {
    return _mi_align_up(size,_mi_os_page_size());
  }
}

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}
#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_heap_t* heap = mi_page_heap(page);
  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
  return pq;
}

// The current small page array is for efficiency and for each
// small size (up to 256) it points directly to the page for that
// size without having to compute the bin. This means when the
// current free page queue is updated for a small bin, we need to update a
// range of entries in `_mi_page_small_free`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx<=1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
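// Worked example (commentary added here, not upstream): if the queue for
// 96-byte blocks (idx == 12 words) follows a 64-byte queue (8 words), then
// start == 9 and the loop rewrites pages_free_direct[9..12], so every small
// request of 65..96 bytes finds the queue's new first page directly, without
// recomputing the bin.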

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}


static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}


static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));

  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // set append pages to new heap and count
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid wrong assertion during absorption;
    // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
|
||||
mi_assert_internal(pq->first==NULL);
|
||||
pq->first = append->first;
|
||||
pq->last = append->last;
|
||||
mi_heap_queue_first_update(heap, pq);
|
||||
}
|
||||
else {
|
||||
// append to end
|
||||
mi_assert_internal(pq->last!=NULL);
|
||||
mi_assert_internal(append->first!=NULL);
|
||||
pq->last->next = append->first;
|
||||
append->first->prev = pq->last;
|
||||
pq->last = append->last;
|
||||
}
|
||||
return count;
|
||||
}
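
// Usage sketch (a hypothetical caller, not in the original source; the real
// caller is `mi_heap_absorb` which destroys `from` after moving its pages):
//
//   for (size_t bin = 0; bin <= MI_BIN_FULL; bin++) {
//     to->page_count += _mi_page_queue_append(to, &to->pages[bin], &from->pages[bin]);
//   }
//
// Each appended page is re-pointed at the new heap before it is linked in,
// so a concurrent delayed free can never observe a page that sits in the new
// heap's queue but still claims the old heap as its owner.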

@@ -0,0 +1,939 @@
/*----------------------------------------------------------------------------
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  The core of the allocator. Every segment contains
  pages of a certain block size. The main function
  exported is `_mi_malloc_generic`.
----------------------------------------------------------- */

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#define MI_IN_PAGE_C
#include "page-queue.c"
#undef MI_IN_PAGE_C


/* -----------------------------------------------------------
  Page helpers
----------------------------------------------------------- */

// Index a block in a page
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
  MI_UNUSED(page);
  mi_assert_internal(page != NULL);
  mi_assert_internal(i <= page->reserved);
  return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
}

static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);

#if (MI_DEBUG>=3)
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
  size_t count = 0;
  while (head != NULL) {
    mi_assert_internal(page == _mi_ptr_page(head));
    count++;
    head = mi_block_next(page, head);
  }
  return count;
}

/*
// Start of the page available memory
static inline uint8_t* mi_page_area(const mi_page_t* page) {
  return _mi_page_start(_mi_page_segment(page), page, NULL);
}
*/

static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
  size_t psize;
  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_block_t* start = (mi_block_t*)page_area;
  mi_block_t* end   = (mi_block_t*)(page_area + psize);
  while(p != NULL) {
    if (p < start || p >= end) return false;
    p = mi_block_next(page, p);
  }
#if MI_DEBUG>3 // generally too expensive to check this
  if (page->free_is_zero) {
    const size_t ubsize = mi_page_usable_block_size(page);
    for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
    }
  }
#endif
  return true;
}

static bool mi_page_is_valid_init(mi_page_t* page) {
  mi_assert_internal(page->xblock_size > 0);
  mi_assert_internal(page->used <= page->capacity);
  mi_assert_internal(page->capacity <= page->reserved);

  mi_segment_t* segment = _mi_page_segment(page);
  uint8_t* start = _mi_page_start(segment,page,NULL);
  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
  //const size_t bsize = mi_page_block_size(page);
  //mi_assert_internal(start + page->capacity*page->block_size == page->top);

  mi_assert_internal(mi_page_list_is_valid(page,page->free));
  mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

  #if MI_DEBUG>3 // generally too expensive to check this
  if (page->free_is_zero) {
    const size_t ubsize = mi_page_usable_block_size(page);
    for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
    }
  }
  #endif

  #if !MI_TRACK_ENABLED && !MI_TSAN
  mi_block_t* tfree = mi_page_thread_free(page);
  mi_assert_internal(mi_page_list_is_valid(page, tfree));
  //size_t tfree_count = mi_page_list_count(page, tfree);
  //mi_assert_internal(tfree_count <= page->thread_freed + 1);
  #endif

  size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
  mi_assert_internal(page->used + free_count == page->capacity);

  return true;
}

extern bool _mi_process_is_initialized;  // has mi_process_init been called?

bool _mi_page_is_valid(mi_page_t* page) {
  mi_assert_internal(mi_page_is_valid_init(page));
  #if MI_SECURE
  mi_assert_internal(page->keys[0] != 0);
  #endif
  if (mi_page_heap(page)!=NULL) {
    mi_segment_t* segment = _mi_page_segment(page);

    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
    #if MI_HUGE_PAGE_ABANDON
    if (segment->kind != MI_SEGMENT_HUGE)
    #endif
    {
      mi_page_queue_t* pq = mi_page_queue_of(page);
      mi_assert_internal(mi_page_queue_contains(pq, page));
      mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
      mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
    }
  }
  return true;
}
#endif

void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
  while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
    mi_atomic_yield();
  }
}

bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
  mi_thread_free_t tfreex;
  mi_delayed_t     old_delay;
  mi_thread_free_t tfree;
  size_t yield_count = 0;
  do {
    tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
    tfreex = mi_tf_set_delayed(tfree, delay);
    old_delay = mi_tf_delayed(tfree);
    if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
      if (yield_count >= 4) return false;  // give up after 4 tries
      yield_count++;
      mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
      // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
    }
    else if (delay == old_delay) {
      break; // avoid atomic operation if already equal
    }
    else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
      break; // leave never-delayed flag set
    }
  } while ((old_delay == MI_DELAYED_FREEING) ||
           !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  return true; // success
}
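
// Note (an explanatory sketch, not in the original source): `xthread_free`
// packs the delayed-free state and the thread-free list head into one word,
// so a single weak CAS changes the state without losing concurrently pushed
// blocks. The states cycle roughly as
//
//   MI_USE_DELAYED_FREE -> MI_DELAYED_FREEING -> MI_NO_DELAYED_FREE
//
// where MI_DELAYED_FREEING is only held briefly by a thread that is pushing a
// block onto the owning heap's `thread_delayed_free` list; that is why the
// loop above merely yields (and gives up after 4 tries) when it sees it.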

/* -----------------------------------------------------------
  Collect the `local_free` and `thread_free` lists of a page
----------------------------------------------------------- */

// Collect the local `thread_free` list using an atomic exchange.
// Note: The exchange must be done atomically as this is used right after
// moving to the full list in `mi_page_collect_ex` and we need to
// ensure that there was no race where the page became unfull just before the move.
static void _mi_page_thread_free_collect(mi_page_t* page)
{
  mi_block_t* head;
  mi_thread_free_t tfreex;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    head = mi_tf_block(tfree);
    tfreex = mi_tf_set_block(tfree,NULL);
  } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex));

  // return if the list is empty
  if (head == NULL) return;

  // find the tail -- also to get a proper count (without data races)
  uint32_t max_count = page->capacity; // cannot collect more than capacity
  uint32_t count = 1;
  mi_block_t* tail = head;
  mi_block_t* next;
  while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
    count++;
    tail = next;
  }
  // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
  if (count > max_count) {
    _mi_error_message(EFAULT, "corrupted thread-free list\n");
    return; // the thread-free items cannot be freed
  }

  // and append the current local free list
  mi_block_set_next(page,tail, page->local_free);
  page->local_free = head;

  // update counts now
  page->used -= count;
}
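
// Example (illustrative, not in the original source): a double free of the
// same block from two other threads can make the thread-free list circular,
// turning the tail walk above into an infinite loop; capping the walk at
// `page->capacity` (no page can ever hold more blocks than that) converts
// such corruption into a reported error instead of a hang.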

void _mi_page_free_collect(mi_page_t* page, bool force) {
  mi_assert_internal(page!=NULL);

  // collect the thread free list
  if (force || mi_page_thread_free(page) != NULL) {  // quick test to avoid an atomic operation
    _mi_page_thread_free_collect(page);
  }

  // and the local free list
  if (page->local_free != NULL) {
    if mi_likely(page->free == NULL) {
      // usual case
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
    else if (force) {
      // append -- only on shutdown (force) as this is a linear operation
      mi_block_t* tail = page->local_free;
      mi_block_t* next;
      while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
      mi_block_set_next(page, tail, page->free);
      page->free = page->local_free;
      page->local_free = NULL;
      page->free_is_zero = false;
    }
  }

  mi_assert_internal(!force || page->local_free == NULL);
}
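
// Note (an explanatory sketch, not in the original source): keeping `free`
// and `local_free` separate means the allocation fast path drains `free`
// down to NULL at a predictable rate, which is what lets the deferred-free
// hook (see `_mi_deferred_free` below) fire deterministically after a
// bounded number of allocations; the swap above is O(1) in the common case
// and only degrades to a linear append on forced collection.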


/* -----------------------------------------------------------
  Page fresh and retire
----------------------------------------------------------- */

// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
  mi_assert_expensive(mi_page_is_valid_init(page));

  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
  #if MI_HUGE_PAGE_ABANDON
  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  #endif

  // TODO: push on full queue immediately if it is full?
  mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
  mi_page_queue_push(heap, pq, page);
  mi_assert_expensive(_mi_page_is_valid(page));
}

// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
  #if !MI_HUGE_PAGE_ABANDON
  mi_assert_internal(pq != NULL);
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
  #endif
  mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
  if (page == NULL) {
    // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
    return NULL;
  }
  mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  mi_assert_internal(pq!=NULL || page->xblock_size != 0);
  mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
  // a fresh page was found, initialize it
  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
  mi_assert_internal(full_block_size >= block_size);
  mi_page_init(heap, page, full_block_size, heap->tld);
  mi_heap_stat_increase(heap, pages, 1);
  if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
  mi_assert_expensive(_mi_page_is_valid(page));
  return page;
}

// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));
  mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
  if (page==NULL) return NULL;
  mi_assert_internal(pq->block_size==mi_page_block_size(page));
  mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
  return page;
}

/* -----------------------------------------------------------
  Do any delayed frees
  (put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
void _mi_heap_delayed_free_all(mi_heap_t* heap) {
  while (!_mi_heap_delayed_free_partial(heap)) {
    mi_atomic_yield();
  }
}

// returns true if all delayed frees were processed
bool _mi_heap_delayed_free_partial(mi_heap_t* heap) {
  // take over the list (note: no atomic exchange since it is often NULL)
  mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
  while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ };
  bool all_freed = true;

  // and free them all
  while(block != NULL) {
    mi_block_t* next = mi_block_nextx(heap,block, heap->keys);
    // use internal free instead of regular one to keep stats etc correct
    if (!_mi_free_delayed_block(block)) {
      // we might already start delayed freeing while another thread has not yet
      // reset the delayed_freeing flag; in that case delay it further by reinserting the current block
      // into the delayed free list
      all_freed = false;
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap, block, dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }
    block = next;
  }
  return all_freed;
}
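
// Usage note (illustrative, not in the original source): the partial variant
// is intended for the allocation slow path where blocking would be harmful,
// so it re-queues blocks whose page is still transitioning (delayed freeing)
// and reports `false`; `_mi_heap_delayed_free_all` simply retries until
// everything has drained, which is only appropriate at shutdown or during an
// explicit collection.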

/* -----------------------------------------------------------
  Unfull, abandon, free and retire
----------------------------------------------------------- */

// Move a page from the full list back to a regular list
void _mi_page_unfull(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_is_in_full(page));
  if (!mi_page_is_in_full(page)) return;

  mi_heap_t* heap = mi_page_heap(page);
  mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
  mi_page_set_in_full(page, false); // to get the right queue
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_page_set_in_full(page, true);
  mi_page_queue_enqueue_from(pq, pqfull, page);
}

static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(!mi_page_immediate_available(page));
  mi_assert_internal(!mi_page_is_in_full(page));

  if (mi_page_is_in_full(page)) return;
  mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
  _mi_page_free_collect(page,false);  // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
}


// Abandon a page with used blocks at the end of a thread.
// Note: only call if it is ensured that no references exist from
// the `page->heap->thread_delayed_free` into this page.
// Currently only called through `mi_heap_collect_ex` which ensures this.
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_heap(page) != NULL);

  mi_heap_t* pheap = mi_page_heap(page);

  // remove from our page list
  mi_segments_tld_t* segments_tld = &pheap->tld->segments;
  mi_page_queue_remove(pq, page);

  // page is no longer associated with our heap
  mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
  mi_page_set_heap(page, NULL);

  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
  // check there are no references left..
  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
    mi_assert_internal(_mi_ptr_page(block) != page);
  }
  #endif

  // and abandon it
  mi_assert_internal(mi_page_heap(page) == NULL);
  _mi_segment_page_abandon(page,segments_tld);
}


// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_all_free(page));
  mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING);

  // no more aligned blocks in here
  mi_page_set_has_aligned(page, false);

  mi_heap_t* heap = mi_page_heap(page);

  // remove from the page list
  // (no need to do _mi_heap_delayed_free first as all blocks are already free)
  mi_segments_tld_t* segments_tld = &heap->tld->segments;
  mi_page_queue_remove(pq, page);

  // and free it
  mi_page_set_heap(page,NULL);
  _mi_segment_page_free(page, force, segments_tld);
}

// Retire parameters
#define MI_MAX_RETIRE_SIZE    (MI_MEDIUM_OBJ_SIZE_MAX)
#define MI_RETIRE_CYCLES      (16)

// Retire a page with no more used blocks.
// It is important not to retire too quickly though, as new
// allocations might be coming.
// Note: called from `mi_free` and benchmarks often
// trigger this due to freeing everything and then
// allocating again, so be careful when changing this.
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_all_free(page));

  mi_page_set_has_aligned(page, false);

  // don't retire too often..
  // (or we end up retiring and re-allocating most of the time)
  // NOTE: refine this more: we should not retire if this
  // is the only page left with free blocks. It is not clear
  // how to check this efficiently though...
  // for now, we don't retire if it is the only page left of this size class.
  mi_page_queue_t* pq = mi_page_queue_of(page);
  if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) {  // not too large && not full or huge queue?
    if (pq->last==page && pq->first==page) { // the only page in the queue?
      mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
      page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
      mi_heap_t* heap = mi_page_heap(page);
      mi_assert_internal(pq >= heap->pages);
      const size_t index = pq - heap->pages;
      mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
      if (index < heap->page_retired_min) heap->page_retired_min = index;
      if (index > heap->page_retired_max) heap->page_retired_max = index;
      mi_assert_internal(mi_page_all_free(page));
      return; // don't free after all
    }
  }
  _mi_page_free(page, pq, false);
}
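
// Example (illustrative, not in the original source): with MI_RETIRE_CYCLES
// at 16, a small page that becomes fully free gets retire_expire = 17; each
// call to `_mi_heap_collect_retired` (below) decrements it while the page
// stays free, so the page survives a grace period of roughly 16 collection
// rounds in case the program immediately allocates that size class again.
// Larger (non-small) pages use a quarter of that (MI_RETIRE_CYCLES/4).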

// free retired pages: we don't need to look at the entire queues
// since we only retire pages that are at the head position in a queue.
void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
  size_t min = MI_BIN_FULL;
  size_t max = 0;
  for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
    mi_page_queue_t* pq = &heap->pages[bin];
    mi_page_t* page = pq->first;
    if (page != NULL && page->retire_expire != 0) {
      if (mi_page_all_free(page)) {
        page->retire_expire--;
        if (force || page->retire_expire == 0) {
          _mi_page_free(pq->first, pq, force);
        }
        else {
          // keep retired, update min/max
          if (bin < min) min = bin;
          if (bin > max) max = bin;
        }
      }
      else {
        page->retire_expire = 0;
      }
    }
  }
  heap->page_retired_min = min;
  heap->page_retired_max = max;
}


/* -----------------------------------------------------------
  Initialize the initial free list in a page.
  In secure mode we initialize a randomized list by
  alternating between slices.
----------------------------------------------------------- */

#define MI_MAX_SLICE_SHIFT  (6)   // at most 64 slices
#define MI_MAX_SLICES       (1UL << MI_MAX_SLICE_SHIFT)
#define MI_MIN_SLICES       (2)

static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
  MI_UNUSED(stats);
  #if (MI_SECURE<=2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  mi_assert_internal(bsize == mi_page_block_size(page));
  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);

  // initialize a randomized free list
  // set up `slice_count` slices to alternate between
  size_t shift = MI_MAX_SLICE_SHIFT;
  while ((extend >> shift) == 0) {
    shift--;
  }
  const size_t slice_count = (size_t)1U << shift;
  const size_t slice_extend = extend / slice_count;
  mi_assert_internal(slice_extend >= 1);
  mi_block_t* blocks[MI_MAX_SLICES];   // current start of the slice
  size_t      counts[MI_MAX_SLICES];   // available objects in the slice
  for (size_t i = 0; i < slice_count; i++) {
    blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend);
    counts[i] = slice_extend;
  }
  counts[slice_count-1] += (extend % slice_count);  // final slice holds the modulus too (todo: distribute evenly?)

  // and initialize the free list by randomly threading through them
  // set up first element
  const uintptr_t r = _mi_heap_random_next(heap);
  size_t current = r % slice_count;
  counts[current]--;
  mi_block_t* const free_start = blocks[current];
  // and iterate through the rest; use `random_shuffle` for performance
  uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0
  for (size_t i = 1; i < extend; i++) {
    // call random_shuffle only every INTPTR_SIZE rounds
    const size_t round = i%MI_INTPTR_SIZE;
    if (round == 0) rnd = _mi_random_shuffle(rnd);
    // select a random next slice index
    size_t next = ((rnd >> 8*round) & (slice_count-1));
    while (counts[next]==0) {                            // ensure it still has space
      next++;
      if (next==slice_count) next = 0;
    }
    // and link the current block to it
    counts[next]--;
    mi_block_t* const block = blocks[current];
    blocks[current] = (mi_block_t*)((uint8_t*)block + bsize);  // bump to the following block
    mi_block_set_next(page, block, blocks[next]);   // and set next; note: we may have `current == next`
    current = next;
  }
  // prepend to the free list (usually NULL)
  mi_block_set_next(page, blocks[current], page->free);  // end of the list
  page->free = free_start;
}
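
// Note (an explanatory sketch, not in the original source): the slice trick
// approximates a full shuffle at a fraction of the cost. With e.g.
// extend = 64 blocks, all 64 slices hold a single block each and the free
// list order is essentially random; with fewer slices, blocks are only
// randomized *across* slices while staying sequential *within* one, which
// still makes the address of the next allocation hard to predict cheaply.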

static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
{
  MI_UNUSED(stats);
  #if (MI_SECURE <= 2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  mi_assert_internal(bsize == mi_page_block_size(page));
  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );

  mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);

  // initialize a sequential free list
  mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1);
  mi_block_t* block = start;
  while(block <= last) {
    mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
    mi_block_set_next(page,block,next);
    block = next;
  }
  // prepend to free list (usually `NULL`)
  mi_block_set_next(page, last, page->free);
  page->free = start;
}

/* -----------------------------------------------------------
  Page initialize and extend the capacity
----------------------------------------------------------- */

#define MI_MAX_EXTEND_SIZE    (4*1024)      // heuristic, one OS page seems to work well.
#if (MI_SECURE>0)
#define MI_MIN_EXTEND         (8*MI_SECURE) // extend at least by this many
#else
#define MI_MIN_EXTEND         (4)
#endif

// Extend the capacity (up to reserved) by initializing a free list
// We do at most `MI_MAX_EXTEND` to avoid touching too much memory
// Note: we also experimented with "bump" allocation on the first
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
  MI_UNUSED(tld);
  mi_assert_expensive(mi_page_is_valid_init(page));
  #if (MI_SECURE<=2)
  mi_assert(page->free == NULL);
  mi_assert(page->local_free == NULL);
  if (page->free != NULL) return;
  #endif
  if (page->capacity >= page->reserved) return;

  size_t page_size;
  _mi_page_start(_mi_page_segment(page), page, &page_size);
  mi_stat_counter_increase(tld->stats.pages_extended, 1);

  // calculate the extend count
  const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
  size_t extend = page->reserved - page->capacity;
  mi_assert_internal(extend > 0);

  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
  if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
  mi_assert_internal(max_extend > 0);

  if (extend > max_extend) {
    // ensure we don't touch memory beyond the page to reduce page commit.
    // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
    extend = max_extend;
  }

  mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
  mi_assert_internal(extend < (1UL<<16));

  // and append the extended part to the free list
  if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
    mi_page_free_list_extend(page, bsize, extend, &tld->stats );
  }
  else {
    mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats);
  }
  // enable the new free list
  page->capacity += (uint16_t)extend;
  mi_stat_increase(tld->stats.page_committed, extend * bsize);
  mi_assert_expensive(mi_page_is_valid_init(page));
}
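
// Worked example (illustrative, not in the original source): for a page of
// 64-byte blocks with 1024 blocks reserved, the first call extends by only
// MI_MAX_EXTEND_SIZE / 64 = 64 blocks, so just the first 4 KiB of the page
// is ever touched (and thus committed) until those blocks are used up;
// subsequent calls lazily extend the rest in the same increments.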

// Initialize a fresh page
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert(segment != NULL);
  mi_assert_internal(block_size > 0);
  // set fields
  mi_page_set_heap(page, heap);
  page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
  size_t page_size;
  const void* page_start = _mi_segment_page_start(segment, page, &page_size);
  MI_UNUSED(page_start);
  mi_track_mem_noaccess(page_start,page_size);
  mi_assert_internal(mi_page_block_size(page) <= page_size);
  mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
  mi_assert_internal(page_size / block_size < (1L<<16));
  page->reserved = (uint16_t)(page_size / block_size);
  mi_assert_internal(page->reserved > 0);
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  page->keys[0] = _mi_heap_random_next(heap);
  page->keys[1] = _mi_heap_random_next(heap);
  #endif
  page->free_is_zero = page->is_zero_init;
  #if MI_DEBUG>2
  if (page->is_zero_init) {
    mi_track_mem_defined(page_start, page_size);
    mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
  }
  #endif

  mi_assert_internal(page->is_committed);
  mi_assert_internal(page->capacity == 0);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->used == 0);
  mi_assert_internal(page->xthread_free == 0);
  mi_assert_internal(page->next == NULL);
  mi_assert_internal(page->prev == NULL);
  mi_assert_internal(page->retire_expire == 0);
  mi_assert_internal(!mi_page_has_aligned(page));
  #if (MI_PADDING || MI_ENCODE_FREELIST)
  mi_assert_internal(page->keys[0] != 0);
  mi_assert_internal(page->keys[1] != 0);
  #endif
  mi_assert_expensive(mi_page_is_valid_init(page));

  // initialize an initial free list
  mi_page_extend_free(heap,page,tld);
  mi_assert(mi_page_immediate_available(page));
}


/* -----------------------------------------------------------
  Find pages with free blocks
-------------------------------------------------------------*/

// Find a page with free blocks of `page->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
  // search through the pages in "next fit" order
  #if MI_STAT
  size_t count = 0;
  #endif
  mi_page_t* page = pq->first;
  while (page != NULL)
  {
    mi_page_t* next = page->next; // remember next
    #if MI_STAT
    count++;
    #endif

    // 0. collect freed blocks by us and other threads
    _mi_page_free_collect(page, false);

    // 1. if the page contains free blocks, we are done
    if (mi_page_immediate_available(page)) {
      break;  // pick this one
    }

    // 2. Try to extend
    if (page->capacity < page->reserved) {
      mi_page_extend_free(heap, page, heap->tld);
      mi_assert_internal(mi_page_immediate_available(page));
      break;
    }

    // 3. If the page is completely full, move it to the `mi_pages_full`
    // queue so we don't visit long-lived pages too often.
    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
    mi_page_to_full(page, pq);

    page = next;
  } // for each page

  mi_heap_stat_counter_increase(heap, searches, count);

  if (page == NULL) {
    _mi_heap_collect_retired(heap, false); // perhaps make a page available?
    page = mi_page_fresh(heap, pq);
    if (page == NULL && first_try) {
      // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
      page = mi_page_queue_find_free_ex(heap, pq, false);
    }
  }
  else {
    mi_assert(pq->first == page);
    page->retire_expire = 0;
  }
  mi_assert_internal(page == NULL || mi_page_immediate_available(page));
  return page;
}


// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
  mi_page_queue_t* pq = mi_page_queue(heap,size);
  mi_page_t* page = pq->first;
  if (page != NULL) {
    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
    if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
      mi_page_extend_free(heap, page, heap->tld);
      mi_assert_internal(mi_page_immediate_available(page));
    }
    else
    #endif
    {
      _mi_page_free_collect(page,false);
    }

    if (mi_page_immediate_available(page)) {
      page->retire_expire = 0;
      return page; // fast path
    }
  }
  return mi_page_queue_find_free_ex(heap, pq, true);
}


/* -----------------------------------------------------------
  Users can register a deferred free function called
  when the `free` list is empty. Since the `local_free`
  is separate this is deterministically called after
  a certain number of allocations.
----------------------------------------------------------- */

static mi_deferred_free_fun* volatile deferred_free = NULL;
static _Atomic(void*) deferred_arg; // = NULL

void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
  if (deferred_free != NULL && !heap->tld->recurse) {
    heap->tld->recurse = true;
    deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
    heap->tld->recurse = false;
  }
}

void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept {
  deferred_free = fn;
  mi_atomic_store_ptr_release(void,&deferred_arg, arg);
}
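
// Usage sketch (hypothetical user code, not in the original source; the
// callback parameters mirror the call in `_mi_deferred_free` above -- the
// force flag, the heartbeat counter, and the registered argument):
//
//   static void flush_caches(bool force, unsigned long long heartbeat, void* arg) {
//     if (force || (heartbeat % 64) == 0) {
//       my_pool_release_unused((my_pool_t*)arg);  // hypothetical helper
//     }
//   }
//   ...
//   mi_register_deferred_free(&flush_caches, pool);
//
// The `recurse` guard above means the callback may itself call into the
// allocator without re-entering deferred freeing.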

/* -----------------------------------------------------------
  General allocation
----------------------------------------------------------- */

// Large and huge page allocation.
// Huge pages are allocated directly without being in a queue.
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
// Huge pages are also used if the requested alignment is very large (> MI_ALIGNMENT_MAX).
static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
  size_t block_size = _mi_os_good_alloc_size(size);
  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
  #if MI_HUGE_PAGE_ABANDON
  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
  #else
  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
  mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
  #endif
  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
  if (page != NULL) {
    mi_assert_internal(mi_page_immediate_available(page));

    if (is_huge) {
      mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
      mi_assert_internal(_mi_page_segment(page)->used==1);
      #if MI_HUGE_PAGE_ABANDON
      mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
      mi_page_set_heap(page, NULL);
      #endif
    }
    else {
      mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
    }

    const size_t bsize = mi_page_usable_block_size(page);  // note: not `mi_page_block_size` to account for padding
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_increase(heap, large, bsize);
      mi_heap_stat_counter_increase(heap, large_count, 1);
    }
    else {
      mi_heap_stat_increase(heap, huge, bsize);
      mi_heap_stat_counter_increase(heap, huge_count, 1);
    }
  }
  return page;
}


// Allocate a page
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
  // huge allocation?
  const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
    if mi_unlikely(req_size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
      _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
      return NULL;
    }
    else {
      return mi_large_huge_page_alloc(heap,size,huge_alignment);
    }
  }
  else {
    // otherwise find a page with free blocks in our size segregated queues
    #if MI_PADDING
    mi_assert_internal(size >= MI_PADDING_SIZE);
    #endif
    return mi_find_free_page(heap, size);
  }
}

// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
// very large requested alignments in which case we use a huge segment.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
{
  mi_assert_internal(heap != NULL);

  // initialize if necessary
  if mi_unlikely(!mi_heap_is_initialized(heap)) {
    heap = mi_heap_get_default(); // calls mi_thread_init
    if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
  }
  mi_assert_internal(mi_heap_is_initialized(heap));

  // call potential deferred free routines
  _mi_deferred_free(heap, false);

  // free delayed frees from other threads (but skip contended ones)
  _mi_heap_delayed_free_partial(heap);

  // find (or allocate) a page of the right size
  mi_page_t* page = mi_find_page(heap, size, huge_alignment);
  if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
    mi_heap_collect(heap, true /* force */);
    page = mi_find_page(heap, size, huge_alignment);
  }

  if mi_unlikely(page == NULL) { // out of memory
    const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
    _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
    return NULL;
  }

  mi_assert_internal(mi_page_immediate_available(page));
  mi_assert_internal(mi_page_block_size(page) >= size);

  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
  if mi_unlikely(zero && page->xblock_size == 0) {
    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
    void* p = _mi_page_malloc(heap, page, size, false);
    mi_assert_internal(p != NULL);
    _mi_memzero_aligned(p, mi_page_usable_block_size(page));
    return p;
  }
  else {
    return _mi_page_malloc(heap, page, size, zero);
  }
}
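
// Summary (an explanatory sketch, not in the original source): the slow path
// thus runs at most three phases per call -- bookkeeping (deferred frees and
// uncontended delayed frees), page search (fast head-of-queue check, then
// next-fit, then a fresh segment page), and a single forced collect-and-retry
// on out-of-memory -- before either delegating to `_mi_page_malloc` or
// reporting ENOMEM.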