Performance Improvement: Add support for downloading/uploading ccache archives (#6314)

* Add support for per-package ccache.

* Add support for removing ccache archives that are not the latest.

* Protect against failed ccache manager initialization.

* Update ccache configuration parameter dump.

* Use runtime build architecture instead of rpm node architecture.

* Fix error wrapping + minor changes.

* Fix formatting (make go-tidy-all).

* Fix typo in error message.
This commit is contained in:
George Mileka 2023-10-17 10:57:36 -07:00 committed by GitHub
Parent b9e50cd145
Commit a4101d67e4
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 1207 additions and 39 deletions

View file

@@ -87,6 +87,7 @@ BUILD_DIR ?= $(PROJECT_ROOT)/build
OUT_DIR ?= $(PROJECT_ROOT)/out
SPECS_DIR ?= $(PROJECT_ROOT)/SPECS
CCACHE_DIR ?= $(PROJECT_ROOT)/ccache
CCACHE_CONFIG ?= $(RESOURCES_DIR)/manifests/package/ccache-configuration.json
# Sub-folder defines
LOGS_DIR ?= $(BUILD_DIR)/logs

View file

@@ -0,0 +1,74 @@
{
"remoteStore": {
"type": "azure-blob-storage",
"tenantId": "",
"userName": "",
"password": "",
"storageAccount": "marinerccache",
"containerName": "20-stable",
"tagsFolder": "tags",
"downloadEnabled": true,
"downloadLatest": true,
"downloadFolder": "",
"uploadEnabled": false,
"uploadFolder": "",
"updateLatest": false,
"keepLatestOnly": false
},
"groups": [
{
"name": "jflex",
"comment": "",
"enabled": true,
"packageNames": [ "jflex", "jflex-bootstrap" ]
},
{
"name": "java-cup",
"comment": "",
"enabled": true,
"packageNames": [ "java-cup", "java-cup-bootstrap" ]
},
{
"name": "kernel",
"comment": "",
"enabled": true,
"packageNames": [ "kernel", "kernel-azure", "kernel-hci", "kernel-mshv", "kernel-uvm" ]
},
{
"name": "libdb",
"comment": "Disabling ccache for pkg because it breaks the build when enabled.",
"enabled": false,
"packageNames": [ "libdb" ]
},
{
"name": "m2crypto",
"comment": "Disabling ccache for pkg because it breaks the build when enabled.",
"enabled": false,
"packageNames": [ "m2crypto" ]
},
{
"name": "openssh",
"comment": "Disabling ccache for pkg because it breaks the build when enabled.",
"enabled": false,
"packageNames": [ "openssh" ]
},
{
"name": "php",
"comment": "Disabling ccache for pkg because it breaks the build when enabled.",
"enabled": false,
"packageNames": [ "php" ]
},
{
"name": "php-pecl-zip",
"comment": "Disabling ccache for pkg because it breaks the build when enabled.",
"enabled": false,
"packageNames": [ "php-pecl-zip" ]
},
{
"name": "systemd",
"comment": "",
"enabled": true,
"packageNames": [ "systemd", "systemd-bootstrap" ]
}
]
}
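For illustration only (not part of the commit), the sketch below shows how a configuration file of this shape can be deserialized and queried in Go. The struct definitions are simplified stand-ins for the RemoteStoreConfig/CCacheGroupConfig types defined later in this change, and the file path is just an example.

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// Simplified stand-ins for the configuration types added by this change.
type remoteStore struct {
    ContainerName   string `json:"containerName"`
    DownloadEnabled bool   `json:"downloadEnabled"`
}

type group struct {
    Name         string   `json:"name"`
    Enabled      bool     `json:"enabled"`
    PackageNames []string `json:"packageNames"`
}

type ccacheConfig struct {
    RemoteStore remoteStore `json:"remoteStore"`
    Groups      []group     `json:"groups"`
}

func main() {
    data, err := os.ReadFile("ccache-configuration.json") // example path
    if err != nil {
        panic(err)
    }
    var cfg ccacheConfig
    if err := json.Unmarshal(data, &cfg); err != nil {
        panic(err)
    }
    // Find the group (if any) that the "kernel-azure" package maps to.
    for _, g := range cfg.Groups {
        for _, pkg := range g.PackageNames {
            if pkg == "kernel-azure" {
                fmt.Printf("package kernel-azure -> group %q (enabled=%v)\n", g.Name, g.Enabled)
            }
        }
    }
}

Packages that appear in no group are treated by the ccache manager as their own single-package group (see findGroup further down in this diff).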

View file

@@ -278,7 +278,6 @@ $(STATUS_FLAGS_DIR)/build-rpms.flag: $(no_repo_acl) $(preprocessed_file) $(chroo
--toolchain-rpms-dir="$(TOOLCHAIN_RPMS_DIR)" \
--srpm-dir="$(SRPMS_DIR)" \
--cache-dir="$(remote_rpms_cache_dir)" \
--ccache-dir="$(CCACHE_DIR)" \
--build-logs-dir="$(rpmbuilding_logs_dir)" \
--dist-tag="$(DIST_TAG)" \
--distro-release-version="$(RELEASE_VERSION)" \
@@ -310,6 +309,8 @@ $(STATUS_FLAGS_DIR)/build-rpms.flag: $(no_repo_acl) $(preprocessed_file) $(chroo
$(if $(filter-out y,$(CLEANUP_PACKAGE_BUILDS)),--no-cleanup) \
$(if $(filter y,$(DELTA_BUILD)),--optimize-with-cached-implicit) \
$(if $(filter y,$(USE_CCACHE)),--use-ccache) \
$(if $(filter y,$(USE_CCACHE)),--ccache-dir="$(CCACHE_DIR)") \
$(if $(filter y,$(USE_CCACHE)),--ccache-config="$(CCACHE_CONFIG)") \
$(if $(filter y,$(ALLOW_TOOLCHAIN_REBUILDS)),--allow-toolchain-rebuilds) \
--max-cpu="$(MAX_CPU)" \
$(if $(PACKAGE_BUILD_TIMEOUT),--timeout="$(PACKAGE_BUILD_TIMEOUT)") \

View file

@@ -3,6 +3,8 @@ module github.com/microsoft/CBL-Mariner/toolkit/tools
go 1.20
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/bendahl/uinput v1.4.0
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
@@ -16,26 +18,35 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.7.1
github.com/ulikunitz/xz v0.5.10
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
golang.org/x/sys v0.11.0
gonum.org/v1/gonum v0.11.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v3 v3.0.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/klauspost/compress v1.10.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3 // indirect
golang.org/x/text v0.3.8 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/text v0.12.0 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
)

View file

@@ -1,4 +1,15 @@
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 h1:LNHhpdK7hzUcx/k1LIcuh5k7k1LGIWLQfCjaneSj7Fc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1/go.mod h1:uE9zaUfEQT/nbQjVi2IblCG9iaLtZsuYZ8ne+PuQ02M=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -16,12 +27,17 @@ github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oD
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
github.com/gdamore/tcell v1.4.0 h1:vUnHwJRvcPQa3tzi+0QI4U9JINXYJlOz9yiaiPQ2wMU=
github.com/gdamore/tcell v1.4.0/go.mod h1:vxEiSDZdW3L+Uhjii9c3375IlDmR05bzxY404ZVSMo0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w=
github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
github.com/juliangruber/go-intersect v1.1.0 h1:sc+y5dCjMMx0pAdYk/N6KBm00tD/f3tq+Iox7dYDUrY=
@@ -35,6 +51,8 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
@@ -47,6 +65,8 @@ github.com/muesli/crunchy v0.4.0 h1:qdiml8gywULHBsztiSAf6rrE6EyuNasNKZ104mAaahM=
github.com/muesli/crunchy v0.4.0/go.mod h1:9k4x6xdSbb7WwtAVy0iDjaiDjIk6Wa5AgUIqp+HqOpU=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/tview v0.0.0-20200219135020-0ba8301b415c h1:Q1oRqcTvxE0hjV0Gw4bEcYYLM0ztcuARGVSWEF2tKaI=
@@ -66,6 +86,8 @@ github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9 h1:w8V9v0qVympSF6Gj
github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3 h1:n9HxLrNxWWtEb1cA950nuEEj3QnKbtsCJ6KjcgisNUs=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
@@ -75,20 +97,23 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191018095205-727590c5006e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -102,6 +127,7 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@@ -0,0 +1,136 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package azureblobstoragepkg
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
)
const (
AnonymousAccess = 0
AuthenticatedAccess = 1
)
type AzureBlobStorage struct {
theClient *azblob.Client
}
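// Upload uploads the given local file to the specified container/blob and logs the elapsed time.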
func (abs *AzureBlobStorage) Upload(
ctx context.Context,
localFileName string,
containerName string,
blobName string) (err error) {
uploadStartTime := time.Now()
localFile, err := os.OpenFile(localFileName, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("Failed to open local file for upload:\n%w", err)
}
defer localFile.Close()
_, err = abs.theClient.UploadFile(ctx, containerName, blobName, localFile, nil)
if err != nil {
return fmt.Errorf("Failed to upload local file to blob:\n%w", err)
}
uploadEndTime := time.Now()
logger.Log.Infof(" upload time: %s", uploadEndTime.Sub(uploadStartTime))
return nil
}
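// Download downloads the specified container/blob to the given local file, removing the partial
// file on failure, and logs the elapsed time.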
func (abs *AzureBlobStorage) Download(
ctx context.Context,
containerName string,
blobName string,
localFileName string) (err error) {
downloadStartTime := time.Now()
localFile, err := os.Create(localFileName)
if err != nil {
return fmt.Errorf("Failed to create local file for download:\n%w", err)
}
defer func() {
localFile.Close()
// If there was an error, ensure that the file is removed
if err != nil {
cleanupErr := file.RemoveFileIfExists(localFileName)
if cleanupErr != nil {
logger.Log.Warnf("Failed to remove failed network download file '%s': %v", localFileName, err)
}
}
}()
_, err = abs.theClient.DownloadFile(ctx, containerName, blobName, localFile, nil)
if err != nil {
return fmt.Errorf("Failed to download blob to local file:\n%w", err)
}
downloadEndTime := time.Now()
logger.Log.Infof(" download time: %v", downloadEndTime.Sub(downloadStartTime))
return nil
}
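// Delete removes the specified blob from the given container and logs the elapsed time.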
func (abs *AzureBlobStorage) Delete(
ctx context.Context,
containerName string,
blobName string) (err error) {
deleteStartTime := time.Now()
_, err = abs.theClient.DeleteBlob(ctx, containerName, blobName, nil)
if err != nil {
return fmt.Errorf("Failed to delete blob:\n%w", err)
}
deleteEndTime := time.Now()
logger.Log.Infof(" delete time: %v", deleteEndTime.Sub(deleteStartTime))
return nil
}
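// Create builds a client for the given storage account: anonymous access for read-only scenarios,
// or a service principal (tenant id + client id + secret) for read-write access.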
func Create(tenantId string, userName string, password string, storageAccount string, authenticationType int) (abs *AzureBlobStorage, err error) {
url := "https://" + storageAccount + ".blob.core.windows.net/"
abs = &AzureBlobStorage{}
if authenticationType == AnonymousAccess {
abs.theClient, err = azblob.NewClientWithNoCredential(url, nil)
if err != nil {
return nil, fmt.Errorf("Unable to init azure blob storage read-only client:\n%w", err)
}
return abs, nil
} else if authenticationType == AuthenticatedAccess {
credential, err := azidentity.NewClientSecretCredential(tenantId, userName, password, nil)
if err != nil {
return nil, fmt.Errorf("Unable to init azure identity:\n%w", err)
}
abs.theClient, err = azblob.NewClient(url, credential, nil)
if err != nil {
return nil, fmt.Errorf("Unable to init azure blob storage read-write client:\n%w", err)
}
return abs, nil
}
return nil, errors.New("Unknown authentication type.")
}
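As a rough usage sketch (illustration only, not part of the commit): a caller constructs a read-only client via Create() with anonymous access and then moves blobs with Download()/Upload(). The storage account, container, and blob names below are placeholders, the packages are internal to the toolkit module, and the toolkit's logger is assumed to be initialized by the caller.

package main

import (
    "context"
    "fmt"

    azureblobstoragepkg "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/azureblobstorage"
)

func main() {
    // Anonymous (read-only) access needs no tenant id, client id, or secret.
    abs, err := azureblobstoragepkg.Create("", "", "", "marinerccache", azureblobstoragepkg.AnonymousAccess)
    if err != nil {
        fmt.Println("create client:", err)
        return
    }
    // Container and blob names are examples only; the toolkit logger must already be initialized,
    // since Download logs its timing information.
    err = abs.Download(context.Background(), "20-stable", "x86_64/tags/kernel-latest-build.txt", "/tmp/kernel-latest-build.txt")
    if err != nil {
        fmt.Println("download:", err)
    }
}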

View file

@@ -0,0 +1,769 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package ccachemanagerpkg
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/azureblobstorage"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/directory"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell"
)
// CCacheManager
//
// CCacheManager is a central place to hold the ccache configuration and
// abstract its work into easy to use functions.
//
// The configurations include:
// - Whether ccache is enabled or not.
// - Whether to download ccache artifacts from a previous build or not.
// - Whether to upload the newly generated ccache artifacts or not.
// - Other configuration like local working folders, etc.
//
// The main object exposed to the caller/user is CCacheManager and is
// instantiated through the CreateManager() function.
//
// An instance of CCacheManager can then be set to a specific package family by
// calling SetCurrentPkgGroup(). This function causes the CCacheManager to
// calculate a number of settings for this particular package group.
//
// After a successful call to SetCurrentPkgGroup(), the user can then call
// DownloadPkgGroupCCache() or UploadPkgGroupCCache() to download or upload
// ccache artifacts respectively.
//
// UploadMultiPkgGroupCCaches() is provided to call at the end of the build
// (where individual contexts for each pkg is not available) to enumerate
// the generated ccache artifacts from the folders and upload those that have
// not been uploaded - namely, package groups that have more than one package.
//
// Note that the design allows for storing multiple versions of the ccache
// artifacts on the remote store. This also means that the user may choose to
// download from any of these versions, and upload to a new location.
//
// The design supports a 'latest' notion - where the user can just indicate
// that the download is to use the 'latest' and the CCacheManager will find
// out the latest version and download.
//
// Also, the user may choose to upload the artifacts generated in this build,
// and mark them 'latest' - so that a subsequent build can make use of them.
//
// The 'latest' flow is implemented by creating one text file per package
// family that holds the folder where the latest artifacts are. On downloading
// the latest, that file is downloaded from the blob storage and read.
// On uploading, the file is created locally and uploaded (to overwrite the
// existing version if it exists).
//
// While we can store the ccache artifacts from multiple builds, and consumer
// builds can choose which ones to download from, this is not the typical
// scenario.
//
// Instead, an official production build is to upload ccache artifacts and mark
// them 'latest'. And consumer builds will just always pick-up the latest.
//
// This implies that we do not need to keep older ccache artifacts on the
// remote store and we should delete them. To support that, there is a flag
// 'KeepLatestOnly' that tells CCacheManager to delete unused older versions.
// It identifies unused versions by capturing the latest version information
// right before it updates it to the current build. Then, it knows that the
// previous latest version is no longer in use and deletes it.
//
// This has an implication if we keep switching the KeepLatestOnly flag on and
// off. CCacheManager will not be able to delete older unused versions unless
// they are the versions that we just switched away from. If this proves to be
// a problem, we can always write a tool to enumerate all the versions in use,
// and anything that is not in that list can be removed. This is not the
// default behavior because such enumeration takes about 10 minutes and we do
// not want this to be part of each build.
//
const (
CCacheTarSuffix = "-ccache.tar.gz"
CCacheTagSuffix = "-latest-build.txt"
// These are just placeholders when constructing a new manager object.
UninitializedGroupName = "unknown"
UninitializedGroupSize = 0
UninitializedGroupArchitecture = "unknown"
)
// RemoteStoreConfig holds the following:
// - The remote store end-point and the necessary credentials.
// - The behavior of the download from the remote store.
// - The behavior of the upload to the remote store.
// - The clean-up policy of the remote store.
type RemoteStoreConfig struct {
// The remote store type. Currently, only one type is supported:
// Azure blob storage.
Type string `json:"type"`
// Azure subscription tenant id.
TenantId string `json:"tenantId"`
// Service principal client id with write-permissions to the Azure blob
// storage. This can be left empty if upload is disabled.
UserName string `json:"userName"`
// Service principal secret with write-permissions to the Azure blob
// storage. This can be left empty if upload is disabled.
Password string `json:"password"`
// Azure storage account name.
StorageAccount string `json:"storageAccount"`
// Azure storage container name.
ContainerName string `json:"containerName"`
// Tags folder is the location where the files holding information about
// the latest folders are kept.
TagsFolder string `json:"tagsFolder"`
// If true, the build will download ccache artifacts from the remote store
// (before the package family builds).
DownloadEnabled bool `json:"downloadEnabled"`
// If true, the build will determine the latest build and download its
// artifacts. If true, DownloadFolder does not need to be set.
DownloadLatest bool `json:"downloadLatest"`
// The folder on the remote store where the ccache artifacts to use are.
// There should be a folder for each build.
// If DownloadLatest is true, this does not need to be set.
DownloadFolder string `json:"downloadFolder"`
// If true, the build will upload ccache artifacts to the remote store
// (after the package family builds).
UploadEnabled bool `json:"uploadEnabled"`
// The folder on the remote store where the ccache artifacts are to be
// uploaded.
UploadFolder string `json:"uploadFolder"`
// If true, the tags specifying the latest artifacts will be updated to
// point to the current upload.
UpdateLatest bool `json:"updateLatest"`
// If true, previous 'latest' ccache artifacts will be deleted from the
// remote store.
KeepLatestOnly bool `json:"keepLatestOnly"`
}
// CCacheGroupConfig is where package groups are defined.
// A package group is a group of packages that can share the same ccache
// artifacts. This is typical for packages like kernel and kernel-hci, for
// example.
// A package group can have an arbitrary name, and a list of package names
// associated with it.
// A package group can also be disabled if the ccache breaks its build. This
// is usually a bug - and would need to be investigated further. The field
// 'comment' can be used to clarify any configuration related to this package
// family.
type CCacheGroupConfig struct {
Name string `json:"name"`
Comment string `json:"comment"`
Enabled bool `json:"enabled"`
PackageNames []string `json:"packageNames"`
}
type CCacheConfiguration struct {
RemoteStoreConfig *RemoteStoreConfig `json:"remoteStore"`
Groups []CCacheGroupConfig `json:"groups"`
}
// Note that the design separates the artifacts we download (source) from those
// that we upload (target).
// What we start with (source) has a remote path on the remote storage, and is
// downloaded to disk at the local path.
// What we generate (target) has a local path where we create the archive, and
// is uploaded to the remote store at the remote target path.
type CCacheArchive struct {
LocalSourcePath string
RemoteSourcePath string
LocalTargetPath string
RemoteTargetPath string
}
// CCachePkgGroup is calculated for each package as we encounter it during the
// build. It is derived from the CCacheGroupConfig + runtime parameters.
type CCachePkgGroup struct {
Name string
Enabled bool
Size int
Arch string
CCacheDir string
TarFile *CCacheArchive
TagFile *CCacheArchive
}
// CCacheManager is the main object...
type CCacheManager struct {
// Full path to the ccache json configuration file.
ConfigFileName string
// The in-memory representation of the ConfigFile contents.
Configuration *CCacheConfiguration
// ccache root folder as specified by build pipelines.
RootWorkDir string
// Working folder where CCacheManager will download artifacts.
LocalDownloadsDir string
// Working folder where CCacheManager will create archives in preparation
// for uploading them.
LocalUploadsDir string
// Pointer to the current active pkg group state/configuration.
CurrentPkgGroup *CCachePkgGroup
// A utility helper for downloading/uploading archives from/to Azure blob
// storage.
AzureBlobStorage *azureblobstoragepkg.AzureBlobStorage
}
func buildRemotePath(arch, folder, name, suffix string) string {
return arch + "/" + folder + "/" + name + suffix
}
func (g *CCachePkgGroup) buildTarRemotePath(folder string) string {
return buildRemotePath(g.Arch, folder, g.Name, CCacheTarSuffix)
}
func (g *CCachePkgGroup) buildTagRemotePath(folder string) string {
return buildRemotePath(g.Arch, folder, g.Name, CCacheTagSuffix)
}
func (g *CCachePkgGroup) UpdateTagsPaths(remoteStoreConfig *RemoteStoreConfig, localDownloadsDir string, localUploadsDir string) {
tagFile := &CCacheArchive{
LocalSourcePath: localDownloadsDir + "/" + g.Name + CCacheTagSuffix,
RemoteSourcePath: g.buildTagRemotePath(remoteStoreConfig.TagsFolder),
LocalTargetPath: localUploadsDir + "/" + g.Name + CCacheTagSuffix,
RemoteTargetPath: g.buildTagRemotePath(remoteStoreConfig.TagsFolder),
}
logger.Log.Infof(" tag local source : (%s)", tagFile.LocalSourcePath)
logger.Log.Infof(" tag remote source : (%s)", tagFile.RemoteSourcePath)
logger.Log.Infof(" tag local target : (%s)", tagFile.LocalTargetPath)
logger.Log.Infof(" tag remote target : (%s)", tagFile.RemoteTargetPath)
g.TagFile = tagFile
}
func (g *CCachePkgGroup) UpdateTarPaths(remoteStoreConfig *RemoteStoreConfig, localDownloadsDir string, localUploadsDir string) {
tarFile := &CCacheArchive{
LocalSourcePath: localDownloadsDir + "/" + g.Name + CCacheTarSuffix,
RemoteSourcePath: g.buildTarRemotePath(remoteStoreConfig.DownloadFolder),
LocalTargetPath: localUploadsDir + "/" + g.Name + CCacheTarSuffix,
RemoteTargetPath: g.buildTarRemotePath(remoteStoreConfig.UploadFolder),
}
logger.Log.Infof(" tar local source : (%s)", tarFile.LocalSourcePath)
logger.Log.Infof(" tar remote source : (%s)", tarFile.RemoteSourcePath)
logger.Log.Infof(" tar local target : (%s)", tarFile.LocalTargetPath)
logger.Log.Infof(" tar remote target : (%s)", tarFile.RemoteTargetPath)
g.TarFile = tarFile
}
func (g *CCachePkgGroup) getLatestTag(azureBlobStorage *azureblobstoragepkg.AzureBlobStorage, containerName string) (string, error) {
logger.Log.Infof(" checking if (%s) already exists...", g.TagFile.LocalSourcePath)
_, err := os.Stat(g.TagFile.LocalSourcePath)
if err != nil {
// If file is not available locally, try downloading it...
logger.Log.Infof(" downloading (%s) to (%s)...", g.TagFile.RemoteSourcePath, g.TagFile.LocalSourcePath)
err = azureBlobStorage.Download(context.Background(), containerName, g.TagFile.RemoteSourcePath, g.TagFile.LocalSourcePath)
if err != nil {
return "", fmt.Errorf("Unable to download ccache tag file:\n%w", err)
}
}
latestBuildTagData, err := ioutil.ReadFile(g.TagFile.LocalSourcePath)
if err != nil {
return "", fmt.Errorf("Unable to read ccache tag file contents:\n%w", err)
}
return string(latestBuildTagData), nil
}
// SetCurrentPkgGroup() is called once per package.
func (m *CCacheManager) SetCurrentPkgGroup(basePackageName string, arch string) (err error) {
// Note that findGroup() always succeeds.
// If it cannot find the package in any configured group, it treats the
// package as its own enabled, single-package group.
groupName, groupEnabled, groupSize := m.findGroup(basePackageName)
return m.setCurrentPkgGroupInternal(groupName, groupEnabled, groupSize, arch)
}
// setCurrentPkgGroupInternal() is called once per package.
func (m *CCacheManager) setCurrentPkgGroupInternal(groupName string, groupEnabled bool, groupSize int, arch string) (err error) {
ccachePkgGroup := &CCachePkgGroup{
Name: groupName,
Enabled: groupEnabled,
Size: groupSize,
Arch: arch,
}
ccachePkgGroup.CCacheDir, err = m.buildPkgCCacheDir(ccachePkgGroup.Name, ccachePkgGroup.Arch)
if err != nil {
return fmt.Errorf("Failed to construct the ccache directory name:\n%w", err)
}
// Note that we create the ccache working folder here as opposed to the
// download function because there is a case where the group is configured
// to enable ccache, but does not download.
if ccachePkgGroup.Enabled {
logger.Log.Infof(" ccache pkg folder : (%s)", ccachePkgGroup.CCacheDir)
err = directory.EnsureDirExists(ccachePkgGroup.CCacheDir)
if err != nil {
return fmt.Errorf("Cannot create ccache download folder:\n%w", err)
}
ccachePkgGroup.UpdateTagsPaths(m.Configuration.RemoteStoreConfig, m.LocalDownloadsDir, m.LocalUploadsDir)
if m.Configuration.RemoteStoreConfig.DownloadLatest {
logger.Log.Infof(" ccache is configured to use the latest from the remote store...")
latestTag, err := ccachePkgGroup.getLatestTag(m.AzureBlobStorage, m.Configuration.RemoteStoreConfig.ContainerName)
if err == nil {
// Adjust the download folder from 'latest' to the tag loaded from the file...
logger.Log.Infof(" updating (%s) to (%s)...", m.Configuration.RemoteStoreConfig.DownloadFolder, latestTag)
m.Configuration.RemoteStoreConfig.DownloadFolder = latestTag
} else {
logger.Log.Warnf(" unable to get the latest ccache tag. Might be the first run and no ccache tag has been uploaded before.")
}
}
if m.Configuration.RemoteStoreConfig.DownloadFolder == "" {
logger.Log.Infof(" ccache archive source download folder is an empty string. Disabling ccache download.")
m.Configuration.RemoteStoreConfig.DownloadEnabled = false
}
ccachePkgGroup.UpdateTarPaths(m.Configuration.RemoteStoreConfig, m.LocalDownloadsDir, m.LocalUploadsDir)
}
m.CurrentPkgGroup = ccachePkgGroup
return nil
}
func loadConfiguration(configFileName string) (configuration *CCacheConfiguration, err error) {
logger.Log.Infof(" loading ccache configuration file: %s", configFileName)
err = jsonutils.ReadJSONFile(configFileName, &configuration)
if err != nil {
return nil, fmt.Errorf("Failed to load file:\n%w", err)
}
logger.Log.Infof(" Type : %s", configuration.RemoteStoreConfig.Type)
logger.Log.Infof(" TenantId : %s", configuration.RemoteStoreConfig.TenantId)
logger.Log.Infof(" UserName : %s", configuration.RemoteStoreConfig.UserName)
logger.Log.Infof(" StorageAccount : %s", configuration.RemoteStoreConfig.StorageAccount)
logger.Log.Infof(" ContainerName : %s", configuration.RemoteStoreConfig.ContainerName)
logger.Log.Infof(" Tagsfolder : %s", configuration.RemoteStoreConfig.TagsFolder)
logger.Log.Infof(" DownloadEnabled: %v", configuration.RemoteStoreConfig.DownloadEnabled)
logger.Log.Infof(" DownloadLatest : %v", configuration.RemoteStoreConfig.DownloadLatest)
logger.Log.Infof(" DownloadFolder : %s", configuration.RemoteStoreConfig.DownloadFolder)
logger.Log.Infof(" UploadEnabled : %v", configuration.RemoteStoreConfig.UploadEnabled)
logger.Log.Infof(" UploadFolder : %s", configuration.RemoteStoreConfig.UploadFolder)
logger.Log.Infof(" UpdateLatest : %v", configuration.RemoteStoreConfig.UpdateLatest)
logger.Log.Infof(" KeepLatestOnly : %v", configuration.RemoteStoreConfig.KeepLatestOnly)
return configuration, err
}
func compressDir(sourceDir string, archiveName string) (err error) {
// Ensure the output file does not exist...
_, err = os.Stat(archiveName)
if err == nil {
err = os.Remove(archiveName)
if err != nil {
return fmt.Errorf("Unable to delete ccache out tar:\n%w", err)
}
}
// Create the archive...
logger.Log.Infof(" compressing (%s) into (%s).", sourceDir, archiveName)
compressStartTime := time.Now()
tarArgs := []string{
"cf",
archiveName,
"-C",
sourceDir,
"."}
_, stderr, err := shell.Execute("tar", tarArgs...)
if err != nil {
return fmt.Errorf("Unable compress ccache files into archive:\n%s", stderr)
}
compressEndTime := time.Now()
logger.Log.Infof(" compress time: %s", compressEndTime.Sub(compressStartTime))
return nil
}
func uncompressFile(archiveName string, targetDir string) (err error) {
logger.Log.Infof(" uncompressing (%s) into (%s).", archiveName, targetDir)
uncompressStartTime := time.Now()
tarArgs := []string{
"xf",
archiveName,
"-C",
targetDir,
"."}
_, stderr, err := shell.Execute("tar", tarArgs...)
if err != nil {
return fmt.Errorf("Unable extract ccache files from archive:\n%s", stderr)
}
uncompressEndTime := time.Now()
logger.Log.Infof(" uncompress time: %v", uncompressEndTime.Sub(uncompressStartTime))
return nil
}
func CreateManager(rootDir string, configFileName string) (m *CCacheManager, err error) {
logger.Log.Infof("* Creating a ccache manager instance *")
logger.Log.Infof(" ccache root folder : (%s)", rootDir)
logger.Log.Infof(" ccache remote configuration: (%s)", configFileName)
if rootDir == "" {
return nil, errors.New("CCache root directory cannot be empty.")
}
if configFileName == "" {
return nil, errors.New("CCache configuration file cannot be empty.")
}
configuration, err := loadConfiguration(configFileName)
if err != nil {
return nil, fmt.Errorf("Failed to load remote store configuration:\n%w", err)
}
logger.Log.Infof(" creating blob storage client...")
accessType := azureblobstoragepkg.AnonymousAccess
if configuration.RemoteStoreConfig.UploadEnabled {
accessType = azureblobstoragepkg.AuthenticatedAccess
}
azureBlobStorage, err := azureblobstoragepkg.Create(configuration.RemoteStoreConfig.TenantId, configuration.RemoteStoreConfig.UserName, configuration.RemoteStoreConfig.Password, configuration.RemoteStoreConfig.StorageAccount, accessType)
if err != nil {
return nil, fmt.Errorf("Unable to init azure blob storage client:\n%w", err)
}
err = directory.EnsureDirExists(rootDir)
if err != nil {
return nil, fmt.Errorf("Cannot create ccache working folder:\n%w", err)
}
rootWorkDir := rootDir + "/work"
err = directory.EnsureDirExists(rootWorkDir)
if err != nil {
return nil, fmt.Errorf("Cannot create ccache work folder:\n%w", err)
}
localDownloadsDir := rootDir + "/downloads"
err = directory.EnsureDirExists(localDownloadsDir)
if err != nil {
return nil, fmt.Errorf("Cannot create ccache downloads folder:\n%w", err)
}
localUploadsDir := rootDir + "/uploads"
err = directory.EnsureDirExists(localUploadsDir)
if err != nil {
return nil, fmt.Errorf("Cannot create ccache uploads folder:\n%w", err)
}
ccacheManager := &CCacheManager{
ConfigFileName: configFileName,
Configuration: configuration,
RootWorkDir: rootWorkDir,
LocalDownloadsDir: localDownloadsDir,
LocalUploadsDir: localUploadsDir,
AzureBlobStorage: azureBlobStorage,
}
ccacheManager.setCurrentPkgGroupInternal(UninitializedGroupName, false, UninitializedGroupSize, UninitializedGroupArchitecture)
return ccacheManager, nil
}
// If the package is not listed in any configured group, this function returns
// the base package name as its own enabled group of size 1. This allows the
// ccachemanager to 'hide' the details of packages that are not part of any
// remote storage group.
func (m *CCacheManager) findGroup(basePackageName string) (groupName string, groupEnabled bool, groupSize int) {
//
// We assume that:
// - all packages want ccache enabled for them.
// - each package belongs to its own group.
// Then, we iterate to see if those assumptions do not apply for a certain
// package and overwrite them with the actual configuration.
//
groupName = basePackageName
groupEnabled = true
groupSize = 1
found := false
for _, group := range m.Configuration.Groups {
for _, packageName := range group.PackageNames {
if packageName == basePackageName {
logger.Log.Infof(" found group (%s) for base package (%s)...", group.Name, basePackageName)
groupName = group.Name
groupEnabled = group.Enabled
groupSize = len(group.PackageNames)
if !groupEnabled {
logger.Log.Infof(" ccache is explicitly disabled for this group in the ccache configuration.")
}
found = true
break
}
}
if found {
break
}
}
return groupName, groupEnabled, groupSize
}
func (m *CCacheManager) findCCacheGroupInfo(groupName string) (groupEnabled bool, groupSize int) {
//
// We assume that:
// - all packages want ccache enabled for them.
// - each package belongs to its own group.
// Then, we iterate to see if those assumptions do not apply for a certain
// package and overwrite them with the actual configuration.
//
groupEnabled = true
groupSize = 1
for _, group := range m.Configuration.Groups {
if groupName == group.Name {
groupEnabled = group.Enabled
groupSize = len(group.PackageNames)
}
}
return groupEnabled, groupSize
}
func (m *CCacheManager) buildPkgCCacheDir(pkgCCacheGroupName string, pkgArchitecture string) (string, error) {
if pkgArchitecture == "" {
return "", errors.New("CCache package pkgArchitecture cannot be empty.")
}
if pkgCCacheGroupName == "" {
return "", errors.New("CCache package group name cannot be empty.")
}
return m.RootWorkDir + "/" + pkgArchitecture + "/" + pkgCCacheGroupName, nil
}
func (m *CCacheManager) DownloadPkgGroupCCache() (err error) {
logger.Log.Infof("* processing download of ccache artifacts...")
remoteStoreConfig := m.Configuration.RemoteStoreConfig
if !remoteStoreConfig.DownloadEnabled {
logger.Log.Infof(" downloading archived ccache artifacts is disabled. Skipping download...")
return nil
}
logger.Log.Infof(" downloading (%s) to (%s)...", m.CurrentPkgGroup.TarFile.RemoteSourcePath, m.CurrentPkgGroup.TarFile.LocalSourcePath)
err = m.AzureBlobStorage.Download(context.Background(), remoteStoreConfig.ContainerName, m.CurrentPkgGroup.TarFile.RemoteSourcePath, m.CurrentPkgGroup.TarFile.LocalSourcePath)
if err != nil {
return fmt.Errorf("Unable to download ccache archive:\n%w", err)
}
err = uncompressFile(m.CurrentPkgGroup.TarFile.LocalSourcePath, m.CurrentPkgGroup.CCacheDir)
if err != nil {
return fmt.Errorf("Unable uncompress ccache files from archive:\n%w", err)
}
return nil
}
func (m *CCacheManager) UploadPkgGroupCCache() (err error) {
logger.Log.Infof("* processing upload of ccache artifacts...")
// Check if ccache has actually generated any content.
// If it has, it would have created a specific folder structure - so,
// checking for folders is reasonable enough.
pkgCCacheDirContents, err := directory.GetChildDirs(m.CurrentPkgGroup.CCacheDir)
if err != nil {
return fmt.Errorf("Failed to enumerate the contents of (%s):\n%w", m.CurrentPkgGroup.CCacheDir, err)
}
if len(pkgCCacheDirContents) == 0 {
logger.Log.Infof(" %s is empty. Nothing to archive and upload. Skipping...", m.CurrentPkgGroup.CCacheDir)
return nil
}
remoteStoreConfig := m.Configuration.RemoteStoreConfig
if !remoteStoreConfig.UploadEnabled {
logger.Log.Infof(" ccache update is disabled for this build.")
return nil
}
err = compressDir(m.CurrentPkgGroup.CCacheDir, m.CurrentPkgGroup.TarFile.LocalTargetPath)
if err != nil {
return fmt.Errorf("Unable compress ccache files into archive:\n%w", err)
}
// Upload the ccache archive
logger.Log.Infof(" uploading ccache archive (%s) to (%s)...", m.CurrentPkgGroup.TarFile.LocalTargetPath, m.CurrentPkgGroup.TarFile.RemoteTargetPath)
err = m.AzureBlobStorage.Upload(context.Background(), m.CurrentPkgGroup.TarFile.LocalTargetPath, remoteStoreConfig.ContainerName, m.CurrentPkgGroup.TarFile.RemoteTargetPath)
if err != nil {
return fmt.Errorf("Unable to upload ccache archive:\n%w", err)
}
if remoteStoreConfig.UpdateLatest {
logger.Log.Infof(" update latest is enabled.")
// If KeepLatestOnly is true, we need to capture the current source
// ccache archive path which is about to be dereferenced. That way,
// we can delete it after we update the latest tag to point to the
// new ccache archive.
//
// First we assume it does not exist (i.e. first time to run).
//
previousLatestTarSourcePath := ""
if remoteStoreConfig.KeepLatestOnly {
logger.Log.Infof(" keep latest only is enabled. Capturing path to previous ccache archive if it exists...")
// getLatestTag() will check locally first if the tag file has
// been downloaded and use it. If not, it will attempt to
// download it. If that also fails, there is no way to get to the
// previous latest tar (if it exists at all).
latestTag, err := m.CurrentPkgGroup.getLatestTag(m.AzureBlobStorage, m.Configuration.RemoteStoreConfig.ContainerName)
if err == nil {
// build the archive remote path based on the latestTag.
previousLatestTarSourcePath = m.CurrentPkgGroup.buildTarRemotePath(latestTag)
logger.Log.Infof(" (%s) is about to be de-referenced.", previousLatestTarSourcePath)
} else {
logger.Log.Warnf(" unable to get the latest ccache tag. This might be the first run and no latest ccache tag has been uploaded before.")
}
}
// Create the latest tag file...
logger.Log.Infof(" creating a tag file (%s) with content: (%s)...", m.CurrentPkgGroup.TagFile.LocalTargetPath, remoteStoreConfig.UploadFolder)
err = ioutil.WriteFile(m.CurrentPkgGroup.TagFile.LocalTargetPath, []byte(remoteStoreConfig.UploadFolder), 0644)
if err != nil {
return fmt.Errorf("Unable to write tag information to temporary file:\n%w", err)
}
// Upload the latest tag file...
logger.Log.Infof(" uploading tag file (%s) to (%s)...", m.CurrentPkgGroup.TagFile.LocalTargetPath, m.CurrentPkgGroup.TagFile.RemoteTargetPath)
err = m.AzureBlobStorage.Upload(context.Background(), m.CurrentPkgGroup.TagFile.LocalTargetPath, remoteStoreConfig.ContainerName, m.CurrentPkgGroup.TagFile.RemoteTargetPath)
if err != nil {
return fmt.Errorf("Unable to upload ccache archive:\n%w", err)
}
if remoteStoreConfig.KeepLatestOnly {
logger.Log.Infof(" keep latest only is enabled. Removing previous ccache archive if it exists...")
if previousLatestTarSourcePath == "" {
logger.Log.Infof(" cannot remove old archive with an empty name. No previous ccache archive to remove.")
} else {
logger.Log.Infof(" removing ccache archive (%s) from remote store...", previousLatestTarSourcePath)
err = m.AzureBlobStorage.Delete(context.Background(), remoteStoreConfig.ContainerName, previousLatestTarSourcePath)
if err != nil {
return fmt.Errorf("Unable to remove previous ccache archive:\n%w", err)
}
}
}
}
return nil
}
// After building a package or more, the ccache folder is expected to look as
// follows:
//
// <rootDir> (i.e. /ccache)
//
// <m.LocalDownloadsDir>
// <m.LocalUploadsDir>
// <m.RootWorkDir>
// x86_64
// <groupName-1>
// <groupName-2>
// noarch
// <groupName-3>
// <groupName-4>
//
// This function is typically called at the end of the build - after all
// packages have completed building.
//
// At that point, there is no per-package information about the group name
// or the architecture.
//
// We use this directory structure to encode the per-package group information
// at build time, so we can use it now.
func (m *CCacheManager) UploadMultiPkgGroupCCaches() (err error) {
architectures, err := directory.GetChildDirs(m.RootWorkDir)
errorsOccured := false
if err != nil {
return fmt.Errorf("failed to enumerate ccache child folders under (%s):\n%w", m.RootWorkDir, err)
}
for _, architecture := range architectures {
groupNames, err := directory.GetChildDirs(filepath.Join(m.RootWorkDir, architecture))
if err != nil {
logger.Log.Warnf("failed to enumerate child folders under (%s):\n%v", m.RootWorkDir, err)
errorsOccured = true
} else {
for _, groupName := range groupNames {
// Enable this continue only if we enable uploading as
// soon as packages are done building.
groupEnabled, groupSize := m.findCCacheGroupInfo(groupName)
if !groupEnabled {
// This should never happen unless a previous run had it
// enabled and the folder got created. The correct behavior
// is that the folder is not even created before the pkg
// build starts and hence by reaching this method, it
// should not be there.
//
logger.Log.Infof(" ccache is explicitly disabled for this group in the ccache configuration. Skipping...")
continue
}
if groupSize < 2 {
// This has either been processed earlier or there is
// nothing to process.
continue
}
groupCCacheDir, err := m.buildPkgCCacheDir(groupName, architecture)
if err != nil {
logger.Log.Warnf("Failed to get ccache dir for architecture (%s) and group name (%s):\n%v", architecture, groupName, err)
errorsOccured = true
}
logger.Log.Infof(" processing ccache folder (%s)...", groupCCacheDir)
m.setCurrentPkgGroupInternal(groupName, groupEnabled, groupSize, architecture)
err = m.UploadPkgGroupCCache()
if err != nil {
errorsOccured = true
logger.Log.Warnf("CCache will not be archived for (%s) (%s):\n%v", architecture, groupName, err)
}
}
}
}
if errorsOccured {
return errors.New("CCache archiving and upload failed. See above warnings for more details.")
}
return nil
}
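To tie the pieces above together, here is a rough sketch (illustration only, not part of the commit) of the call sequence that pkgworker follows later in this diff: create the manager, bind it to the current package's group, download any previously published cache before the build, and upload afterwards. The package name, architecture, and paths are placeholders; the packages are internal to the toolkit module and the toolkit's logger is assumed to be initialized.

package sketch

import (
    ccachemanagerpkg "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/ccachemanager"
    "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
)

func buildWithCCache() {
    mgr, err := ccachemanagerpkg.CreateManager("/ccache", "/path/to/ccache-configuration.json")
    if err != nil {
        logger.Log.Warnf("ccache manager unavailable, building without ccache: %v", err)
        return
    }
    // Resolve the group (a configured name like "kernel", or the package's own name) and architecture.
    err = mgr.SetCurrentPkgGroup("kernel-azure", "x86_64")
    if err == nil && mgr.CurrentPkgGroup.Enabled {
        _ = mgr.DownloadPkgGroupCCache() // best effort: seed the cache from a previous build
        // ... build the package with the chroot's ccache dir bind-mounted to
        // mgr.CurrentPkgGroup.CCacheDir ...
        if mgr.CurrentPkgGroup.Size == 1 {
            _ = mgr.UploadPkgGroupCCache() // single-package groups can upload right away
        }
    }
    // Multi-package groups are archived in one pass; in the real flow this happens
    // once at the very end of the whole build, not per package.
    _ = mgr.UploadMultiPkgGroupCCaches()
}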

View file

@@ -72,7 +72,53 @@ func CopyContents(srcDir, dstDir string) (err error) {
return
}
}
return
}
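// EnsureDirExists creates dirName (including any missing parents) if it does not already exist.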
func EnsureDirExists(dirName string) (err error) {
_, err = os.Stat(dirName)
if err == nil {
return nil
}
if os.IsNotExist(err) {
err = os.MkdirAll(dirName, 0755)
if err != nil {
return err
}
} else {
return err
}
return nil
}
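// GetChildDirs returns the names (not full paths) of the immediate child directories of parentFolder.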
func GetChildDirs(parentFolder string) ([]string, error) {
childFolders := []string{}
dir, err := os.Open(parentFolder)
if err != nil {
return nil, err
}
defer dir.Close()
children, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
for _, child := range children {
childPath := filepath.Join(parentFolder, child)
info, err := os.Stat(childPath)
if err != nil {
continue
}
if info.IsDir() {
childFolders = append(childFolders, child)
}
}
return childFolders, nil
}

View file

@@ -4,6 +4,7 @@
package rpm
import (
"errors"
"fmt"
"path/filepath"
"regexp"
@@ -99,6 +100,23 @@ func GetRpmArch(goArch string) (rpmArch string, err error) {
return
}
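// GetBasePackageNameFromSpecFile returns the spec file's name stripped of its directory and
// extension (e.g. "kernel" for a path ending in "kernel.spec").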
func GetBasePackageNameFromSpecFile(specPath string) (basePackageName string, err error) {
baseName := filepath.Base(specPath)
if baseName == "" {
return "", errors.New(fmt.Sprintf("Cannot extract file name from specPath (%s).", specPath))
}
fileExtension := filepath.Ext(baseName)
if fileExtension == "" {
return "", errors.New(fmt.Sprintf("Cannot extract file extension from file name (%s).", baseName))
}
basePackageName = baseName[:len(baseName)-len(fileExtension)]
return
}
// SetMacroDir adds RPM_CONFIGDIR=$(newMacroDir) into the shell's environment for the duration of a program.
// To restore the environment the caller can use shell.SetEnvironment() with the returned origenv.
// On an empty string argument return success immediately and do not modify the environment.

View file

@@ -10,9 +10,11 @@ import (
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/ccachemanager"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
@@ -44,7 +46,7 @@
srpmsDirPath = app.Flag("srpm-dir", "The output directory for source RPM packages").Required().String()
toolchainDirPath = app.Flag("toolchain-rpms-dir", "Directory that contains already built toolchain RPMs. Should contain a top level directory for each architecture.").Required().ExistingDir()
cacheDir = app.Flag("cache-dir", "The cache directory containing downloaded dependency RPMS from CBL-Mariner Base").Required().ExistingDir()
ccacheDir = app.Flag("ccache-dir", "The directory used to store ccache outputs").Required().ExistingDir()
basePackageName = app.Flag("base-package-name", "The name of the spec file used to build this package without the extension.").Required().String()
noCleanup = app.Flag("no-cleanup", "Whether or not to delete the chroot folder after the build is done").Bool()
distTag = app.Flag("dist-tag", "The distribution tag the SPEC will be built with.").Required().String()
distroReleaseVersion = app.Flag("distro-release-version", "The distro release version that the SRPM will be built with").Required().String()
@@ -54,6 +56,8 @@ var (
packagesToInstall = app.Flag("install-package", "Filepaths to RPM packages that should be installed before building.").Strings()
outArch = app.Flag("out-arch", "Architecture of resulting package").String()
useCcache = app.Flag("use-ccache", "Automatically install and use ccache during package builds").Bool()
ccacheRootDir = app.Flag("ccache-root-dir", "The directory used to store ccache outputs").String()
ccachConfig = app.Flag("ccache-config", "The configuration file for ccache.").String()
maxCPU = app.Flag("max-cpu", "Max number of CPUs used for package building").Default("").String()
timeout = app.Flag("timeout", "Timeout for package building").Required().Duration()
@@ -85,14 +89,36 @@ func main() {
defines[rpm.DistroReleaseVersionDefine] = *distroReleaseVersion
defines[rpm.DistroBuildNumberDefine] = *distroBuildNumber
defines[rpm.MarinerModuleLdflagsDefine] = "-Wl,-dT,%{_topdir}/BUILD/module_info.ld"
if *useCcache {
defines[rpm.MarinerCCacheDefine] = "true"
ccacheManager, err := ccachemanagerpkg.CreateManager(*ccacheRootDir, *ccachConfig)
if err == nil {
if *useCcache {
buildArch, err := rpm.GetRpmArch(runtime.GOARCH)
if err == nil {
err = ccacheManager.SetCurrentPkgGroup(*basePackageName, buildArch)
if err == nil {
if ccacheManager.CurrentPkgGroup.Enabled {
defines[rpm.MarinerCCacheDefine] = "true"
}
} else {
logger.Log.Warnf("Failed to set package ccache configuration:\n%v", err)
ccacheManager = nil
}
} else {
logger.Log.Warnf("Failed to get build architecture:\n%v", err)
ccacheManager = nil
}
}
} else {
logger.Log.Warnf("Failed to initialize the ccache manager:\n%v", err)
ccacheManager = nil
}
if *maxCPU != "" {
defines[rpm.MaxCPUDefine] = *maxCPU
}
builtRPMs, err := buildSRPMInChroot(chrootDir, rpmsDirAbsPath, toolchainDirAbsPath, *workerTar, *srpmFile, *repoFile, *rpmmacrosFile, *outArch, defines, *noCleanup, *runCheck, *packagesToInstall, *useCcache, *timeout)
builtRPMs, err := buildSRPMInChroot(chrootDir, rpmsDirAbsPath, toolchainDirAbsPath, *workerTar, *srpmFile, *repoFile, *rpmmacrosFile, *outArch, defines, *noCleanup, *runCheck, *packagesToInstall, ccacheManager, *timeout)
logger.PanicOnError(err, "Failed to build SRPM '%s'. For details see log file: %s .", *srpmFile, *logFile)
err = copySRPMToOutput(*srpmFile, srpmsDirAbsPath)
@@ -123,7 +149,12 @@ func buildChrootDirPath(workDir, srpmFilePath string, runCheck bool) (chrootDirP
return filepath.Join(workDir, buildDirName)
}
func buildSRPMInChroot(chrootDir, rpmDirPath, toolchainDirPath, workerTar, srpmFile, repoFile, rpmmacrosFile, outArch string, defines map[string]string, noCleanup, runCheck bool, packagesToInstall []string, useCcache bool, timeout time.Duration) (builtRPMs []string, err error) {
func isCCacheEnabled(ccacheManager *ccachemanagerpkg.CCacheManager) bool {
return ccacheManager != nil && ccacheManager.CurrentPkgGroup.Enabled
}
func buildSRPMInChroot(chrootDir, rpmDirPath, toolchainDirPath, workerTar, srpmFile, repoFile, rpmmacrosFile, outArch string, defines map[string]string, noCleanup, runCheck bool, packagesToInstall []string, ccacheManager *ccachemanagerpkg.CCacheManager, timeout time.Duration) (builtRPMs []string, err error) {
const (
buildHeartbeatTimeout = 30 * time.Minute
@@ -156,14 +187,24 @@ func buildSRPMInChroot(chrootDir, rpmDirPath, toolchainDirPath, workerTar, srpmF
quit <- true
}()
if isCCacheEnabled(ccacheManager) {
err = ccacheManager.DownloadPkgGroupCCache()
if err != nil {
logger.Log.Infof("CCache will not be able to use previously generated artifacts:\n%v", err)
}
}
// Create the chroot used to build the SRPM
chroot := safechroot.NewChroot(chrootDir, existingChrootDir)
outRpmsOverlayMount, outRpmsOverlayExtraDirs := safechroot.NewOverlayMountPoint(chroot.RootDir(), overlaySource, chrootLocalRpmsDir, rpmDirPath, chrootLocalRpmsDir, overlayWorkDirRpms)
toolchainRpmsOverlayMount, toolchainRpmsOverlayExtraDirs := safechroot.NewOverlayMountPoint(chroot.RootDir(), overlaySource, chrootLocalToolchainDir, toolchainDirPath, chrootLocalToolchainDir, overlayWorkDirToolchain)
rpmCacheMount := safechroot.NewMountPoint(*cacheDir, chrootLocalRpmsCacheDir, "", safechroot.BindMountPointFlags, "")
ccacheMount := safechroot.NewMountPoint(*ccacheDir, chrootCcacheDir, "", safechroot.BindMountPointFlags, "")
mountPoints := []*safechroot.MountPoint{outRpmsOverlayMount, toolchainRpmsOverlayMount, rpmCacheMount, ccacheMount}
mountPoints := []*safechroot.MountPoint{outRpmsOverlayMount, toolchainRpmsOverlayMount, rpmCacheMount}
if isCCacheEnabled(ccacheManager) {
ccacheMount := safechroot.NewMountPoint(ccacheManager.CurrentPkgGroup.CCacheDir, chrootCcacheDir, "", safechroot.BindMountPointFlags, "")
mountPoints = append(mountPoints, ccacheMount)
}
extraDirs := append(outRpmsOverlayExtraDirs, chrootLocalRpmsCacheDir, chrootCcacheDir)
extraDirs = append(extraDirs, toolchainRpmsOverlayExtraDirs...)
@@ -183,7 +224,7 @@ func buildSRPMInChroot(chrootDir, rpmDirPath, toolchainDirPath, workerTar, srpmF
results := make(chan error)
go func() {
buildErr := chroot.Run(func() (err error) {
return buildRPMFromSRPMInChroot(srpmFileInChroot, outArch, runCheck, defines, packagesToInstall, useCcache)
return buildRPMFromSRPMInChroot(srpmFileInChroot, outArch, runCheck, defines, packagesToInstall, isCCacheEnabled(ccacheManager))
})
results <- buildErr
}()
@@ -204,10 +245,20 @@ func buildSRPMInChroot(chrootDir, rpmDirPath, toolchainDirPath, workerTar, srpmF
builtRPMs, err = moveBuiltRPMs(chroot.RootDir(), rpmDirPath)
}
// Only if the groupSize is 1 we can archive since no other packages will
// re-update this cache.
if isCCacheEnabled(ccacheManager) && ccacheManager.CurrentPkgGroup.Size == 1 {
err = ccacheManager.UploadPkgGroupCCache()
if err != nil {
logger.Log.Warnf("Unable to upload ccache archive:\n%v", err)
}
}
return
}
func buildRPMFromSRPMInChroot(srpmFile, outArch string, runCheck bool, defines map[string]string, packagesToInstall []string, useCcache bool) (err error) {
// Convert /localrpms into a repository that a package manager can use.
err = rpmrepomanager.CreateRepo(chrootLocalRpmsDir)
if err != nil {

View file

@@ -32,12 +32,13 @@ func (c *ChrootAgent) Initialize(config *BuildAgentConfig) (err error) {
}
// BuildPackage builds a given file and returns the output files or error.
// - basePackageName is the base package name (i.e. 'kernel').
// - inputFile is the SRPM to build.
// - logName is the file name to save the package build log to.
// - outArch is the target architecture to build for.
// - runCheck is true if the package should run the "%check" section during the build.
// - dependencies is a list of dependencies that need to be installed before building.
func (c *ChrootAgent) BuildPackage(inputFile, logName, outArch string, runCheck bool, dependencies []string) (builtFiles []string, logFile string, err error) {
func (c *ChrootAgent) BuildPackage(basePackageName, inputFile, logName, outArch string, runCheck bool, dependencies []string) (builtFiles []string, logFile string, err error) {
// On success, pkgworker will print a comma-separated list of all RPMs built to stdout.
// This will be the last stdout line written.
const delimiter = ","
@ -54,7 +55,7 @@ func (c *ChrootAgent) BuildPackage(inputFile, logName, outArch string, runCheck
logger.Log.Trace(lastStdoutLine)
}
args := serializeChrootBuildAgentConfig(c.config, inputFile, logFile, outArch, runCheck, dependencies)
args := serializeChrootBuildAgentConfig(c.config, basePackageName, inputFile, logFile, outArch, runCheck, dependencies)
err = shell.ExecuteLiveWithCallback(onStdout, logger.Log.Trace, true, c.config.Program, args...)
if err == nil && lastStdoutLine != "" {
@ -75,7 +76,7 @@ func (c *ChrootAgent) Close() (err error) {
}
// serializeChrootBuildAgentConfig serializes a BuildAgentConfig into arguments usable by pkgworker to build the package.
func serializeChrootBuildAgentConfig(config *BuildAgentConfig, inputFile, logFile, outArch string, runCheck bool, dependencies []string) (serializedArgs []string) {
func serializeChrootBuildAgentConfig(config *BuildAgentConfig, basePackageName, inputFile, logFile, outArch string, runCheck bool, dependencies []string) (serializedArgs []string) {
serializedArgs = []string{
fmt.Sprintf("--input=%s", inputFile),
fmt.Sprintf("--work-dir=%s", config.WorkDir),
@ -85,7 +86,7 @@ func serializeChrootBuildAgentConfig(config *BuildAgentConfig, inputFile, logFil
fmt.Sprintf("--toolchain-rpms-dir=%s", config.ToolchainDir),
fmt.Sprintf("--srpm-dir=%s", config.SrpmDir),
fmt.Sprintf("--cache-dir=%s", config.CacheDir),
fmt.Sprintf("--ccache-dir=%s", config.CCacheDir),
fmt.Sprintf("--base-package-name=%s", basePackageName),
fmt.Sprintf("--dist-tag=%s", config.DistTag),
fmt.Sprintf("--distro-release-version=%s", config.DistroReleaseVersion),
fmt.Sprintf("--distro-build-number=%s", config.DistroBuildNumber),
@ -110,6 +111,8 @@ func serializeChrootBuildAgentConfig(config *BuildAgentConfig, inputFile, logFil
if config.UseCcache {
serializedArgs = append(serializedArgs, "--use-ccache")
serializedArgs = append(serializedArgs, fmt.Sprintf("--ccache-root-dir=%s", config.CCacheDir))
serializedArgs = append(serializedArgs, fmt.Sprintf("--ccache-config=%s", config.CCacheConfig))
}
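// With UseCcache set, the serialized arguments therefore gain, for example
// (paths are illustrative):
//   --use-ccache --ccache-root-dir=/work/ccache --ccache-config=/work/ccache-configuration.json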
for _, dependency := range dependencies {


@ -20,6 +20,7 @@ type BuildAgentConfig struct {
SrpmDir string
CacheDir string
CCacheDir string
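// CCacheConfig is the path to the ccache configuration file (the JSON that
// defines the remote store settings and the package groups).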
CCacheConfig string
DistTag string
DistroReleaseVersion string
@ -41,12 +42,13 @@ type BuildAgent interface {
Initialize(config *BuildAgentConfig) error
// BuildPackage builds a given file and returns the output files or error.
// - basePackageName is the base package name derived from the spec file (e.g. 'kernel').
// - inputFile is the SRPM to build.
// - logName is the file name to save the package build log to.
// - outArch is the target architecture to build for.
// - runCheck is true if the package should run the "%check" section during the build.
// - dependencies is a list of dependencies that need to be installed before building.
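//
// Example call (illustrative values):
//   builtFiles, logFile, err := agent.BuildPackage("kernel", "kernel.src.rpm", "kernel.src.rpm.log", "x86_64", false, dependencies)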
BuildPackage(inputFile, logName, outArch string, runCheck bool, dependencies []string) ([]string, string, error)
BuildPackage(basePackageName, inputFile, logName, outArch string, runCheck bool, dependencies []string) ([]string, string, error)
// Config returns a copy of the agent's configuration.
Config() BuildAgentConfig


@ -28,7 +28,7 @@ func (t *TestAgent) Initialize(config *BuildAgentConfig) (err error) {
}
// BuildPackage simply sleeps and then returns success for TestAgent.
func (t *TestAgent) BuildPackage(inputFile, logName, outArch string, runCheck bool, dependencies []string) (builtFiles []string, logFile string, err error) {
func (t *TestAgent) BuildPackage(basePackageName, inputFile, logName, outArch string, runCheck bool, dependencies []string) (builtFiles []string, logFile string, err error) {
const sleepDuration = time.Second * 5
time.Sleep(sleepDuration)


@ -12,6 +12,7 @@ import (
"sync"
"time"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/ccachemanager"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph"
@ -63,7 +64,6 @@ var (
toolchainDirPath = app.Flag("toolchain-rpms-dir", "Directory that contains already built toolchain RPMs. Should contain top level directories for architecture.").Required().ExistingDir()
srpmDir = app.Flag("srpm-dir", "The output directory for source RPM packages").Required().String()
cacheDir = app.Flag("cache-dir", "The cache directory containing downloaded dependency RPMs from Mariner Base").Required().ExistingDir()
ccacheDir = app.Flag("ccache-dir", "The directory used to store ccache outputs").Required().ExistingDir()
buildLogsDir = app.Flag("build-logs-dir", "Directory to store package build logs").Required().ExistingDir()
imageConfig = app.Flag("image-config-file", "Optional image config file to extract a package list from.").String()
@ -82,6 +82,8 @@ var (
toolchainManifest = app.Flag("toolchain-manifest", "Path to a list of RPMs which are created by the toolchain. RPMs from this list are considered 'prebuilt' and will not be rebuilt").ExistingFile()
optimizeWithCachedImplicit = app.Flag("optimize-with-cached-implicit", "Optimize the build process by allowing cached implicit packages to populate the initial build graph instead of waiting for a real package build to provide the nodes.").Bool()
useCcache = app.Flag("use-ccache", "Automatically install and use ccache during package builds").Bool()
ccacheDir = app.Flag("ccache-dir", "The directory used to store ccache outputs").String()
ccacheConfig = app.Flag("ccache-config", "The ccache configuration file path.").String()
allowToolchainRebuilds = app.Flag("allow-toolchain-rebuilds", "Allow toolchain packages to rebuild without causing an error.").Bool()
maxCPU = app.Flag("max-cpu", "Max number of CPUs used for package building").Default("").String()
timeout = app.Flag("timeout", "Max duration for any individual package build/test").Default(defaultTimeout).Duration()
@ -150,7 +152,6 @@ func main() {
buildAgentConfig := &buildagents.BuildAgentConfig{
Program: *buildAgentProgram,
CacheDir: *cacheDir,
CCacheDir: *ccacheDir,
RepoFile: *repoFile,
RpmDir: *rpmDir,
ToolchainDir: *toolchainDirPath,
@ -163,10 +164,12 @@ func main() {
DistroBuildNumber: *distroBuildNumber,
RpmmacrosFile: *rpmmacrosFile,
NoCleanup: *noCleanup,
UseCcache: *useCcache,
MaxCpu: *maxCPU,
Timeout: *timeout,
NoCleanup: *noCleanup,
UseCcache: *useCcache,
CCacheDir: *ccacheDir,
CCacheConfig: *ccacheConfig,
MaxCpu: *maxCPU,
Timeout: *timeout,
LogDir: *buildLogsDir,
LogLevel: *logLevel,
@ -194,6 +197,19 @@ func main() {
if err != nil {
logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s.", err)
}
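// Once all builds have finished, upload the ccache archives for groups that
// contain more than one package. Single-package groups upload their archive
// right after that package builds, so only multi-package groups are handled here.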
if *useCcache {
logger.Log.Infof(" ccache is enabled. processing multi-package groups under (%s)...", *ccacheDir)
ccacheManager, err := ccachemanagerpkg.CreateManager(*ccacheDir, *ccacheConfig)
if err == nil {
err = ccacheManager.UploadMultiPkgGroupCCaches()
if err != nil {
logger.Log.Warnf("Failed to archive CCache artifacts:\n%v.", err)
}
} else {
logger.Log.Warnf("Failed to initialize the ccache manager:\n%v", err)
}
}
}
// cancelOutstandingBuilds stops any builds that are currently running.
@ -450,7 +466,6 @@ func buildAllNodes(stopOnFailure, canUseCache bool, packagesToRebuild, testsToRe
logger.Log.Infof("%d currently active test(s): %v.", len(activeTests), activeTests)
}
}
}
// Let the workers know they are done


@ -17,6 +17,7 @@ import (
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils"
"github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/buildagents"
"gonum.org/v1/gonum/graph"
@ -146,6 +147,13 @@ func BuildNodeWorker(channels *BuildChannels, agent buildagents.BuildAgent, grap
func buildNode(request *BuildRequest, graphMutex *sync.RWMutex, agent buildagents.BuildAgent, buildAttempts int, ignoredPackages []*pkgjson.PackageVer) (ignored bool, builtFiles []string, logFile string, err error) {
node := request.Node
baseSrpmName := node.SRPMFileName()
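// Derive the base package name from the spec file path; for example, a spec
// path ending in "kernel.spec" is expected to yield "kernel".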
basePackageName, err := rpm.GetBasePackageNameFromSpecFile(node.SpecPath)
if err != nil {
// This can only happen if the spec file does not have a name (only an extension).
logger.Log.Warnf("An error occured while getting the base package name from (%s). This may result in further errors.", node.SpecPath)
}
ignored = sliceutils.Contains(ignoredPackages, node.VersionedPkg, sliceutils.PackageVerMatch)
if ignored {
@ -162,7 +170,7 @@ func buildNode(request *BuildRequest, graphMutex *sync.RWMutex, agent buildagent
dependencies := getBuildDependencies(node, request.PkgGraph, graphMutex)
logger.Log.Infof("Building: %s", baseSrpmName)
builtFiles, logFile, err = buildSRPMFile(agent, buildAttempts, node.SrpmPath, node.Architecture, dependencies)
builtFiles, logFile, err = buildSRPMFile(agent, buildAttempts, basePackageName, node.SrpmPath, node.Architecture, dependencies)
return
}
@ -170,6 +178,13 @@ func buildNode(request *BuildRequest, graphMutex *sync.RWMutex, agent buildagent
func testNode(request *BuildRequest, graphMutex *sync.RWMutex, agent buildagents.BuildAgent, checkAttempts int, ignoredTests []*pkgjson.PackageVer) (ignored bool, logFile string, err error) {
node := request.Node
baseSrpmName := node.SRPMFileName()
basePackageName, err := rpm.GetBasePackageNameFromSpecFile(node.SpecPath)
if err != nil {
// This can only happen if the spec file does not have a name (only an extension).
logger.Log.Warnf("An error occured while getting the base package name from (%s). This may result in further errors.", node.SpecPath)
}
ignored = sliceutils.Contains(ignoredTests, node.VersionedPkg, sliceutils.PackageVerMatch)
if ignored {
@ -185,7 +200,7 @@ func testNode(request *BuildRequest, graphMutex *sync.RWMutex, agent buildagents
dependencies := getBuildDependencies(node, request.PkgGraph, graphMutex)
logger.Log.Infof("Testing: %s", baseSrpmName)
logFile, err = testSRPMFile(agent, checkAttempts, node.SrpmPath, node.Architecture, dependencies)
logFile, err = testSRPMFile(agent, checkAttempts, basePackageName, node.SrpmPath, node.Architecture, dependencies)
return
}
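// For reference, a helper like rpm.GetBasePackageNameFromSpecFile can be
// thought of as stripping the directory and the extension from the spec path.
// An illustrative sketch only, not the function's verbatim implementation:
//
// func GetBasePackageNameFromSpecFile(specFilePath string) (string, error) {
//     baseName := filepath.Base(specFilePath)
//     ext := filepath.Ext(baseName)
//     if len(ext) == len(baseName) {
//         return "", fmt.Errorf("failed to extract package name from spec file path (%s)", specFilePath)
//     }
//     return strings.TrimSuffix(baseName, ext), nil
// }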
@ -254,7 +269,7 @@ func parseCheckSection(logFile string) (err error) {
}
// buildSRPMFile sends an SRPM to a build agent to build.
func buildSRPMFile(agent buildagents.BuildAgent, buildAttempts int, srpmFile, outArch string, dependencies []string) (builtFiles []string, logFile string, err error) {
func buildSRPMFile(agent buildagents.BuildAgent, buildAttempts int, basePackageName, srpmFile, outArch string, dependencies []string) (builtFiles []string, logFile string, err error) {
const (
retryDuration = time.Second
runCheck = false
@ -262,7 +277,7 @@ func buildSRPMFile(agent buildagents.BuildAgent, buildAttempts int, srpmFile, ou
logBaseName := filepath.Base(srpmFile) + ".log"
err = retry.Run(func() (buildErr error) {
builtFiles, logFile, buildErr = agent.BuildPackage(srpmFile, logBaseName, outArch, runCheck, dependencies)
builtFiles, logFile, buildErr = agent.BuildPackage(basePackageName, srpmFile, logBaseName, outArch, runCheck, dependencies)
return
}, buildAttempts, retryDuration)
@ -270,7 +285,7 @@ func buildSRPMFile(agent buildagents.BuildAgent, buildAttempts int, srpmFile, ou
}
// testSRPMFile sends an SRPM to a build agent to test.
func testSRPMFile(agent buildagents.BuildAgent, checkAttempts int, srpmFile string, outArch string, dependencies []string) (logFile string, err error) {
func testSRPMFile(agent buildagents.BuildAgent, checkAttempts int, basePackageName string, srpmFile string, outArch string, dependencies []string) (logFile string, err error) {
const (
retryDuration = time.Second
runCheck = true
@ -282,7 +297,7 @@ func testSRPMFile(agent buildagents.BuildAgent, checkAttempts int, srpmFile stri
err = retry.Run(func() (buildErr error) {
checkFailed = false
_, logFile, buildErr = agent.BuildPackage(srpmFile, logBaseName, outArch, runCheck, dependencies)
_, logFile, buildErr = agent.BuildPackage(basePackageName, srpmFile, logBaseName, outArch, runCheck, dependencies)
if buildErr != nil {
logger.Log.Warnf("Test build for '%s' failed on a non-test build issue. Error: %s", srpmFile, buildErr)
return