Fixing the TestRPM-HydratedBuild pipeline to not report a toolchain error if allowToolchainRebuilds is true (#4949)

- Created a new variable, ALLOW_TOOLCHAIN_REBUILDS, so that pipelines can use this flag to keep a toolchain rebuild from being reported as an error and failing the pipeline.
- Set the flag to "false" for all builds except the testing pipelines. The corresponding changes are included in the CBL-Mariner-pipelines code.
- Added allowToolchainRebuilds to the RecordBuildResult function's conditions and changed the PrintBuildSummary function to print an info log instead of an error log when the flag is true (a simplified sketch of this behavior follows below).
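For orientation, below is a minimal, standalone Go sketch of the behavior this change introduces, condensed from the schedulerutils code in the diff that follows. The reportConflicts helper, the use of the standard-library log package in place of the toolkit's logger, and the sample package name are illustrative assumptions, not toolkit code.

package main

import (
	"fmt"
	"log"
)

// reportConflicts mirrors how PrintBuildSummary and buildGraph now treat toolchain
// conflicts: they are logged as errors and fail the build unless allowToolchainRebuilds
// is set, in which case they are logged at info level only.
func reportConflicts(rpmConflicts, srpmConflicts []string, allowToolchainRebuilds bool) error {
	// Pick the log level the same way PrintBuildSummary picks conflictsLogger.
	conflictsLogger := func(format string, args ...interface{}) {
		log.Printf("ERROR: "+format, args...)
	}
	if allowToolchainRebuilds || (len(rpmConflicts) == 0 && len(srpmConflicts) == 0) {
		conflictsLogger = func(format string, args ...interface{}) {
			log.Printf("INFO: "+format, args...)
		}
	}

	if allowToolchainRebuilds && (len(rpmConflicts) > 0 || len(srpmConflicts) > 0) {
		log.Print("INFO: Toolchain RPM conflicts are ignored since ALLOW_TOOLCHAIN_REBUILDS=y")
	}

	conflictsLogger("Number of toolchain RPM conflicts: %d", len(rpmConflicts))
	conflictsLogger("Number of toolchain SRPM conflicts: %d", len(srpmConflicts))

	// Only fail the build when rebuilds are not explicitly allowed, as buildGraph now does.
	if !allowToolchainRebuilds && (len(rpmConflicts) > 0 || len(srpmConflicts) > 0) {
		return fmt.Errorf("toolchain packages rebuilt, see the build summary for details")
	}
	return nil
}

func main() {
	conflicts := []string{"example-toolchain-package.rpm"} // placeholder conflict name

	// Default behavior (ALLOW_TOOLCHAIN_REBUILDS=n): conflicts are errors and fail the build.
	if err := reportConflicts(conflicts, nil, false); err != nil {
		log.Printf("build failed: %s", err)
	}

	// With ALLOW_TOOLCHAIN_REBUILDS=y the same conflicts are reported as info only.
	if err := reportConflicts(conflicts, nil, true); err != nil {
		log.Printf("build failed: %s", err)
	}
}

On the make side, the intended usage appears to be setting ALLOW_TOOLCHAIN_REBUILDS=y on the make command line (it defaults to n in the toolkit Makefile below), which pkggen's Makefile then forwards to the scheduler as --allow-toolchain-rebuilds.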

---------

Co-authored-by: Betty Lakes <bettylakes@microsoft.com>
Betty 2023-03-13 15:50:55 -07:00 committed by GitHub
Parent 1a316eb216
Commit 4ec62f2467
No key matching this signature was found
GPG key ID: 4AEE18F83AFDEB23
6 changed files: 52 additions and 39 deletions


@ -13,11 +13,11 @@ ifeq ($(origin CONFIG_FILE), undefined)
CONFIG_FILE = $(toolkit_root)/imageconfigs/core-efi.json
$(warning CONFIG_FILE is undefined, defaulting to toolkit's core-efi.json.)
endif
CONFIG_BASE_DIR ?= $(dir $(CONFIG_FILE))
PACKAGE_BUILD_LIST ?=
PACKAGE_REBUILD_LIST ?=
PACKAGE_IGNORE_LIST ?=
SRPM_PACK_LIST ?=
REBUILD_TOOLCHAIN ?= n
INCREMENTAL_TOOLCHAIN ?= n
@ -52,6 +52,7 @@ REBUILD_DEP_CHAINS ?= y
HYDRATED_BUILD ?= n
DELTA_BUILD ?= n
TARGET_ARCH ?=
ALLOW_TOOLCHAIN_REBUILDS ?= n
# Folder defines
toolkit_root := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))


@ -733,7 +733,8 @@ To reproduce an ISO build, run the same make invocation as before, but set:
| ARCHIVE_TOOL | $(shell if command -v pigz 1>/dev/null 2>&1 ; then echo pigz ; else echo gzip ; fi ) | Default tool to use in conjunction with `tar` to extract `*.tar.gz` files. Tries to use `pigz` if available, otherwise uses `gzip`
| INCREMENTAL_TOOLCHAIN | n | Only build toolchain RPM packages if they are not already present
| RUN_CHECK | n | Run the %check sections when compiling packages
| ALLOW_TOOLCHAIN_REBUILDS | n | Do not treat rebuilds of toolchain packages during regular package build phase as errors.
| PACKAGE_BUILD_RETRIES | 1 | Number of build retries for each package
| CHECK_BUILD_RETRIES | 1 | Minimum number of check section retries for each package if RUN_CHECK=y and tests fail.
| IMAGE_TAG | (empty) | Text appended to a resulting image name - empty by default. Does not apply to the initrd. The text will be prepended with a hyphen.
| CONCURRENT_PACKAGE_BUILDS | 0 | The maximum number of concurrent package builds that are allowed at once. If set to 0 this defaults to the number of logical CPUs.


@ -223,6 +223,7 @@ $(STATUS_FLAGS_DIR)/build-rpms.flag: $(preprocessed_file) $(chroot_worker) $(go-
$(if $(filter-out y,$(CLEANUP_PACKAGE_BUILDS)),--no-cleanup) \
$(if $(filter y,$(DELTA_BUILD)),--delta-build) \
$(if $(filter y,$(USE_CCACHE)),--use-ccache) \
$(if $(filter y,$(ALLOW_TOOLCHAIN_REBUILDS)),--allow-toolchain-rebuilds) \
$(logging_command) && \
touch $@


@ -61,19 +61,20 @@ var (
imageConfig = app.Flag("image-config-file", "Optional image config file to extract a package list from.").String()
baseDirPath = app.Flag("base-dir", "Base directory for relative file paths from the config. Defaults to config's directory.").ExistingDir()
distTag = app.Flag("dist-tag", "The distribution tag SRPMs will be built with.").Required().String()
distroReleaseVersion = app.Flag("distro-release-version", "The distro release version that the SRPM will be built with.").Required().String()
distroBuildNumber = app.Flag("distro-build-number", "The distro build number that the SRPM will be built with.").Required().String()
rpmmacrosFile = app.Flag("rpmmacros-file", "Optional file path to an rpmmacros file for rpmbuild to use.").ExistingFile()
buildAttempts = app.Flag("build-attempts", "Sets the number of times to try building a package.").Default(defaultBuildAttempts).Int()
checkAttempts = app.Flag("check-attempts", "Sets the minimum number of times to test a package if the tests fail.").Default(defaultCheckAttempts).Int()
runCheck = app.Flag("run-check", "Run the check during package builds.").Bool()
noCleanup = app.Flag("no-cleanup", "Whether or not to delete the chroot folder after the build is done").Bool()
noCache = app.Flag("no-cache", "Disables using prebuilt cached packages.").Bool()
stopOnFailure = app.Flag("stop-on-failure", "Stop on failed build").Bool()
reservedFileListFile = app.Flag("reserved-file-list-file", "Path to a list of files which should not be generated during a build").ExistingFile()
deltaBuild = app.Flag("delta-build", "Enable delta build using remote cached packages.").Bool()
useCcache = app.Flag("use-ccache", "Automatically install and use ccache during package builds").Bool()
allowToolchainRebuilds = app.Flag("allow-toolchain-rebuilds", "Allow toolchain packages to rebuild without causing an error.").Bool()
validBuildAgentFlags = []string{buildagents.TestAgentFlag, buildagents.ChrootAgentFlag}
buildAgent = app.Flag("build-agent", "Type of build agent to build packages with.").PlaceHolder(exe.PlaceHolderize(validBuildAgentFlags)).Required().Enum(validBuildAgentFlags...)
@ -172,7 +173,7 @@ func main() {
signal.Notify(signals, unix.SIGINT, unix.SIGTERM)
go cancelBuildsOnSignal(signals, agent)
err = buildGraph(*inputGraphFile, *outputGraphFile, agent, *workers, *buildAttempts, *checkAttempts, *stopOnFailure, !*noCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages, reservedFiles, *deltaBuild)
err = buildGraph(*inputGraphFile, *outputGraphFile, agent, *workers, *buildAttempts, *checkAttempts, *stopOnFailure, !*noCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages, reservedFiles, *deltaBuild, *allowToolchainRebuilds)
if err != nil {
logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s", err)
}
@ -200,7 +201,7 @@ func cancelBuildsOnSignal(signals chan os.Signal, agent buildagents.BuildAgent)
// buildGraph builds all packages in the dependency graph requested.
// It will save the resulting graph to outputFile.
func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, checkAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages, reservedFiles []string, deltaBuild bool) (err error) {
func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, checkAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages, reservedFiles []string, deltaBuild bool, allowToolchainRebuilds bool) (err error) {
// graphMutex guards pkgGraph from concurrent reads and writes during build.
var graphMutex sync.RWMutex
@ -216,7 +217,7 @@ func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, work
logger.Log.Infof("Building %d nodes with %d workers", numberOfNodes, workers)
// After this call pkgGraph will be given to multiple routines and accessing it requires acquiring the mutex.
builtGraph, err := buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels, reservedFiles, deltaBuild)
builtGraph, err := buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels, reservedFiles, deltaBuild, allowToolchainRebuilds)
if builtGraph != nil {
graphMutex.RLock()
@ -269,7 +270,7 @@ func startWorkerPool(agent buildagents.BuildAgent, workers, buildAttempts, check
// - Attempts to satisfy any unresolved dynamic dependencies with new implicit provides from the build result.
// - Attempts to subgraph the graph to only contain the requested packages if possible.
// - Repeat.
func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels, reservedFiles []string, deltaBuild bool) (builtGraph *pkggraph.PkgGraph, err error) {
func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels, reservedFiles []string, deltaBuild bool, allowToolchainRebuilds bool) (builtGraph *pkggraph.PkgGraph, err error) {
var (
// stopBuilding tracks if the build has entered a failed state and this routine should stop as soon as possible.
stopBuilding bool
@ -334,7 +335,7 @@ func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNa
// Process the next build result
res := <-channels.Results
schedulerutils.PrintBuildResult(res)
buildState.RecordBuildResult(res)
buildState.RecordBuildResult(res, allowToolchainRebuilds)
if !stopBuilding {
if res.Err == nil {
@ -400,9 +401,11 @@ func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNa
time.Sleep(time.Second)
builtGraph = pkgGraph
schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState)
schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState, allowToolchainRebuilds)
schedulerutils.RecordBuildSummary(builtGraph, graphMutex, buildState, *outputCSVFile)
if !allowToolchainRebuilds && (len(buildState.ConflictingRPMs()) > 0 || len(buildState.ConflictingSRPMs()) > 0) {
err = fmt.Errorf("toolchain packages rebuilt. See build summary for details. Use '--allow-prebuilt-rebuilds' to suppress this error if rebuilds were expected")
}
return
}


@ -126,7 +126,7 @@ func (g *GraphBuildState) isConflictWithToolchain(fileToCheck string) (hadConfli
// RecordBuildResult records a build result in the graph build state.
// - It will record the result as a failure if applicable.
// - It will record all ancillary nodes of the result.
func (g *GraphBuildState) RecordBuildResult(res *BuildResult) {
func (g *GraphBuildState) RecordBuildResult(res *BuildResult, allowToolchainRebuilds bool) {
logger.Log.Debugf("Recording build result: %s", res.Node.FriendlyName())
@ -145,7 +145,7 @@ func (g *GraphBuildState) RecordBuildResult(res *BuildResult) {
g.nodeToState[node] = state
}
if !res.Skipped && !res.UsedCache {
if !allowToolchainRebuilds && !res.Skipped && !res.UsedCache {
for _, file := range res.BuiltFiles {
if g.isConflictWithToolchain(file) {
g.conflictingRPMs[filepath.Base(file)] = true
@ -153,7 +153,7 @@ func (g *GraphBuildState) RecordBuildResult(res *BuildResult) {
}
}
} else {
logger.Log.Debugf("skipping checking conflicts since this is not a built node (%v)", res.Node)
logger.Log.Debugf("skipping checking conflicts since this is either not a built node (%v) or the ALLOW_TOOLCHAIN_REBUILDS flag was set to 'y'", res.Node)
}
return


@ -140,7 +140,7 @@ func RecordBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, b
}
// PrintBuildSummary prints the summary of the entire build to the logger.
func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, buildState *GraphBuildState) {
func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, buildState *GraphBuildState, allowToolchainRebuilds bool) {
graphMutex.RLock()
defer graphMutex.RUnlock()
@ -157,6 +157,11 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu
rpmConflicts := buildState.ConflictingRPMs()
srpmConflicts := buildState.ConflictingSRPMs()
conflictsLogger := logger.Log.Errorf
if allowToolchainRebuilds || (len(rpmConflicts) == 0 && len(srpmConflicts) == 0) {
conflictsLogger = logger.Log.Infof
}
buildNodes := pkgGraph.AllBuildNodes()
for _, node := range buildNodes {
if buildState.IsNodeCached(node) {
@ -188,12 +193,14 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu
logger.Log.Infof("Number of failed SRPMs: %d", len(failures))
logger.Log.Infof("Number of blocked SRPMs: %d", len(unbuiltSRPMs))
logger.Log.Infof("Number of unresolved dependencies: %d", len(unresolvedDependencies))
if allowToolchainRebuilds && (len(rpmConflicts) > 0 || len(srpmConflicts) > 0) {
logger.Log.Infof("Toolchain RPMs conflicts are ignored since ALLOW_TOOLCHAIN_REBUILDS=y")
}
if len(rpmConflicts) > 0 || len(srpmConflicts) > 0 {
logger.Log.Errorf("Number of toolchain RPM conflicts: %d", len(rpmConflicts))
logger.Log.Errorf("Number of toolchain SRPM conflicts: %d", len(srpmConflicts))
} else {
logger.Log.Infof("Number of toolchain RPM conflicts: %d", len(rpmConflicts))
logger.Log.Infof("Number of toolchain SRPM conflicts: %d", len(srpmConflicts))
conflictsLogger("Number of toolchain RPM conflicts: %d", len(rpmConflicts))
conflictsLogger("Number of toolchain SRPM conflicts: %d", len(srpmConflicts))
}
if len(builtSRPMs) != 0 {
@ -232,16 +239,16 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu
}
if len(rpmConflicts) != 0 {
logger.Log.Error("RPM Conflicts with toolchain:")
conflictsLogger("RPM conflicts with toolchain: ")
for _, conflict := range rpmConflicts {
logger.Log.Errorf("--> %s", conflict)
conflictsLogger("--> %s", conflict)
}
}
if len(srpmConflicts) != 0 {
logger.Log.Error("SRPM Conflicts with toolchain:")
conflictsLogger("SRPM conflicts with toolchain: ")
for _, conflict := range srpmConflicts {
logger.Log.Errorf("--> %s", conflict)
conflictsLogger("--> %s", conflict)
}
}
}