Added ptest results parsing to the `PackageBuild` template (#6113)
Parent: a3dc2834a0
Commit: ab6d3ffb00

@@ -94,6 +94,7 @@ extends:
      isCheckBuild: true
      isQuickRebuildPackages: true
      outputArtifactsFolder: $(ob_outputDirectory)
      pipArtifactFeeds: "mariner/Mariner-Pypi-Feed"
      selfRepoName: self

    - task: PublishPipelineArtifact@1

@@ -22,6 +22,10 @@ parameters:
  type: string
  default: ""

- name: failOnTestFailures
  type: boolean
  default: true

- name: isCheckBuild
  type: string
  default: "default"

@@ -58,6 +62,9 @@ parameters:
  type: string
  default: "$(Build.ArtifactStagingDirectory)"

- name: pipArtifactFeeds
  type: string

- name: publishLogs
  type: boolean
  default: true

@@ -199,3 +206,11 @@ steps:
      tar -C "${{ parameters.buildRepoRoot }}/build/timestamp" -czf "$published_logs_dir/timestamp.tar.gz" .
    condition: always()
    displayName: "Copy logs for publishing"

- ${{ if parameters.isCheckBuild }}:
  - template: PackageTestResultsAnalysis.yml@${{ parameters.selfRepoName }}
    parameters:
      buildRepoRoot: ${{ parameters.buildRepoRoot }}
      failOnTestFailures: ${{ parameters.failOnTestFailures }}
      outputArtifactsFolder: ${{ parameters.outputArtifactsFolder }}
      pipArtifactFeeds: ${{ parameters.pipArtifactFeeds }}

@@ -0,0 +1,295 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

parameters:
- name: buildRepoRoot
  type: string
  default: "$(Build.SourcesDirectory)"

- name: failOnTestFailures
  type: boolean
  default: true

- name: outputArtifactsFolder
  type: string
  default: "$(Build.ArtifactStagingDirectory)"

- name: pipArtifactFeeds
  type: string

# Local constants. Can't use variables in a template without jobs or stages.
- name: reportFileName
  type: string
  default: "pkgtest_report_junit.xml"

- name: testsWorkspace
  type: string
  default: "$(Agent.TempDirectory)"

steps:
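# The 'junit_xml' dependency installed below is presumably resolved through the feed(s)
# in 'pipArtifactFeeds', which is why pip authentication runs first when custom feeds are supplied.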
- ${{ if parameters.pipArtifactFeeds }}:
  - task: PipAuthenticate@1
    inputs:
      artifactFeeds: "${{ parameters.pipArtifactFeeds }}"
    displayName: "Authenticate to custom pip artifact feeds"

- bash: |
    pip3 install junit_xml
  displayName: "Install Python dependencies"

- task: PythonScript@0
  inputs:
    pythonInterpreter: "/bin/python3"
    scriptSource: "inline"
    script: |
      from platform import machine
      import argparse
      import inspect
      import re

      from dateutil.parser import parse
      from glob import glob
      from junit_xml import TestSuite, TestCase
      from os.path import basename

      # Markers of package test for detecting pass/fail
      PACKAGE_TEST_END_REGEX = re.compile(r'msg="====== CHECK DONE .*\. EXIT STATUS (\d+)')
      PACKAGE_TEST_IGNORE_REGEX = re.compile(r'msg="\+ echo')
      PACKAGE_TEST_SKIP_REGEX = re.compile(r'msg="====== SKIPPING CHECK')
      PACKAGE_TEST_START_REGEX = re.compile(r'msg="====== CHECK START')
      PACKAGE_TEST_STATUS_INDEX = 0

      TEST_FILE_REGEX = re.compile(r'(.*/)*(.*)-(.*)-(.*?)(\.src.rpm.test.log)')
      TEST_FILE_PACKAGE_NAME_INDEX = 1
      TEST_FILE_PACKAGE_VERSION_INDEX = 2
      TEST_FILE_PACKAGE_RELEASE_INDEX = 3
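      # Illustration (hypothetical filename, not taken from a real build): for a log at
      # "/logs/acl-2.3.1-1.cm2.src.rpm.test.log", TEST_FILE_REGEX.search(...).groups() yields
      # ("/logs/", "acl", "2.3.1", "1.cm2", ".src.rpm.test.log"), so the indices above select
      # the package name, version and release.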

      class ADOPipelineLogger:
          def log(self, msg):
              '''
              Regular message log for an ADO pipeline.
              '''
              print(msg)

          def log_debug(self, msg):
              '''
              Debug log for an ADO pipeline.
              '''
              current_frame = inspect.currentframe()
              caller_frame = inspect.getouterframes(current_frame, 2)
              caller = caller_frame[1][3]
              print(f"##[debug]PACKAGE_TESTS::{caller}::{msg}")

          def log_group_begin(self, msg):
              '''
              Group begin log for an ADO pipeline.
              '''
              print(f"##[group]{msg}")

          def log_group_end(self):
              '''
              Group end log for an ADO pipeline.
              '''
              print("##[endgroup]")

          def log_progress(self, percentage):
              '''
              Task progress indicator for an ADO pipeline.
              '''
              print(f"##vso[task.setprogress value={percentage};]Log parsing progress")


      class PackageTestAnalyzer:
          '''
          Package test class to expose all the required functionality for parsing
          the Mariner package build logs.
          '''
          def __init__(self, logger):
              self.logger = logger

          def _get_package_details(self, log_path):
              '''
              Fetch the package details from the log filename
              '''
              matched_groups = TEST_FILE_REGEX.search(log_path).groups()
              package_name = matched_groups[TEST_FILE_PACKAGE_NAME_INDEX]
              version = f"{matched_groups[TEST_FILE_PACKAGE_VERSION_INDEX]}-{matched_groups[TEST_FILE_PACKAGE_RELEASE_INDEX]}"
              self.logger.log_debug(f"Package: {package_name} Version: {version}")

              return package_name, version

          def _get_timestamp(self, line):
              '''
              Get the timestamp from the log. Time is converted to epoch time.
              '''
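              # Assumed log line shape (illustration only, not an actual toolkit log):
              #   time="2023-01-01T00:00:00Z" msg="====== CHECK DONE ... EXIT STATUS 0"
              # The first whitespace-separated token is the 'time="..."' field, so splitting on
              # '=' and stripping the quotes leaves a string dateutil can parse.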
              try:
                  timestamp = parse((line.split(" ")[0].split("=")[1]).replace('"', ''))
              except ValueError as err:
                  self.logger.log_debug(f"Timestamp parsing failed. Line: '{line}'. Error: '{str(err)}'.")
                  return None
              # return epoch time
              return timestamp.timestamp()

          def _get_test_status(self, status):
              '''
              Get the test status from the log.
              '''
              self.logger.log_debug(f"STATUS => {status}.")

              return "Pass" if status == "0" else "Fail"

          def _get_test_details(self, f):
              '''
              Check the package test status
              '''
              start_time = end_time = None
              status = "Not Supported"
              for line in f:
                  line = line.strip("\n")
                  if PACKAGE_TEST_IGNORE_REGEX.search(line):
                      continue

                  if PACKAGE_TEST_START_REGEX.search(line):
                      self.logger.log_debug(line)
                      start_time = self._get_timestamp(line)

                  end_line_match = PACKAGE_TEST_END_REGEX.search(line)
                  if end_line_match:
                      self.logger.log_debug(line)
                      end_time = self._get_timestamp(line)
                      status = self._get_test_status(end_line_match.groups()[PACKAGE_TEST_STATUS_INDEX])
                      break

                  if PACKAGE_TEST_SKIP_REGEX.search(line):
                      self.logger.log_debug(line)
                      status = "Skipped"
                      break

              if start_time is not None and end_time is None:
                  status = "Aborted"
              return status, start_time, end_time

          def _analyze_package_test_log(self, log_path):
              '''
              Scrape the package test log and detect the test status.
              '''
              start_time = end_time = status = None
              elapsed_time = 0
              with open(log_path, 'r') as f:
                  status, start_time, end_time = self._get_test_details(f)

              if start_time is not None and end_time is not None:
                  elapsed_time = end_time - start_time

              self.logger.log_debug(f"Status: {status}. Start time: {start_time}. End time: {end_time}.")
              return status, elapsed_time

          def _get_test_output(self, log_path):
              start_log = False
              contents = []
              with open(log_path, 'r') as f:
                  for line in f:
                      if PACKAGE_TEST_IGNORE_REGEX.search(line):
                          continue

                      if PACKAGE_TEST_START_REGEX.search(line):
                          start_log = True

                      if start_log:
                          contents.append(line)

                      if PACKAGE_TEST_END_REGEX.search(line):
                          break
              return " ".join(contents)

          def _build_junit_test_case(self, package_name, status, time, log_path, test_name):
              stdout = None
              if status == "Fail":
                  stdout = self._get_test_output(log_path)

              tc = TestCase(package_name, test_name, time, stdout)

              if status == "Pass":
                  return tc

              if status == "Fail":
                  tc.add_failure_info("TEST FAILED. CHECK ATTACHMENTS TAB FOR FAILURE LOG")
              elif status == "Skipped":
                  tc.add_skipped_info("PACKAGE TEST SKIPPED")
              elif status == "Not Supported":
                  tc.add_skipped_info("PACKAGE TEST NOT SUPPORTED")
              else:
                  tc.add_error_info(status)

              return tc

          def scan_package_test_logs(self, logs_path, junit_xml_filename, test_name):
              '''
              Scan the RPM build log folder and generate the package test report.
              '''
              test_cases = []
              test_logs = glob(f"{logs_path}/*.src.rpm.test.log")
              test_logs_count = len(test_logs)
              with open(junit_xml_filename, "w") as f:
                  for index, log_path in enumerate(test_logs):
                      self.logger.log_group_begin(f"Processing : {basename(log_path)}")
                      self.logger.log_progress((index + 1) * 100 / test_logs_count)

                      package_name, version = self._get_package_details(log_path)
                      status, elapsed_time = self._analyze_package_test_log(log_path)
                      test_cases.append(self._build_junit_test_case(package_name, status, elapsed_time, log_path, test_name))

                      self.logger.log_debug(f"Package name: {package_name}. Version: {version}. Test status: {status}. Duration: {elapsed_time}.")
                      self.logger.log_group_end()

                  test_suite = TestSuite(test_name, test_cases)
                  TestSuite.to_file(f, [test_suite], prettyprint=True)


      logs_dir_path = "${{ parameters.buildRepoRoot }}/build/logs/pkggen/rpmbuilding"
      report_path = "${{ parameters.testsWorkspace }}/${{ parameters.reportFileName }}"
      test_suite_name = f"[{machine()}] Package test"

      logger = ADOPipelineLogger()
      logger.log(f"Analyzing test results inside '{logs_dir_path}'.")

      # Instantiate the analyzer and process the package test logs.
      analyzer = PackageTestAnalyzer(logger)
      analyzer.scan_package_test_logs(
          logs_dir_path,
          report_path,
          test_suite_name)
  displayName: "Convert test logs to a JUnit report"

- task: PublishTestResults@2
  inputs:
    testResultsFiles: "**/${{ parameters.reportFileName }}"
    searchFolder: "${{ parameters.testsWorkspace }}"
  displayName: "Publish test results"

- ${{ if parameters.outputArtifactsFolder }}:
  - bash: |
      published_tests_dir="${{ parameters.outputArtifactsFolder }}/TESTS"
      mkdir -p "$published_tests_dir"
      cp "${{ parameters.testsWorkspace }}/${{ parameters.reportFileName }}" "$published_tests_dir"
    displayName: "Copy test results to the output directory"

- ${{ if parameters.failOnTestFailures }}:
  - bash: |
      report_path="${{ parameters.testsWorkspace }}/${{ parameters.reportFileName }}"
      if [[ ! -f "$report_path" ]]; then
        echo "##[error]Test report not found at '$report_path'."
        exit 1
      fi
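
      # Assumption about the report format: junit_xml writes a root element roughly like
      # '<testsuites ... failures="N" ...>', so a fully passing run should contain failures="0".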
      if ! grep -q '^<testsuites.*failures="0"' "$report_path"; then
        echo "##[error]Test report has failing tests. See the 'Tests' tab for details."
        exit 1
      fi
    displayName: "Verify all tests passed"

@@ -284,7 +284,7 @@ func testSRPMFile(agent buildagents.BuildAgent, checkAttempts int, srpmFile stri
 
 	_, logFile, buildErr = agent.BuildPackage(srpmFile, logBaseName, outArch, runCheck, dependencies)
 	if buildErr != nil {
-		logger.Log.Warnf("Test build for '%s' failed on a non-test build issue. Error: %s", srpmFile, err)
+		logger.Log.Warnf("Test build for '%s' failed on a non-test build issue. Error: %s", srpmFile, buildErr)
 		return
 	}
 