refactor(ci): Upload stage telemetry separately for each stage (#22616)

## Description

This refactors our setup to upload telemetry for stages during ADO
pipeline runs.

One of the main problems the current setup has is that it waits for all
"target" stages (the ones whose telemetry we want to upload) in a given
pipeline to complete before the stage that uploads telemetry runs. In
the E2E tests pipeline, where the stage that runs tests against ODSP
usually takes ~2hrs to run but can also wait many hours to _start_
(because we have exclusive locks so only one pipeline run can execute
tests against a given external service at the same time, and thus the
stage has to wait for the corresponding stage in previous pipeline runs
to finish), the telemetry for other stages can severely lag the actual
time when things happened. This can cause confusion when our OCEs get
IcM incidents, because the thing that caused the incident to fire
happened many hours ago (sometimes the previous day).

The refactor in this PR makes it so instead of having a single stage at
the end of a pipeline run which uploads the telemetry for all other
relevant stages in that run, we now have one for each of the relevant
"target" stages. The new stage depends only on the "target" stage, so it
runs immediately after it. This does have the disadvantage that we now
have many more stages in a pipeline run, each one needs to be scheduled
on an available build agent, and they all run similar steps, like
checking out the repository. So the total usage time for build agents
will probably go up a bit. The monetary cost should not be significant,
though, so I think this is fine. All of this applies to the test
pipelines; I kept the existing setup for Build - client because in that
one we don't really care about tracking each stage separately, we really
only care about the pipeline as a whole.

The refactor also entailed some cleanup and improvements on some JS/TS
code related to stage telemetry. The scripts that get the run
information for a stage now can take a specific `STAGE_ID` from the
environment instead of getting the list of stage ids themselves. They're
also more aggressive with validation of the inputs they expect.
This commit is contained in:
Alex Villarreal 2024-09-26 10:38:46 -05:00 committed by GitHub
Parent e0ad436486
Commit c126bba051
No known key found for this signature
GPG key ID: B5690EEEBB952194
12 changed files: 873 additions and 902 deletions

View file

@ -8,64 +8,41 @@
const BUILD_ID = process.env.BUILD_ID;
// The token needed to make the API calls.
const ADO_API_TOKEN = process.env.ADO_API_TOKEN;
// The id of the stage for which to retrieve the test pass rate information
const STAGE_ID = process.env.STAGE_ID;
// The workspace where the new files/folder created in this script will be stored.
const BASE_OUTPUT_FOLDER = process.env.BASE_OUTPUT_FOLDER;
const WORK_FOLDER = process.env.WORK_FOLDER;
if (
BUILD_ID === undefined ||
STAGE_ID === undefined ||
ADO_API_TOKEN === undefined ||
BASE_OUTPUT_FOLDER === undefined
WORK_FOLDER === undefined
) {
throw new Error(
"One or more required environment variables are undefined. Please specify 'BUILD_ID', 'ADO_API_TOKEN', and 'BASE_OUTPUT_FOLDER' in order to run this script.",
"One or more required environment variables are undefined. Please specify 'BUILD_ID', 'STAGE_ID', 'ADO_API_TOKEN', and 'WORK_FOLDER' in order to run this script.",
);
}
console.log("BUILD_ID:", BUILD_ID);
console.log("BASE_OUTPUT_FOLDER:", BASE_OUTPUT_FOLDER);
console.log("STAGE_ID:", STAGE_ID);
console.log("WORK_FOLDER:", WORK_FOLDER);
// Create output folder - Note: This requires Node.js fs module
// Create output folder
import * as fs from "fs";
if (!fs.existsSync(`${BASE_OUTPUT_FOLDER}/stageFiles`)) {
fs.mkdirSync(`${BASE_OUTPUT_FOLDER}/stageFiles`, { recursive: true });
console.log("Folder created");
if (!fs.existsSync(WORK_FOLDER)) {
fs.mkdirSync(WORK_FOLDER, { recursive: true });
console.log(`Created folder '${WORK_FOLDER}'.`);
}
const apiUrl = `https://dev.azure.com/fluidframework/internal/_apis/build/builds/${BUILD_ID}/timeline?api-version=7.1-preview.2`;
// Fetch data from Timeline API
const response = await fetch(apiUrl, {
headers: {
Authorization: `Basic ${Buffer.from(":" + ADO_API_TOKEN).toString("base64")}`,
},
});
console.log(response);
if (!response.ok) {
throw new Error(`Error during API call to get build timeline. Status: ${response.status}`);
}
const data = await response.json();
console.log("Saving stage names");
// Extract and save all stage names
const stages = data.records
.filter((record) => record.type === "Stage")
.map((record) => record.identifier);
for (const stage of stages) {
if (stage === "runAfterAll") {
continue;
}
console.log(`Fetching data for stage: ${stage}`);
// Fetch test rate data for each stage.
const stageApiUrl = `https://vstmr.dev.azure.com/fluidframework/internal/_apis/testresults/metrics?pipelineId=${BUILD_ID}&stageName=${stage}&api-version=7.1-preview.1`;
const stageResponse = await fetch(stageApiUrl, {
headers: {
Authorization: `Basic ${Buffer.from(":" + ADO_API_TOKEN).toString("base64")}`,
},
});
if (!stageResponse.ok) {
throw new Error(`Error during API call to get build metrics. Status: ${response.status}`);
}
const stageData = await stageResponse.json();
// Save the API data to a JSON file.
fs.writeFileSync(
`${BASE_OUTPUT_FOLDER}/stageFiles/${stage}.json`,
JSON.stringify(stageData),
);
// Fetch test results for the specified build + stage and save to a file
console.log(`Fetching data for stage: ${STAGE_ID}`);
const testResultsApiUrl = `https://vstmr.dev.azure.com/fluidframework/internal/_apis/testresults/metrics?pipelineId=${BUILD_ID}&stageName=${STAGE_ID}&api-version=7.1-preview.1`;
const stageResponse = await fetch(testResultsApiUrl, {
headers: { Authorization: `Basic ${Buffer.from(":" + ADO_API_TOKEN).toString("base64")}` },
});
if (!stageResponse.ok) {
throw new Error(`Error during API call to get test results. Status: ${response.status}`);
}
const stageData = await stageResponse.json();
fs.writeFileSync(`${WORK_FOLDER}/${STAGE_ID}.json`, JSON.stringify(stageData));

View file

@ -609,20 +609,17 @@ extends:
condition: succeededOrFailed()
dependsOn:
- build
# NOTE: This is brittle; we need to only apply these stage dependencies when the corresponding stages actually
# get created in the pipeline, in the include-publish-npm-package.yml file, so we want to match the compile-time
# conditions *and exact stage names* that exist there. At some point it might be preferable to always create the
# stages, control their execution with 'condition:', and update this stage to always depend on all previous
# stages (while still running if some of the dependencies were skipped).
- ${{ if eq(parameters.publish, true) }}:
- ${{ if eq(variables['testBuild'], true) }}:
- publish_npm_internal_test
- ${{ if eq(variables['testBuild'], false) }}:
- publish_npm_internal_build
- ${{ if and(eq(variables['testBuild'], false), eq(parameters.isReleaseGroup, true)) }}:
- publish_npm_internal_dev
- ${{ if or(eq(variables['release'], 'release'), eq(variables['release'], 'prerelease')) }}:
- publish_npm_public
# Note: the publish stages are created in include-publish-npm-package.yml. We need to match the ids exactly.
- publish_npm_internal_test
- publish_npm_internal_build
- publish_npm_public
# NOTE: This is brittle; since the publish_npm_internal_dev stage is added to the pipeline conditionally,
# we create a dependency on it based on the same condition.
# So this needs to be kept in sync with the logic that include-publish-npm-package.yml uses to create the stage.
# At some point it might be preferable to always create the stage, control its execution solely with
# 'condition:', and update this bit to always depend on publish_npm_internal_dev, since it will always exist.
- ${{ if eq(parameters.isReleaseGroup, true) }}:
- publish_npm_internal_dev
jobs:
- job: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
@ -630,7 +627,7 @@ extends:
variables:
- group: ado-feeds
- name: pipelineTelemetryWorkdir
value: $(Pipeline.Workspace)/pipelineTelemetryWorkdir
value: $(Pipeline.Workspace)/pipelineTelemetryWorkdir/timingOutput
readonly: true
- name: absolutePathToTelemetryGenerator
value: $(Build.SourcesDirectory)/tools/telemetry-generator
@ -642,21 +639,30 @@ extends:
officeFeedUrl: $(ado-feeds-office)
isCheckoutNeeded: true
- task: Bash@3
displayName: Retrieve buildId results
displayName: Get stage timing and result data from ADO
env:
BUILD_ID: $(Build.BuildId)
ADO_API_TOKEN: $(System.AccessToken)
WORK_FOLDER: $(pipelineTelemetryWorkdir)
inputs:
targetType: inline
script: |
echo "Creating work folder '$WORK_FOLDER'";
mkdir -p $WORK_FOLDER;
echo "Retrieving data from ADO API";
echo "curl -u \":<REDACTED>\" \"https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline?api-version=7.1-preview.2\""
curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline?api-version=7.1-preview.2" > $WORK_FOLDER/output.json
- task: Bash@3
displayName: Submit telemetry for stage timing and result
env:
BUILD_ID: $(Build.BuildId)
PIPELINE: BuildClient
WORK_FOLDER: $(pipelineTelemetryWorkdir)
inputs:
targetType: inline
workingDirectory: $(absolutePathToTelemetryGenerator)
script: |
echo "Creating output folder ..."
mkdir -p $(pipelineTelemetryWorkdir)/timingOutput
echo "Retrieving pipeline run timeline data ..."
echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline"'
curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version\=6.0-preview.1" > $(pipelineTelemetryWorkdir)/timingOutput/output.json
pwd;
ls -laR $(pipelineTelemetryWorkdir)/timingOutput/output.json;
cat $(pipelineTelemetryWorkdir)/timingOutput/output.json;
node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '$(pipelineTelemetryWorkdir)/timingOutput/';
env:
BUILD_ID: $(Build.BuildId)
ADO_API_TOKEN: $(System.AccessToken)
PIPELINE: BuildClient
echo "Listing files in '$WORK_FOLDER'"
ls -laR $WORK_FOLDER;
node --require @ff-internal/aria-logger bin/run --handlerModule "$(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js" --dir "$WORK_FOLDER";

View file

@ -61,24 +61,24 @@ stages:
AFFECTED_PATHS: ${{ join(';', package.affectedPaths) }}
- ${{ each package in parameters.packages }}:
# The ids for these stages should be kept in sync with the dependency on them in tools/pipelines/test-dds-stress.yml
- stage: ${{ replace(replace(package.name, '@fluidframework/', ''), '@fluid-experimental/', 'experimental_') }}_stress_tests
dependsOn: CheckAffectedPaths
displayName: Run ${{ package.name }} stress tests
jobs:
- template: include-test-real-service.yml
parameters:
# Ideally this would be a condition on the stage rather than the job, but it doesn't seem like that is supported (and ADO UI gives very little debug information
# as to what might be going wrong). This only impacts the "stage" view of the pipeline, in that packages with skipped tests will show up as successful stages
# rather than skipped stages. Clicking on a skipped stage still shows that the corresponding test job wasn't run.
condition: eq(stageDependencies.CheckAffectedPaths.Job.outputs['Check${{ replace(package.testFileTarName, '-', '') }}.AffectedFilesModified'],'true')
poolBuild: ${{ parameters.pool }}
loggerPackage: ''
artifactBuildId: ${{ parameters.artifactBuildId }}
testPackage: ${{ package.name }}
testWorkspace: ${{ parameters.testWorkspace }}
timeoutInMinutes: 120
testFileTarName: ${{ package.testFileTarName }}
testCommand: ${{ package.testCommand }}
env:
FUZZ_STRESS_RUN: true
- template: /tools/pipelines/templates/include-test-real-service.yml@self
parameters:
stageId: ${{ replace(replace(package.name, '@fluidframework/', ''), '@fluid-experimental/', 'experimental_') }}_stress_tests
stageDisplayName: Run ${{ package.name }} stress tests
stageDependencies:
- CheckAffectedPaths
pipelineIdentifierForTelemetry: 'DdsStressService'
# Ideally this would be a condition on the stage rather than the job, but it doesn't seem like that is supported (and ADO UI gives very little debug information
# as to what might be going wrong). This only impacts the "stage" view of the pipeline, in that packages with skipped tests will show up as successful stages
# rather than skipped stages. Clicking on a skipped stage still shows that the corresponding test job wasn't run.
condition: eq(stageDependencies.CheckAffectedPaths.Job.outputs['Check${{ replace(package.testFileTarName, '-', '') }}.AffectedFilesModified'],'true')
poolBuild: ${{ parameters.pool }}
loggerPackage: ''
artifactBuildId: ${{ parameters.artifactBuildId }}
testPackage: ${{ package.name }}
testWorkspace: ${{ parameters.testWorkspace }}
timeoutInMinutes: 120
testFileTarName: ${{ package.testFileTarName }}
testCommand: ${{ package.testCommand }}
env:
FUZZ_STRESS_RUN: true

View file

@ -4,6 +4,34 @@
# include-test-real-service
parameters:
# Id for the stage that runs tests
- name: stageId
type: string
# Display name for the stage that runs tests
- name: stageDisplayName
type: string
# The stage that runs tests will have its dependsOn property set to this value
- name: stageDependencies
type: object
default: []
# Variables to be set in the stage that runs tests
- name: stageVariables
type: object
default:
# If true, the stage that uploads pipeline telemetry to Kusto will include a task to upload
# test pass rate telemetry.
- name: uploadTestPassRateTelemetry
type: boolean
default: false
# Unique identifier used to identify telemetry data in Kusto for this pipeline
- name: pipelineIdentifierForTelemetry
type: string
- name: poolBuild
type: object
default: Small
@ -77,207 +105,122 @@ parameters:
type: boolean
default: false
jobs:
- ${{ each variant in parameters.splitTestVariants }}:
- job:
displayName: Run ${{ variant.name }}
pool: ${{ parameters.poolBuild }}
condition: ${{ parameters.condition }}
timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
variables:
- group: ado-feeds
- name: isTestBranch
value: ${{ startsWith(variables['Build.SourceBranch'], 'refs/heads/test/') }}
readonly: true
# We use 'chalk' to colorize output, which auto-detects color support in the
# running terminal. The log output shown in Azure DevOps job runs only has
# basic ANSI color support though, so force that in the pipeline
- name: FORCE_COLOR
value: 1
- name: testPackageDir
value: '${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}'
- name: testPackageFilePattern
value: ${{ replace(replace(parameters.testPackage, '@', '' ), '/', '-') }}-[0-9]*.tgz
# Note that this path must match the path that the packed packages are saved to in the build pipeline.
# It should be kept up to date with the path defined in scripts/pack-packages.sh.
- name: testPackagePathPattern
value: $(Pipeline.Workspace)/client/pack/tarballs/${{ variables.testPackageFilePattern }}
- name: skipComponentGovernanceDetection
value: true
- name: artifactPipeline
value: Build - client packages
- name: feed
${{ if eq(variables.isTestBranch, 'true') }}:
value: $(ado-feeds-internal) # Comes from the ado-feeds variable group
${{ else }}:
value: $(ado-feeds-build) # Comes from the ado-feeds variable group
- name: devFeed
${{ if eq(variables.isTestBranch, 'true') }}:
value: $(ado-feeds-internal) # Comes from the ado-feeds variable group
${{ else }}:
value: $(ado-feeds-dev) # Comes from the ado-feeds variable group
stages:
- stage: ${{ parameters.stageId}}
displayName: ${{ parameters.stageDisplayName }}
dependsOn: ${{ parameters.stageDependencies }}
${{ if parameters.stageVariables }}:
variables:
${{ parameters.stageVariables }}
jobs:
- ${{ each variant in parameters.splitTestVariants }}:
- job:
displayName: Run ${{ variant.name }}
pool: ${{ parameters.poolBuild }}
condition: ${{ parameters.condition }}
timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
variables:
- group: ado-feeds
- name: isTestBranch
value: ${{ startsWith(variables['Build.SourceBranch'], 'refs/heads/test/') }}
readonly: true
# We use 'chalk' to colorize output, which auto-detects color support in the
# running terminal. The log output shown in Azure DevOps job runs only has
# basic ANSI color support though, so force that in the pipeline
- name: FORCE_COLOR
value: 1
- name: testPackageDir
value: '${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}'
- name: testPackageFilePattern
value: ${{ replace(replace(parameters.testPackage, '@', '' ), '/', '-') }}-[0-9]*.tgz
# Note that this path must match the path that the packed packages are saved to in the build pipeline.
# It should be kept up to date with the path defined in scripts/pack-packages.sh.
- name: testPackagePathPattern
value: $(Pipeline.Workspace)/client/pack/tarballs/${{ variables.testPackageFilePattern }}
- name: skipComponentGovernanceDetection
value: true
- name: artifactPipeline
value: Build - client packages
- name: feed
${{ if eq(variables.isTestBranch, 'true') }}:
value: $(ado-feeds-internal) # Comes from the ado-feeds variable group
${{ else }}:
value: $(ado-feeds-build) # Comes from the ado-feeds variable group
- name: devFeed
${{ if eq(variables.isTestBranch, 'true') }}:
value: $(ado-feeds-internal) # Comes from the ado-feeds variable group
${{ else }}:
value: $(ado-feeds-dev) # Comes from the ado-feeds variable group
steps:
# Setup
- checkout: none
clean: true
steps:
# Setup
- checkout: none
clean: true
# Install self-signed cert for R11s deployment in local cert store
- ${{ if ne(parameters.r11sSelfSignedCertSecureFile, '') }}:
- task: DownloadSecureFile@1
displayName: 'Download r11s self-signed cert'
name: downloadCertTask
inputs:
secureFile: ${{ parameters.r11sSelfSignedCertSecureFile }}
retryCount: '2'
# Install self-signed cert for R11s deployment in local cert store
- ${{ if ne(parameters.r11sSelfSignedCertSecureFile, '') }}:
- task: DownloadSecureFile@1
displayName: 'Download r11s self-signed cert'
name: downloadCertTask
inputs:
secureFile: ${{ parameters.r11sSelfSignedCertSecureFile }}
retryCount: '2'
- task: Bash@3
displayName: 'Install r11s self-signed cert in local cert store'
inputs:
targetType: 'inline'
script: |
# Extract public part from cert
openssl x509 -in $(downloadCertTask.secureFilePath) -out cert.crt
# Install cert
sudo cp cert.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
# Print parameters/Vars
# Variables declared outside this template will only work with "macro syntax": $(name).
# Variables declared inside this template also work with "template expression syntax": ${{ variables.name }}.
- task: Bash@3
displayName: 'Install r11s self-signed cert in local cert store'
displayName: Print Parameters and Variables
inputs:
targetType: 'inline'
script: |
# Show all task group conditions
# Extract public part from cert
openssl x509 -in $(downloadCertTask.secureFilePath) -out cert.crt
# Install cert
sudo cp cert.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
echo "
Pipeline Parameters:
poolBuild=${{ parameters.poolBuild }}
testPackage=${{ parameters.testPackage }}
# Print parameters/Vars
# Variables declared outside this template will only work with "macro syntax": $(name).
# Variables declared inside this template also work with "template expression syntax": ${{ variables.name }}.
- task: Bash@3
displayName: Print Parameters and Variables
inputs:
targetType: 'inline'
script: |
# Show all task group conditions
Pipeline Variables:
isTestBranch=${{ variables.isTestBranch }}
testWorkspace=${{ parameters.testWorkspace }}
testPackageFilePattern=${{ variables.testPackageFilePattern }}
feed=${{ variables.feed }}
devFeed=${{ variables.devFeed }}
testCommand=${{ parameters.testCommand }}
continueOnError=${{ parameters.continueOnError }}
variant.flag=${{ variant.flags }}
testFileTarName=${{ parameters.testFileTarName }}
artifactPipeline=${{ variables.artifactPipeline }}
artifactBuildId=${{ parameters.artifactBuildId }}
"
echo "
Pipeline Parameters:
poolBuild=${{ parameters.poolBuild }}
testPackage=${{ parameters.testPackage }}
- template: include-use-node-version.yml
Pipeline Variables:
isTestBranch=${{ variables.isTestBranch }}
testWorkspace=${{ parameters.testWorkspace }}
testPackageFilePattern=${{ variables.testPackageFilePattern }}
feed=${{ variables.feed }}
devFeed=${{ variables.devFeed }}
testCommand=${{ parameters.testCommand }}
continueOnError=${{ parameters.continueOnError }}
variant.flag=${{ variant.flags }}
testFileTarName=${{ parameters.testFileTarName }}
artifactPipeline=${{ variables.artifactPipeline }}
artifactBuildId=${{ parameters.artifactBuildId }}
"
- template: include-use-node-version.yml
# Download artifact
- task: DownloadPipelineArtifact@2
displayName: Download test package
inputs:
source: specific
project: internal
pipeline: ${{ variables.artifactPipeline }}
buildVersionToDownload: specific
buildId: ${{ parameters.artifactBuildId }}
artifact: pack
patterns: "**/${{ variables.testPackageFilePattern }}"
path: $(Pipeline.Workspace)/client/pack
# allowPartiallySucceededBuilds: true # No effect as long as we have buildVersionToDownload: specific
# branchName: $(Build.SourceBranch) # No effect as long as we have buildVersionToDownload: specific
# It seems there's a bug and preferTriggeringPipeline is not respected.
# We force the behavior by explicitly specifying:
# - buildVersionToDownload: specific
# - buildId: <the id of the triggering build>
# preferTriggeringPipeline: true
- task: Bash@3
displayName: Create test directory
inputs:
targetType: 'inline'
script: |
mkdir ${{ parameters.testWorkspace }}
- task: Bash@3
name: Initialize
displayName: Initialize
inputs:
targetType: 'inline'
workingDirectory: ${{ parameters.testWorkspace }}
# Note: $(ado-feeds-build) and $(ado-feeds-office) come from the ado-feeds variable group
script: |
echo Initialize package
npm init --yes
echo Generating .npmrc
echo "registry=https://registry.npmjs.org" >> ./.npmrc
echo "always-auth=false" >> ./.npmrc
echo "@fluidframework:registry=${{ variables.feed }}" >> ./.npmrc
echo "@fluid-experimental:registry=${{ variables.feed }}" >> ./.npmrc
# This is confusing, but we are using the .npmrc here to load dependencies from different feeds. These
# scopes must be loaded from the "dev" feed because that is the only place they are published. Ideally this
# logic will be centralized outside of CI in the future.
echo "@fluid-internal:registry=${{ variables.devFeed }}" >> ./.npmrc
echo "@fluid-private:registry=${{ variables.devFeed }}" >> ./.npmrc
echo "@fluid-tools:registry=${{ variables.devFeed }}" >> ./.npmrc
# This scope must be loaded from the "build" feed
echo "@ff-internal:registry=$(ado-feeds-build)" >> ./.npmrc
# This scope must be loaded from the internal Office feed
echo "@microsoft:registry=$(ado-feeds-office)" >> ./.npmrc
echo "always-auth=true" >> ./.npmrc
cat .npmrc
if [[ `ls -1 ${{ variables.testPackagePathPattern }} | wc -l` -eq 1 ]]; then
echo "##vso[task.setvariable variable=testPackageTgz;isOutput=true]`ls ${{ variables.testPackagePathPattern }}`"
else
ls -1 ${{ variables.testPackagePathPattern }}
echo "##vso[task.logissue type=error]Test package '${{ parameters.testPackage }}' not found, or there are more than one found"
exit -1
fi
# Auth to internal feed
- task: npmAuthenticate@0
displayName: 'npm authenticate (internal feed)'
inputs:
workingFile: ${{ parameters.testWorkspace }}/.npmrc
# Install test and logger package
- task: Npm@1
displayName: 'npm install'
# ADO feeds have latency on the order of minutes before packages are available downstream. See:
# https://learn.microsoft.com/en-us/azure/devops/artifacts/concepts/upstream-sources?view=azure-devops#upstream-sources-health-status
# This pipeline installs packages which were published very recently relative to its runtime, hence the rather high retry count here.
retryCountOnTaskFailure: 10
inputs:
command: 'custom'
workingDir: ${{ parameters.testWorkspace }}
customCommand: 'install $(Initialize.testPackageTgz) ${{ parameters.loggerPackage }}'
customRegistry: 'useNpmrc'
# Download Test Files & Install Extra Dependencies
# These steps are intended to include extra dependencies that are not available as
# part of the normal package .tgz installed previously in the pipeline.
- ${{ if ne(parameters.testFileTarName, 'null') }}:
# Download Artifact - Test Files
# Download artifact
- task: DownloadPipelineArtifact@2
displayName: Download test files
displayName: Download test package
inputs:
source: specific
project: internal
pipeline: ${{ variables.artifactPipeline }}
buildVersionToDownload: specific
buildId: ${{ parameters.artifactBuildId }}
artifact: test-files
path: $(Pipeline.Workspace)/test-files
artifact: pack
patterns: "**/${{ variables.testPackageFilePattern }}"
path: $(Pipeline.Workspace)/client/pack
# allowPartiallySucceededBuilds: true # No effect as long as we have buildVersionToDownload: specific
# branchName: $(Build.SourceBranch) # No effect as long as we have buildVersionToDownload: specific
# It seems there's a bug and preferTriggeringPipeline is not respected.
@ -286,115 +229,214 @@ jobs:
# - buildId: <the id of the triggering build>
# preferTriggeringPipeline: true
# Unpack test files
- task: Bash@3
displayName: Unpack test files
inputs:
workingDirectory: ${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}
targetType: 'inline'
script: |
TAR_PATH=$(Pipeline.Workspace)/test-files/${{ parameters.testFileTarName }}.test-files.tar
echo "Unpacking test files for ${{ parameters.testPackage }} from file '$TAR_PATH' in '$(pwd)'"
# Note: we could skip the last argument and have it unpack everything at once, but if we later change
# the structure/contents of the tests tar file, the extraction could overwrite things we didn't intend to,
# so keeping the paths to extract explicit.
# Also, extracting is finicky with the exact format of the last argument, it needs to match how the
# tarfile was created (e.g. './lib/test' works here but 'lib/test' does not).
tar --extract --verbose --file $TAR_PATH ./lib/test
tar --extract --verbose --file $TAR_PATH ./dist/test
tar --extract --verbose --file $TAR_PATH ./src/test
- task: Bash@3
displayName: Copy devDependencies
displayName: Create test directory
inputs:
targetType: 'inline'
script: |
mkdir ${{ parameters.testWorkspace }}
- task: Bash@3
name: Initialize
displayName: Initialize
inputs:
targetType: 'inline'
workingDirectory: ${{ parameters.testWorkspace }}
targetType: 'inline'
# Note: $(ado-feeds-build) and $(ado-feeds-office) come from the ado-feeds variable group
script: |
testPkgJsonPath=${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}/package.json
pkgJsonPath=${{ parameters.testWorkspace }}/package.json
node -e "
const { devDependencies } = require('$testPkgJsonPath');
const pkg = require('$pkgJsonPath');
pkg.devDependencies=devDependencies;
require('fs').writeFileSync('$pkgJsonPath', JSON.stringify(pkg));
"
echo Initialize package
npm init --yes
echo Generating .npmrc
echo "registry=https://registry.npmjs.org" >> ./.npmrc
echo "always-auth=false" >> ./.npmrc
echo "@fluidframework:registry=${{ variables.feed }}" >> ./.npmrc
echo "@fluid-experimental:registry=${{ variables.feed }}" >> ./.npmrc
# This is confusing, but we are using the .npmrc here to load dependencies from different feeds. These
# scopes must be loaded from the "dev" feed because that is the only place they are published. Ideally this
# logic will be centralized outside of CI in the future.
echo "@fluid-internal:registry=${{ variables.devFeed }}" >> ./.npmrc
echo "@fluid-private:registry=${{ variables.devFeed }}" >> ./.npmrc
echo "@fluid-tools:registry=${{ variables.devFeed }}" >> ./.npmrc
# This scope must be loaded from the "build" feed
echo "@ff-internal:registry=$(ado-feeds-build)" >> ./.npmrc
# This scope must be loaded from the internal Office feed
echo "@microsoft:registry=$(ado-feeds-office)" >> ./.npmrc
echo "always-auth=true" >> ./.npmrc
cat .npmrc
if [[ `ls -1 ${{ variables.testPackagePathPattern }} | wc -l` -eq 1 ]]; then
echo "##vso[task.setvariable variable=testPackageTgz;isOutput=true]`ls ${{ variables.testPackagePathPattern }}`"
else
ls -1 ${{ variables.testPackagePathPattern }}
echo "##vso[task.logissue type=error]Test package '${{ parameters.testPackage }}' not found, or there are more than one found"
exit -1
fi
# Auth to internal feed
- task: npmAuthenticate@0
displayName: 'npm authenticate (internal feed)'
inputs:
workingFile: ${{ parameters.testWorkspace }}/.npmrc
# Install test and logger package
- task: Npm@1
displayName: 'npm install - extra dependencies for test files'
displayName: 'npm install'
# ADO feeds have latency on the order of minutes before packages are available downstream. See:
# https://learn.microsoft.com/en-us/azure/devops/artifacts/concepts/upstream-sources?view=azure-devops#upstream-sources-health-status
# This pipeline installs packages which were published very recently relative to its runtime, hence the rather high retry count here.
retryCountOnTaskFailure: 10
inputs:
command: 'custom'
workingDir: ${{ parameters.testWorkspace }}
customCommand: 'install'
customCommand: 'install $(Initialize.testPackageTgz) ${{ parameters.loggerPackage }}'
customRegistry: 'useNpmrc'
- ${{ if eq(parameters.cacheCompatVersionsInstalls, true) }}:
- task: Cache@2
displayName: Cache compat versions install location
timeoutInMinutes: 3
continueOnError: true
inputs:
key: '"compat-version-installs" | "$(Agent.OS)" | "${{ parameters.testCommand }}" | "${{ variant.name }}"'
path: ${{ parameters.testWorkspace }}/node_modules/@fluid-private/test-version-utils/node_modules/.legacy/
# Download Test Files & Install Extra Dependencies
# These steps are intended to include extra dependencies that are not available as
# part of the normal package .tgz installed previously in the pipeline.
- ${{ if ne(parameters.testFileTarName, 'null') }}:
# Download Artifact - Test Files
- task: DownloadPipelineArtifact@2
displayName: Download test files
inputs:
source: specific
project: internal
pipeline: ${{ variables.artifactPipeline }}
buildVersionToDownload: specific
buildId: ${{ parameters.artifactBuildId }}
artifact: test-files
path: $(Pipeline.Workspace)/test-files
# allowPartiallySucceededBuilds: true # No effect as long as we have buildVersionToDownload: specific
# branchName: $(Build.SourceBranch) # No effect as long as we have buildVersionToDownload: specific
# It seems there's a bug and preferTriggeringPipeline is not respected.
# We force the behavior by explicitly specifying:
# - buildVersionToDownload: specific
# - buildId: <the id of the triggering build>
# preferTriggeringPipeline: true
# run the test
- task: Npm@1
displayName: '[test] ${{ parameters.testCommand }} ${{ variant.flags }}'
continueOnError: ${{ parameters.continueOnError }}
env:
${{ parameters.env }}
inputs:
command: 'custom'
workingDir: ${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}
customCommand: 'run ${{ parameters.testCommand }} -- ${{ variant.flags }}'
# Unpack test files
- task: Bash@3
displayName: Unpack test files
inputs:
workingDirectory: ${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}
targetType: 'inline'
script: |
TAR_PATH=$(Pipeline.Workspace)/test-files/${{ parameters.testFileTarName }}.test-files.tar
echo "Unpacking test files for ${{ parameters.testPackage }} from file '$TAR_PATH' in '$(pwd)'"
# Note: we could skip the last argument and have it unpack everything at once, but if we later change
# the structure/contents of the tests tar file, the extraction could overwrite things we didn't intend to,
# so keeping the paths to extract explicit.
# Also, extracting is finicky with the exact format of the last argument, it needs to match how the
# tarfile was created (e.g. './lib/test' works here but 'lib/test' does not).
tar --extract --verbose --file $TAR_PATH ./lib/test
tar --extract --verbose --file $TAR_PATH ./dist/test
tar --extract --verbose --file $TAR_PATH ./src/test
# Copy devDependencies from the test package's package.json into the workspace's
# package.json, so the 'npm install' below picks up the extra dependencies the
# test files need at runtime.
- task: Bash@3
  displayName: Copy devDependencies
  inputs:
    workingDirectory: ${{ parameters.testWorkspace }}
    targetType: 'inline'
    script: |
      testPkgJsonPath=${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}/package.json
      pkgJsonPath=${{ parameters.testWorkspace }}/package.json
      node -e "
      const { devDependencies } = require('$testPkgJsonPath');
      const pkg = require('$pkgJsonPath');
      pkg.devDependencies=devDependencies;
      require('fs').writeFileSync('$pkgJsonPath', JSON.stringify(pkg));
      "

# Install the devDependencies copied above. Retried generously because transient
# registry/feed failures are common in CI.
- task: Npm@1
  displayName: 'npm install - extra dependencies for test files'
  retryCountOnTaskFailure: 10
  inputs:
    command: 'custom'
    workingDir: ${{ parameters.testWorkspace }}
    customCommand: 'install'
    customRegistry: 'useNpmrc'

# Optionally cache the install location for legacy compat versions so repeated
# runs don't re-download every compat version of the packages.
- ${{ if eq(parameters.cacheCompatVersionsInstalls, true) }}:
  - task: Cache@2
    displayName: Cache compat versions install location
    timeoutInMinutes: 3
    continueOnError: true
    inputs:
      key: '"compat-version-installs" | "$(Agent.OS)" | "${{ parameters.testCommand }}" | "${{ variant.name }}"'
      path: ${{ parameters.testWorkspace }}/node_modules/@fluid-private/test-version-utils/node_modules/.legacy/
# run the test
- task: Npm@1
  displayName: '[test] ${{ parameters.testCommand }} ${{ variant.flags }}'
  continueOnError: ${{ parameters.continueOnError }}
  env:
    ${{ parameters.env }}
  inputs:
    command: 'custom'
    workingDir: ${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}
    customCommand: 'run ${{ parameters.testCommand }} -- ${{ variant.flags }}'

- ${{ if eq(parameters.skipTestResultPublishing, false) }}:
  # filter report: strip <skipped> entries from the JUnit report files so skipped
  # tests don't pollute the published results.
  - task: Bash@3
    displayName: Filter skipped test from report
    # Run even if the test task failed; we still want to publish whatever results exist.
    condition: succeededOrFailed()
    inputs:
      workingDir: ${{ variables.testPackageDir }}/nyc
      targetType: 'inline'
      script: |
        if [[ -d ${{ variables.testPackageDir }}/nyc ]]; then
          echo "directory '${{ variables.testPackageDir }}/nyc' exists."
          cd ${{ variables.testPackageDir }}/nyc
          if ! [[ -z "$(ls -A .)" ]]; then
            curdirfiles=`ls`
            echo "report file(s) ${curdirfiles} found. Filtering skipped tests..."
            for i in `ls`; do sed -i '/<skipped/d' $i; done
          else
            echo "No report files found in '${{ variables.testPackageDir }}/nyc'"
          fi
        else
          echo "Directory '${{ variables.testPackageDir }}/nyc' not found"
        fi
  # Upload results
  - task: PublishTestResults@2
    displayName: Publish Test Results
    inputs:
      testResultsFormat: 'JUnit'
      testResultsFiles: '**/*junit-report.xml'
      searchFolder: ${{ variables.testPackageDir }}/nyc
      mergeTestResults: false
    condition: succeededOrFailed()
# Publish tinylicious log for troubleshooting
- ${{ if or(contains(convertToJson(parameters.testCommand), 'tinylicious'), contains(convertToJson(parameters.testCommand), 't9s')) }}:
  - task: PublishPipelineArtifact@1
    displayName: Publish Artifact - Tinylicious Log
    inputs:
      targetPath: '${{ parameters.testWorkspace }}/node_modules/${{ parameters.testPackage }}/tinylicious.log'
      artifactName: 'tinyliciousLog_attempt-$(System.JobAttempt)'
      publishLocation: 'pipeline'
    condition: always()
    continueOnError: true # Keep running subsequent tasks even if this one fails (e.g. the tinylicious log wasn't there)

# Log Test Failures
# - template: include-process-test-results.yml
#   parameters:
#     buildDirectory: ${{ variables.testPackageDir }}

- ${{ parameters.additionalSteps }}
# Add a stage that uploads telemetry about the stage defined above (timing/result,
# and optionally test pass rate) to Kusto. It depends only on that stage, so it
# runs immediately after it rather than at the end of the whole pipeline.
- template: /tools/pipelines/templates/include-upload-stage-telemetry.yml@self
  parameters:
    stageId: ${{ parameters.stageId }}
    uploadTestPassRateTelemetry: ${{ parameters.uploadTestPassRateTelemetry }}
    pipelineIdentifierForTelemetry: ${{ parameters.pipelineIdentifierForTelemetry }}
    testWorkspace: ${{ parameters.testWorkspace }}

View file

@ -0,0 +1,114 @@
# Copyright (c) Microsoft Corporation and contributors. All rights reserved.
# Licensed under the MIT License.

# include-upload-stage-telemetry
#
# This template adds a new stage to a pipeline.
# The new stage "targets" another stage (specified as a parameter) and uploads telemetry about that stage to Kusto.

parameters:
# Id for the stage whose runtime + result telemetry will be uploaded to Kusto.
- name: stageId
  type: string

# If true, the stage that uploads pipeline telemetry to Kusto will include tasks to upload
# test pass rate telemetry for the stage specified in stageId.
- name: uploadTestPassRateTelemetry
  type: boolean
  default: false

# Unique identifier for the pipeline that is including this template.
# Used to identify telemetry data in Kusto for this pipeline.
- name: pipelineIdentifierForTelemetry
  type: string

# Workspace folder used as scratch space for the intermediate JSON files that the
# telemetry-generator handlers consume.
- name: testWorkspace
  type: string

stages:
- stage: ${{ parameters.stageId }}_upload_telemetry
  displayName: Upload stage telemetry to Kusto ('${{ parameters.stageId }}')
  # Run whether the target stage succeeded or failed; we want telemetry for failed runs too.
  condition: succeededOrFailed()
  dependsOn:
  - ${{ parameters.stageId }}
  variables:
  - group: ado-feeds
  - name: absolutePathToTelemetryGenerator
    value: $(Build.SourcesDirectory)/tools/telemetry-generator
    readonly: true
  jobs:
  - job: upload_run_telemetry
    displayName: Upload stage telemetry to Kusto
    pool: Small
    variables:
    - group: ado-feeds
    steps:
    - template: /tools/pipelines/templates/include-telemetry-setup.yml@self
      parameters:
        devFeedUrl: $(ado-feeds-dev)
        officeFeedUrl: $(ado-feeds-office)
        isCheckoutNeeded: true
    - task: Bash@3
      displayName: Get stage timing and result data from ADO
      env:
        BUILD_ID: $(Build.BuildId)
        ADO_API_TOKEN: $(System.AccessToken)
        WORK_FOLDER: ${{ parameters.testWorkspace }}/stageTimingAndResult
      inputs:
        targetType: 'inline'
        script: |
          echo "Creating output folder '$WORK_FOLDER'"
          mkdir -p $WORK_FOLDER

          echo "Retrieving data from ADO API";
          # Echo the request with the token redacted so it shows up in the pipeline logs.
          echo "curl -u \":<REDACTED>\" \"https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline?api-version=7.1-preview.2\""
          curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version=7.1-preview.2" > $WORK_FOLDER/output.json
    - task: Bash@3
      displayName: Submit telemetry for stage timing and result
      env:
        BUILD_ID: $(Build.BuildId)
        STAGE_ID: ${{ parameters.stageId }}
        PIPELINE: ${{ parameters.pipelineIdentifierForTelemetry }}
        WORK_FOLDER: ${{ parameters.testWorkspace }}/stageTimingAndResult
      inputs:
        targetType: 'inline'
        workingDirectory: $(absolutePathToTelemetryGenerator)
        script: |
          echo "Listing files in '$WORK_FOLDER'"
          ls -laR $WORK_FOLDER;

          node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir "$WORK_FOLDER";
    - ${{ if parameters.uploadTestPassRateTelemetry }}:
      - task: Bash@3
        displayName: Get test pass rate data from ADO
        env:
          BUILD_ID: $(Build.BuildId)
          STAGE_ID: ${{ parameters.stageId }}
          ADO_API_TOKEN: $(System.AccessToken)
          WORK_FOLDER: ${{ parameters.testWorkspace }}/stageTestPassRate
        inputs:
          targetType: 'inline'
          script: |
            echo "Fetching test pass rate data and saving into JSON files"
            node "$(Build.SourcesDirectory)/scripts/get-test-pass-rate.mjs"
      - task: Bash@3
        displayName: Submit telemetry for test pass rate
        env:
          BUILD_ID: $(Build.BuildId)
          STAGE_ID: ${{ parameters.stageId }}
          PIPELINE: ${{ parameters.pipelineIdentifierForTelemetry }}
          WORK_FOLDER: ${{ parameters.testWorkspace }}/stageTestPassRate
        inputs:
          targetType: 'inline'
          workingDirectory: $(absolutePathToTelemetryGenerator)
          script: |
            echo "Listing files in '$WORK_FOLDER'"
            ls -laR $WORK_FOLDER;

            node --require @ff-internal/aria-logger bin/run --handlerModule "$(absolutePathToTelemetryGenerator)/dist/handlers/testPassRate.js" --dir "$WORK_FOLDER"

View file

@ -59,51 +59,9 @@ parameters:
testFileTarName: matrix
testCommand: test:stress
stages:
- template: templates/include-conditionally-run-stress-tests.yml
parameters:
artifactBuildId: $(resources.pipeline.client.runID)
packages: ${{ parameters.packages }}
testWorkspace: ${{ variables.testWorkspace }}
# Capture telemetry about pipeline stages
- stage: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
condition: succeededOrFailed()
dependsOn:
- ${{ each package in parameters.packages }}:
# This list of stages should be kept in sync with the way we generate their ids in tools/pipelines/templates/include-conditionally-run-stress-tests.yml
- ${{ replace(replace(package.name, '@fluidframework/', ''), '@fluid-experimental/', 'experimental_') }}_stress_tests
jobs:
- job: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
pool: Small
variables:
- group: ado-feeds
steps:
- template: templates/include-telemetry-setup.yml
parameters:
devFeedUrl: $(ado-feeds-dev)
officeFeedUrl: $(ado-feeds-office)
isCheckoutNeeded: true
- task: Bash@3
displayName: Retrieve buildId results
inputs:
targetType: 'inline'
workingDirectory: $(absolutePathToTelemetryGenerator)
script: |
echo "creating output folder"
mkdir -p ${{ variables.testWorkspace }}/timingOutput
echo "Retrieving pipeline run timeline data command ..."
echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline"'
curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version\=6.0-preview.1" > ${{ variables.testWorkspace }}/timingOutput/output.json
pwd;
ls -laR ${{ variables.testWorkspace }}/timingOutput/output.json;
cat ${{ variables.testWorkspace }}/timingOutput/output.json;
node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '${{ variables.testWorkspace }}/timingOutput/';
env:
BUILD_ID: $(Build.BuildId)
ADO_API_TOKEN: $(System.AccessToken)
PIPELINE: 'DdsStressService'

View file

@ -81,6 +81,9 @@ variables:
readonly: true
- group: prague-key-vault
- group: ado-feeds
- name: pipelineIdentifierForTelemetry
value: 'PerformanceBenchmark'
readonly: true
lockBehavior: sequential
stages:
@ -209,6 +212,12 @@ stages:
artifactName: 'perf-test-outputs_execution-time'
condition: succeededOrFailed()
- template: /tools/pipelines/templates/include-upload-stage-telemetry.yml@self
parameters:
stageId: perf_unit_tests_runtime
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
testWorkspace: ${{ variables.testWorkspace }}
# Performance unit tests - memory
- stage: perf_unit_tests_memory
displayName: Perf unit tests - memory
@ -334,6 +343,12 @@ stages:
artifactName: 'perf-test-outputs_memory-usage'
condition: succeededOrFailed()
- template: /tools/pipelines/templates/include-upload-stage-telemetry.yml@self
parameters:
stageId: perf_unit_tests_memory
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
testWorkspace: ${{ variables.testWorkspace }}
# Performance unit tests - customBenchmark
- stage: perf_unit_tests_customBenchmark
displayName: Perf unit tests - customBenchmark
@ -459,7 +474,11 @@ stages:
artifactName: 'perf-test-outputs_custom-data'
condition: succeededOrFailed()
- template: /tools/pipelines/templates/include-upload-stage-telemetry.yml@self
parameters:
stageId: perf_unit_tests_customBenchmark
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
testWorkspace: ${{ variables.testWorkspace }}
- ${{ each endpointObject in parameters.endpoints }}:
@ -656,45 +675,8 @@ stages:
echo "Cleanup ${{ variables.memoryUsageTestOutputFolder }}"
rm -rf ${{ variables.memoryUsageTestOutputFolder }};
# Capture telemetry about pipeline stages
- stage: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
condition: succeededOrFailed()
dependsOn:
- perf_unit_tests_runtime
- perf_unit_tests_memory
- perf_unit_tests_customBenchmark
- perf_e2e_tests_local
- perf_e2e_tests_odsp
- perf_e2e_tests_frs
jobs:
- job: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
pool: Small
steps:
- template: templates/include-telemetry-setup.yml
parameters:
# ado-feeds-dev and ado-feeds-office come from the ado-feeds variable group
devFeedUrl: $(ado-feeds-dev)
officeFeedUrl: $(ado-feeds-office)
isCheckoutNeeded: true
- task: Bash@3
displayName: Retrieve and Upload pipeline run stats to Kusto
env:
BUILD_ID: $(Build.BuildId)
ADO_API_TOKEN: $(System.AccessToken)
PIPELINE: 'PerformanceBenchmark'
inputs:
targetType: 'inline'
workingDirectory: $(absolutePathToTelemetryGenerator)
script: |
echo "creating output folder"
mkdir -p ${{ variables.testWorkspace }}/timingOutput
echo "Retrieving pipeline run timeline data ..."
echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$(Build.BuildId)/timeline"'
curl -u ":$(System.AccessToken)" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$(Build.BuildId)/timeline\?api-version\=6.0-preview.1" > ${{ variables.testWorkspace }}/timingOutput/output.json
pwd;
ls -laR ${{ variables.testWorkspace }}/timingOutput/output.json;
cat ${{ variables.testWorkspace }}/timingOutput/output.json;
node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '${{ variables.testWorkspace }}/timingOutput/';
- template: /tools/pipelines/templates/include-upload-stage-telemetry.yml@self
parameters:
stageId: perf_e2e_tests_${{ endpointObject.endpointName }}
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
testWorkspace: ${{ variables.testWorkspace }}

View file

@ -30,154 +30,105 @@ variables:
- name: testPackage
value: "@fluid-internal/test-service-load"
readonly: true
- name: pipelineIdentifierForTelemetry
value: 'RealStressService'
readonly: true
lockBehavior: sequential
stages:
# stress tests odsp
- stage: stress_tests_odsp
displayName: Stress tests - Odsp
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: stress-odsp-lock
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:odsp
skipTestResultPublishing: true
env:
login__microsoft__clientId: $(login-microsoft-clientId)
login__odsp__test__tenants: $(automation-stress-login-odsp-test-tenants)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: stress_tests_odsp
stageDisplayName: Stress tests - Odsp
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:odsp
skipTestResultPublishing: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
stageVariables:
- group: stress-odsp-lock
env:
login__microsoft__clientId: $(login-microsoft-clientId)
login__odsp__test__tenants: $(automation-stress-login-odsp-test-tenants)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# stress tests odsp dogfood
- stage: stress_tests_odspdf
displayName: Stress tests - Odspdf
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: stress-odspdf-lock
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:odspdf
skipTestResultPublishing: true
env:
login__microsoft__clientId: $(login-microsoft-clientId)
login__odspdf__test__tenants: $(automation-stress-login-odspdf-test-tenants)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: stress_tests_odspdf
stageDisplayName: Stress tests - Odspdf
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:odspdf
skipTestResultPublishing: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
stageVariables:
- group: stress-odspdf-lock
env:
login__microsoft__clientId: $(login-microsoft-clientId)
login__odspdf__test__tenants: $(automation-stress-login-odspdf-test-tenants)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# stress tests tinylicious
- stage: stress_tests_tinylicious
displayName: Stress tests - tinylicious
dependsOn: []
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:t9s
skipTestResultPublishing: true
env:
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
- template: templates/include-test-real-service.yml
parameters:
stageId: stress_tests_tinylicious
stageDisplayName: Stress tests - tinylicious
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:t9s
skipTestResultPublishing: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
# stress tests frs
- stage: stress_tests_frs
displayName: Stress tests - frs
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: stress-frs-lock
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:frs
skipTestResultPublishing: true
env:
fluid__test__driver__frs: $(automation-fluid-test-driver-frs-stress-test)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: stress_tests_frs
stageDisplayName: Stress tests - frs
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:frs
skipTestResultPublishing: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
stageVariables:
- group: stress-frs-lock
env:
fluid__test__driver__frs: $(automation-fluid-test-driver-frs-stress-test)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# stress tests frs canary
- stage: stress_tests_frs_canary
displayName: Stress tests - frs canary
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: stress-frs-canary
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:frs:canary
skipTestResultPublishing: true
env:
fluid__test__driver__frsCanary: $(automation-fluid-driver-frs-canary-stress-test)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject
# Capture telemetry about pipeline stages
- stage: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
condition: succeededOrFailed()
dependsOn:
- stress_tests_odsp
- stress_tests_odspdf
- stress_tests_tinylicious
- stress_tests_frs
- stress_tests_frs_canary
jobs:
- job: upload_run_telemetry
displayName: Upload pipeline run telemetry to Kusto
pool: Small
variables:
- group: ado-feeds
steps:
- template: templates/include-telemetry-setup.yml
parameters:
devFeedUrl: $(ado-feeds-dev)
officeFeedUrl: $(ado-feeds-office)
isCheckoutNeeded: true
- task: Bash@3
displayName: Retrieve buildId results
inputs:
targetType: 'inline'
workingDirectory: $(absolutePathToTelemetryGenerator)
script: |
echo "creating output folder"
mkdir -p ${{ variables.testWorkspace }}/timingOutput
echo "Retrieving pipeline run timeline data command ..."
echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline"'
curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version\=6.0-preview.1" > ${{ variables.testWorkspace }}/timingOutput/output.json
pwd;
ls -laR ${{ variables.testWorkspace }}/timingOutput/output.json;
cat ${{ variables.testWorkspace }}/timingOutput/output.json;
node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '${{ variables.testWorkspace }}/timingOutput/';
env:
BUILD_ID: $(Build.BuildId)
ADO_API_TOKEN: $(System.AccessToken)
PIPELINE: 'RealStressService'
- template: templates/include-test-real-service.yml
parameters:
stageId: stress_tests_frs_canary
stageDisplayName: Stress tests - frs canary
poolBuild: Large
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 120
testCommand: start:frs:canary
skipTestResultPublishing: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
stageVariables:
- group: stress-frs-canary
env:
fluid__test__driver__frsCanary: $(automation-fluid-driver-frs-canary-stress-test)
FLUID_TEST_LOGGER_PKG_SPECIFIER: '@ff-internal/aria-logger' # Contains getTestLogger impl to inject

View file

@ -30,200 +30,140 @@ variables:
- name: absolutePathToTelemetryGenerator
value: $(Build.SourcesDirectory)/tools/telemetry-generator
readonly: true
- name: pipelineIdentifierForTelemetry
value: 'EndToEndTests'
readonly: true
lockBehavior: sequential
stages:
# end-to-end tests local server
- stage: e2e_local_server
displayName: e2e - local server
dependsOn: []
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: NewLarge-linux-1ES # Need Large pool for full-compat matrix
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:local:report:full
cacheCompatVersionsInstalls: true
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_local_server
stageDisplayName: e2e - local server
poolBuild: NewLarge-linux-1ES # Need Large pool for full-compat matrix
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:local:report:full
cacheCompatVersionsInstalls: true
uploadTestPassRateTelemetry: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# end-to-end tests tinylicious
- stage: e2e_tinylicious
displayName: e2e - tinylicious
dependsOn: []
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Large # Need Large pool for full-compat matrix
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:tinylicious:report:full
cacheCompatVersionsInstalls: true
# TODO: AB#8968 tracks figuring out the root cause of the extended delay, and restoring this timeout to 90m or less
timeoutInMinutes: 120
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_tinylicious
stageDisplayName: e2e - tinylicious
poolBuild: Large # Need Large pool for full-compat matrix
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:tinylicious:report:full
cacheCompatVersionsInstalls: true
# TODO: AB#8968 tracks figuring out the root cause of the extended delay, and restoring this timeout to 90m or less
timeoutInMinutes: 120
uploadTestPassRateTelemetry: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
# end-to-end tests routerlicious
- stage: e2e_routerlicious
displayName: e2e - routerlicious
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: e2e-r11s-lock
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:routerlicious:report
continueOnError: true
r11sSelfSignedCertSecureFile: wu2-tls-certificate.pem
splitTestVariants:
- name: Non-compat
flags: --compatVersion=0
- name: N-1
flags: --compatVersion=-1
- name: LTS
flags: --compatVersion=LTS
- name: Cross-version
flags: --compatVersion=CROSS_VERSION
cacheCompatVersionsInstalls: true
env:
fluid__test__driver__r11s: $(automation-fluid-test-driver-r11s)
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_routerlicious
stageDisplayName: e2e - routerlicious
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
testCommand: test:realsvc:routerlicious:report
continueOnError: true
r11sSelfSignedCertSecureFile: wu2-tls-certificate.pem
stageVariables:
- group: e2e-r11s-lock
splitTestVariants:
- name: Non-compat
flags: --compatVersion=0
- name: N-1
flags: --compatVersion=-1
- name: LTS
flags: --compatVersion=LTS
- name: Cross-version
flags: --compatVersion=CROSS_VERSION
cacheCompatVersionsInstalls: true
uploadTestPassRateTelemetry: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
fluid__test__driver__r11s: $(automation-fluid-test-driver-r11s)
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# end-to-end tests frs
- stage: e2e_frs
displayName: e2e - frs
dependsOn: []
# use a variable group with exclusive lock force only one run at a time and avoid overloading the server/throttling
variables:
- group: e2e-frs-lock
jobs:
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 360
continueOnError: true
testCommand: test:realsvc:frs:report
splitTestVariants:
- name: Non-compat
flags: --compatVersion=0
- name: N-1
flags: --compatVersion=-1
- name: LTS
flags: --compatVersion=LTS
- name: Cross-Version
flags: --compatVersion=CROSS_VERSION
cacheCompatVersionsInstalls: true
env:
fluid__test__driver__frs: $(automation-fluid-test-driver-frs)
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_frs
stageDisplayName: e2e - frs
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
artifactBuildId: $(resources.pipeline.client.runID)
timeoutInMinutes: 360
continueOnError: true
testCommand: test:realsvc:frs:report
r11sSelfSignedCertSecureFile: wu2-tls-certificate.pem
stageVariables:
- group: e2e-frs-lock
splitTestVariants:
- name: Non-compat
flags: --compatVersion=0
- name: N-1
flags: --compatVersion=-1
- name: LTS
flags: --compatVersion=LTS
- name: Cross-Version
flags: --compatVersion=CROSS_VERSION
cacheCompatVersionsInstalls: true
uploadTestPassRateTelemetry: true
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
fluid__test__driver__frs: $(automation-fluid-test-driver-frs)
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# end-to-end tests odsp (pre-refactor form: telemetry for this stage was uploaded
# by the separate upload_run_telemetry stage below).
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- stage: e2e_odsp
  displayName: e2e - odsp
  dependsOn: []
  # use a variable group with an exclusive lock to force only one run at a time and
  # avoid overloading the server/throttling
  variables:
    - group: e2e-odsp-lock
  jobs:
    - template: templates/include-test-real-service.yml
      parameters:
        poolBuild: Small
        testPackage: ${{ variables.testPackage }}
        testWorkspace: ${{ variables.testWorkspace }}
        artifactBuildId: $(resources.pipeline.client.runID)
        timeoutInMinutes: 360
        continueOnError: true
        testCommand: test:realsvc:odsp:report
        splitTestVariants:
          - name: Non-compat
            flags: --compatVersion=0 --tenantIndex=0
          - name: N-1
            flags: --compatVersion=-1 --tenantIndex=1
          - name: Cross-Version
            flags: --compatVersion=CROSS_VERSION
          # Assumes Non-compat and N-1 scenarios are covered
          # Tests N-2 to LTS+1 back compat for loader
          # Tests N-2 to LTS+3 back compat for loader + driver
          - name: N-2ToLTS+1-back-compat
            flags: --compatVersion=V2_INT_3 --tenantIndex=3
        cacheCompatVersionsInstalls: true
        env:
          login__microsoft__clientId: $(login-microsoft-clientId)
          login__odsp__test__tenants: $(automation-e2e-login-odsp-test-tenants)
          FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# Capture telemetry about pipeline stages (pre-refactor form: a single stage that
# waits for ALL test stages and then uploads their telemetry in one go — the delay
# this introduces is what the refactor removes).
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- stage: upload_run_telemetry
  displayName: Upload pipeline run telemetry to Kusto
  # Run even when the test stages failed: timing/result telemetry is wanted either way.
  condition: succeededOrFailed()
  dependsOn:
    - e2e_local_server
    - e2e_tinylicious
    - e2e_routerlicious
    - e2e_frs
    - e2e_odsp
  jobs:
    - job: upload_run_telemetry
      displayName: Upload pipeline run telemetry to Kusto
      pool: Small
      variables:
        - group: ado-feeds
      steps:
        - template: templates/include-telemetry-setup.yml
          parameters:
            devFeedUrl: $(ado-feeds-dev)
            officeFeedUrl: $(ado-feeds-office)
            isCheckoutNeeded: true
        # Pull the run's timeline from the ADO REST API and feed it to the
        # stageTimingRetriever handler of the telemetry-generator tool.
        - task: Bash@3
          displayName: Retrieve buildId results
          env:
            BUILD_ID: $(Build.BuildId)
            ADO_API_TOKEN: $(System.AccessToken)
            PIPELINE: 'EndToEndTests'
          inputs:
            targetType: 'inline'
            # NOTE(review): absolutePathToTelemetryGenerator is not defined in this
            # stanza's variables — assumed defined at pipeline level; confirm.
            workingDirectory: $(absolutePathToTelemetryGenerator)
            script: |
              echo "creating output folder"
              mkdir -p ${{ variables.testWorkspace }}/timingOutput
              echo "Retrieving pipeline run timeline data command ..."
              echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline"'
              curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version\=6.0-preview.1" > ${{ variables.testWorkspace }}/timingOutput/output.json
              pwd;
              ls -laR ${{ variables.testWorkspace }}/timingOutput/output.json;
              cat ${{ variables.testWorkspace }}/timingOutput/output.json;
              node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '${{ variables.testWorkspace }}/timingOutput/';
        # Retrieve E2E test pass rate
        - task: Bash@3
          displayName: Retrieve test pass rate
          env:
            BUILD_ID: $(Build.BuildId)
            ADO_API_TOKEN: $(System.AccessToken)
            BASE_OUTPUT_FOLDER: ${{ variables.testWorkspace }}
            BUILD_SOURCES_DIRECTORY: $(Build.SourcesDirectory)
            PIPELINE: 'EndToEndTests'
          inputs:
            targetType: 'inline'
            workingDirectory: $(absolutePathToTelemetryGenerator)
            script: |
              echo "Fetching pass rate data and saving into JSON files"
              node "$BUILD_SOURCES_DIRECTORY/scripts/get-test-pass-rate.mjs"
              echo "Files created. Now running telemetry-generator"
              node --require @ff-internal/aria-logger bin/run --handlerModule "$(absolutePathToTelemetryGenerator)/dist/handlers/testPassRate.js" --dir "$BASE_OUTPUT_FOLDER/stageFiles"
# Post-refactor invocation of the real-service test template for the ODSP e2e tests.
# With stageId/uploadTestPassRateTelemetry set, the template also emits a follow-up
# stage that uploads this stage's telemetry right after it completes.
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- template: templates/include-test-real-service.yml
  parameters:
    stageId: e2e_odsp
    stageDisplayName: e2e - odsp
    poolBuild: Small
    testPackage: ${{ variables.testPackage }}
    testWorkspace: ${{ variables.testWorkspace }}
    artifactBuildId: $(resources.pipeline.client.runID)
    timeoutInMinutes: 360
    continueOnError: true
    testCommand: test:realsvc:odsp:report
    # Stage-level variable group (named *-lock; presumably an exclusive lock so only
    # one run hits the ODSP service at a time — TODO confirm).
    stageVariables:
      - group: e2e-odsp-lock
    splitTestVariants:
      - name: Non-compat
        flags: --compatVersion=0 --tenantIndex=0
      - name: N-1
        flags: --compatVersion=-1 --tenantIndex=1
      - name: Cross-Version
        flags: --compatVersion=CROSS_VERSION
      # Assumes Non-compat and N-1 scenarios are covered
      # Tests N-2 to LTS+1 back compat for loader
      # Tests N-2 to LTS+3 back compat for loader + driver
      - name: N-2ToLTS+1-back-compat
        flags: --compatVersion=V2_INT_3 --tenantIndex=3
    cacheCompatVersionsInstalls: true
    uploadTestPassRateTelemetry: true
    pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
    env:
      login__microsoft__clientId: $(login-microsoft-clientId)
      login__odsp__test__tenants: $(automation-e2e-login-odsp-test-tenants)
      FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject

View file

@@ -30,100 +30,57 @@ variables:
- name: testOdspPackage
value: "@fluid-experimental/odsp-end-to-end-tests"
readonly: true
- name: pipelineIdentifierForTelemetry
value: 'ServiceClientsEndToEndTests'
readonly: true
stages:
# Run Azure Client FRS Tests
# NOTE(review): this chunk is diff-view residue with indentation stripped. The two
# include-test-real-service.yml invocations below are the pre-refactor version (no
# stageId) and the post-refactor replacement (stageId + pipelineIdentifierForTelemetry)
# of the same entry, interleaved by the diff view — only one exists in the real file.
# Confirm against the repository before relying on either.
- stage: e2e_azure_client_frs
displayName: e2e - azure client with frs
dependsOn: []
jobs:
# Pre-refactor invocation: telemetry was uploaded later by a separate upload_run_telemetry stage.
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
testCommand: test:realsvc:azure
artifactBuildId: $(resources.pipeline.client.runID)
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
azure__fluid__relay__service__tenantId: $(azure-fluid-relay-service-tenantId)
azure__fluid__relay__service__endpoint: $(azure-fluid-relay-service-endpoint)
azure__fluid__relay__service__key: $(azure-fluid-relay-service-key)
# Post-refactor invocation: stageId/pipelineIdentifierForTelemetry let the template
# upload this stage's telemetry immediately after the stage completes.
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_azure_client_frs
stageDisplayName: e2e - azure client with frs
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
testCommand: test:realsvc:azure
artifactBuildId: $(resources.pipeline.client.runID)
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
azure__fluid__relay__service__tenantId: $(azure-fluid-relay-service-tenantId)
azure__fluid__relay__service__endpoint: $(azure-fluid-relay-service-endpoint)
azure__fluid__relay__service__key: $(azure-fluid-relay-service-key)
# NOTE(review): diff-view residue — as in the stage above, the two template
# invocations below are the pre-refactor (no stageId) and post-refactor
# (stageId + pipelineIdentifierForTelemetry) versions of the same jobs entry,
# interleaved by the diff view; only one exists in the real file. Confirm.
- stage: e2e_azure_client_local_server
displayName: e2e - azure client with azure local service
dependsOn: []
jobs:
# Pre-refactor invocation.
- template: templates/include-test-real-service.yml
parameters:
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
testCommand: test:realsvc:tinylicious
artifactBuildId: $(resources.pipeline.client.runID)
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
# Post-refactor invocation: uploads this stage's telemetry right after it completes.
- template: templates/include-test-real-service.yml
parameters:
stageId: e2e_azure_client_local_server
stageDisplayName: e2e - azure client with azure local service
poolBuild: Small
testPackage: ${{ variables.testPackage }}
testWorkspace: ${{ variables.testWorkspace }}
testCommand: test:realsvc:tinylicious
artifactBuildId: $(resources.pipeline.client.runID)
pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
env:
FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
# Disable colorization for tinylicious logs (not useful when printing to a file)
logger__colorize: "false" # Need to pass it as string so ADO doesn't convert it into False (capital F) which doesn't work
logger__morganFormat: tiny
# ODSP client against the real ODSP service (pre-refactor form: telemetry for this
# stage was uploaded by the separate upload_run_telemetry stage below).
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- stage: e2e_odsp_client_odsp_server
  displayName: e2e - odsp client with odsp service
  dependsOn: []
  jobs:
    - template: templates/include-test-real-service.yml
      parameters:
        poolBuild: Small
        testPackage: ${{ variables.testOdspPackage }}
        testWorkspace: ${{ variables.testWorkspace }}
        testCommand: test:realsvc:odsp:run
        artifactBuildId: $(resources.pipeline.client.runID)
        env:
          FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
          odsp__client__clientId: $(odsp-client-clientId)
          odsp__client__siteUrl: $(odsp-client-siteUrl)
          odsp__client__driveId: $(odsp-client-driveId)
          login__odspclient__spe__test__tenants: $(login-odspclient-spe-test-tenants)
# Capture pipeline stage results (pre-refactor form for the service-clients pipeline:
# one stage that waits for all test stages, then uploads their telemetry together).
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- stage: upload_run_telemetry
  displayName: Upload pipeline run telemetry to Kusto
  # Run even when the test stages failed: timing/result telemetry is wanted either way.
  condition: succeededOrFailed()
  dependsOn:
    - e2e_azure_client_frs
    - e2e_azure_client_local_server
    - e2e_odsp_client_odsp_server
  jobs:
    - job: upload_run_telemetry
      displayName: Upload pipeline run telemetry to Kusto
      pool: Small
      variables:
        - group: ado-feeds
        - name: absolutePathToTelemetryGenerator
          value: $(Build.SourcesDirectory)/tools/telemetry-generator
          readonly: true
      steps:
        - template: templates/include-telemetry-setup.yml
          parameters:
            devFeedUrl: $(ado-feeds-dev)
            officeFeedUrl: $(ado-feeds-office)
            isCheckoutNeeded: true
        # Pull the run's timeline from the ADO REST API and feed it to the
        # stageTimingRetriever handler of the telemetry-generator tool.
        - task: Bash@3
          displayName: Retrieve buildId results
          inputs:
            targetType: 'inline'
            workingDirectory: $(absolutePathToTelemetryGenerator)
            script: |
              echo "creating output folder"
              mkdir -p ${{ variables.testWorkspace }}/timingOutput
              echo "Retrieving pipeline run timeline data command ..."
              echo 'curl -u ":<REDACTED>" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline"'
              curl -u ":$ADO_API_TOKEN" "https://dev.azure.com/fluidframework/internal/_apis/build/builds/$BUILD_ID/timeline\?api-version\=6.0-preview.1" > ${{ variables.testWorkspace }}/timingOutput/output.json
              pwd;
              ls -laR ${{ variables.testWorkspace }}/timingOutput/output.json;
              cat ${{ variables.testWorkspace }}/timingOutput/output.json;
              node --require @ff-internal/aria-logger bin/run --handlerModule $(absolutePathToTelemetryGenerator)/dist/handlers/stageTimingRetriever.js --dir '${{ variables.testWorkspace }}/timingOutput/';
          env:
            BUILD_ID: $(Build.BuildId)
            ADO_API_TOKEN: $(System.AccessToken)
            PIPELINE: ServiceClientsEndToEndTests
# Post-refactor invocation for the ODSP-client stage: stageId and
# pipelineIdentifierForTelemetry let the template upload this stage's telemetry
# immediately after the stage completes, instead of at the end of the whole run.
# (Indentation reconstructed — this chunk arrived whitespace-stripped; verify against repo.)
- template: templates/include-test-real-service.yml
  parameters:
    stageId: e2e_odsp_client_odsp_server
    stageDisplayName: e2e - odsp client with odsp service
    poolBuild: Small
    testPackage: ${{ variables.testOdspPackage }}
    testWorkspace: ${{ variables.testWorkspace }}
    testCommand: test:realsvc:odsp:run
    artifactBuildId: $(resources.pipeline.client.runID)
    pipelineIdentifierForTelemetry: ${{ variables.pipelineIdentifierForTelemetry }}
    env:
      FLUID_TEST_LOGGER_PKG_PATH: ${{ variables.testWorkspace }}/node_modules/@ff-internal/aria-logger # Contains getTestLogger impl to inject
      odsp__client__clientId: $(odsp-client-clientId)
      odsp__client__siteUrl: $(odsp-client-siteUrl)
      odsp__client__driveId: $(odsp-client-driveId)
      login__odspclient__spe__test__tenants: $(login-odspclient-spe-test-tenants)

View file

@@ -10,60 +10,94 @@ interface ParsedJob {
stageName: string;
startTime: number;
finishTime: number;
totalTime: number;
totalSeconds: number;
state: string;
result: string;
}
/**
* This handler is used to process a JSON payload with timing and result data for one or more stages in a pipeline.
* The payload is assumed to be the output from the ADO REST API that gives us information about a pipeline run.
* The handler then sends telemetry events to Kusto with the processed data.
*
* It assumes the specified stage (or if no specific stage is specified, all other stages in the pipeline) has already
* completed, and will throw otherwise.
*
* @param fileData - A JSON object obtained by calling JSON.parse() on the output of the ADO REST API that gives us
* information about a pipeline run, i.e.
* https://dev.azure.com/fluidframework/internal/_apis/build/builds/<buildId>/timeline?api-version=6.0-preview.1
* @param logger - The `ITelemetryLogger` to use to output the extracted data.
*/
module.exports = function handler(fileData, logger): void {
// - fileData is a JSON object obtained by calling JSON.parse() on the contents of a file.
// In this particular handler, we are using the timeline REST API to retrieve the status of the pipeline:
// Ex. https://dev.azure.com/fluidframework/internal/_apis/build/builds/<buildId>/timeline?api-version=6.0-preview.1
// - logger is an ITelemetryBufferedLogger. Call its send() method to write the output telemetry
// events.
if (fileData.records?.length === undefined || fileData.records?.length === 0) {
console.log(`could not locate records info`);
throw new Error("No records found in the input data.");
}
if (process.env.BUILD_ID === undefined) {
console.log("BUILD_ID not defined.");
} else {
console.log("BUILD_ID", process.env.BUILD_ID);
throw new Error("BUILD_ID environment variable is not set.");
}
if (process.env.PIPELINE === undefined) {
throw new Error("PIPELINE environment variable is not set.");
}
// Note: type == "Task" would include tasks from the stages in the result set. It might be interesting in the future - for now we will only collect stages.
console.log("BUILD_ID:", process.env.BUILD_ID);
console.log("PIPELINE:", process.env.PIPELINE);
console.log("STAGE_ID:", process.env.STAGE_ID);
const parsedJobs: ParsedJob[] = fileData.records
.filter((job) => job.type === "Stage")
.map((job) => {
const startTime = Date.parse(job.startTime?.toString()) ?? undefined;
const finishTime = Date.parse(job.finishTime?.toString()) ?? undefined;
const dateDiff =
finishTime && startTime ? Math.abs(finishTime - startTime) / 1000 : undefined; // diff in seconds
console.log(`Name=${job.name}`);
// Note: type === "Task" or type === "Job" would include task-level (or job-level, respectively) telemetry.
// It might be interesting in the future - for now we will only collect stage-level telemetry.
// If given a specific STAGE_ID, only process that stage. Otherwise process all stages.
.filter(
(job) =>
job.type === "Stage" &&
(process.env.STAGE_ID === undefined || job.identifier === process.env.STAGE_ID),
)
.map((job): ParsedJob | undefined => {
console.log(
`Processing stage - name='${job.name}' identifier='${job.identifier}' state='${job.state}' result='${job.result}'`,
);
const finishTime = Date.parse(job.finishTime?.toString());
if (Number.isNaN(finishTime)) {
console.error(
`Failed to parse finishTime '${job.finishTime}'. The specified pipeline stage might not have finished yet. Telemetry for this stage will not be sent.`,
);
return undefined;
}
// A null start time when 'state === completed' indicates the stage was skipped.
// In that case set startTime = finishTime so duration ends up being 0.
const startTime: number =
job.state === "completed" && job.startTime === null
? finishTime
: Date.parse(job.startTime?.toString());
if (Number.isNaN(startTime)) {
console.error(
`Failed to parse startTime '${job.startTime}'. Telemetry for this stage will not be sent.`,
);
return undefined;
}
return {
// Using the 'identifier' property because that's the one available in the API response for test results,
// and we want the values to be consistent so we can correlate them later.
stageName: job.identifier,
startTime,
finishTime,
totalTime: dateDiff,
totalSeconds: (finishTime - startTime) / 1000,
state: job.state,
result: job.result,
};
});
for (const job of parsedJobs) {
// Hardcoding the last stage name for now as it will need to be bypassed (still in Progress).
if (job.stageName === "runAfterAll") {
continue;
}
for (const job of parsedJobs.filter((x) => x !== undefined)) {
logger.send({
namespace: "FFEngineering", // Transfer the telemetry associated with pipeline status to namespace "FFEngineering".
category: "performance",
eventName: "StageTiming",
benchmarkType: "PipelineInfo",
stageName: job.stageName,
duration: job.totalTime,
duration: job.totalSeconds,
state: job.state,
result: job.result,
buildId: process.env.BUILD_ID ?? "",

View file

@@ -16,11 +16,21 @@ module.exports = function handler(fileData, logger): void {
console.log(`Could not locate test result info.`);
return;
}
if (process.env.BUILD_ID === undefined) {
console.log("BUILD_ID not defined.");
} else {
console.log("BUILD_ID", process.env.BUILD_ID);
throw new Error("BUILD_ID environment variable is not set.");
}
if (process.env.PIPELINE === undefined) {
throw new Error("PIPELINE environment variable is not set.");
}
if (process.env.STAGE_ID === undefined) {
throw new Error("STAGE_ID environment variable is not set.");
}
console.log("BUILD_ID:", process.env.BUILD_ID);
console.log("PIPELINE:", process.env.PIPELINE);
console.log("STAGE_ID:", process.env.STAGE_ID);
const resultSummary = fileData.resultSummary.resultSummaryByRunState.Completed;
console.log(resultSummary);
@@ -28,7 +38,7 @@ module.exports = function handler(fileData, logger): void {
const failedTests: number = resultSummary.aggregatedResultDetailsByOutcome.Failed?.count ?? 0;
const totalTests = passedTests + failedTests;
const passRate = totalTests === 0 ? 0 : passedTests / totalTests;
console.log(passRate);
console.log("Pass rate:", passRate);
logger.send({
namespace: "FFEngineering", // Transfer the telemetry associated with test passing rate to namespace "FFEngineering"