ci: ACN PR Pipeline Security Feature Branch (#2985)

* ci: Add Main Pipeline Template

* chore: azure-cni-overlay work

* chore: cilium overlay work

* test: cilium overlay E2E

* chore: cni overlay ds work

* chore: cilium ds work

* fixes

* ci: Add ACN Trigger

* ci: Use Git Ref Under Review for Testing

* Use Duplicate Files for Feature Test

* Disable Pipeline Trigger

---------

Co-authored-by: Sheyla Trudo <shtrudo@microsoft.com>
Co-authored-by: jpayne3506 <payne.3506@gmail.com>
This commit is contained in:
sheylatrudo 2024-09-15 13:20:44 -07:00 коммит произвёл GitHub
Родитель cc1ba097c7
Коммит 64c6c112e3
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: B5690EEEBB952194
34 изменённых файлов: 4154 добавлений и 0 удалений

26
.config/.gdnsuppress Normal file
Просмотреть файл

@ -0,0 +1,26 @@
{
"version": "latest",
"suppressionSets": {
"default": {
"name": "default",
"createdDate": "2022-11-28 20:04:38Z",
"lastUpdatedDate": "2022-11-28 20:04:38Z"
}
},
"results": {
"d7e55b5f3e54f9253a2fec595f97520ab0ffece607981d2db0fcfe4dae4cd490": {
"signature": "d7e55b5f3e54f9253a2fec595f97520ab0ffece607981d2db0fcfe4dae4cd490",
"alternativeSignatures": [],
"target": "**/testdata/dummy.pem",
"memberOf": [
"default"
],
"tool": "credscan",
"ruleId": "CSCAN-GENERAL0020",
"justification": null,
"createdDate": "2022-11-28 20:04:38Z",
"expirationDate": null,
"type": null
}
}
}

Просмотреть файл

@ -0,0 +1,4 @@
{
"tool": "Credential Scanner",
"suppressions": []
}

Просмотреть файл

@ -0,0 +1,165 @@
# Runs the upstream Kubernetes E2E suites (ginkgo + e2e.test, pulled per the
# cluster's k8s version) against an existing AKS cluster. Each suite is
# opt-in via a boolean parameter so callers only enable what they need.
parameters:
  clusterName: ""
  os: ""
  dependsOn: ""
  sub: ""
  cni: cni
  # Suite toggles. Declared here (default false) because the ${{ if }}
  # conditions below reference them; Azure DevOps rejects references to
  # undeclared template parameters. Callers opt in by passing true.
  datapath: false
  dns: false
  portforward: false
  service: false
  hostport: false
  hybridWin: false
  dualstack: false

jobs:
  - job: CNI_${{ parameters.os }}
    # Run even if sibling jobs were skipped, but not after a cancel/failure.
    condition: and( not(canceled()), not(failed()) )
    displayName: CNI k8s E2E ${{ parameters.os }}
    dependsOn: ${{ parameters.dependsOn }}
    pool:
      isCustom: true
      type: linux
      name: $(BUILD_POOL_NAME_DEFAULT)
    variables:
      ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
      ob_git_checkout: true
    steps:
      - checkout: ACNReviewChanges
        clean: true
      - task: AzureCLI@2
        inputs:
          azureSubscription: ${{ parameters.sub }}
          scriptLocation: "inlineScript"
          scriptType: "bash"
          workingDirectory: $(ACN_DIR)
          addSpnToEnvironment: true
          inlineScript: |
            set -e
            make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
            # sig-release provides test suite tarball(s) per k8s release. Just need to provide k8s version "v1.xx.xx"
            # pulling k8s version from AKS.
            eval k8sVersion="v"$( az aks show -g ${{ parameters.clusterName }} -n ${{ parameters.clusterName }} --query "currentKubernetesVersion")
            echo $k8sVersion
            curl -L https://dl.k8s.io/$k8sVersion/kubernetes-test-linux-amd64.tar.gz -o ./kubernetes-test-linux-amd64.tar.gz
            # https://github.com/kubernetes/sig-release/blob/master/release-engineering/artifacts.md#content-of-kubernetes-test-system-archtargz-on-example-of-kubernetes-test-linux-amd64targz-directories-removed-from-list
            # explicitly unzip and strip directories from ginkgo and e2e.test
            tar -xvzf kubernetes-test-linux-amd64.tar.gz --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test
        displayName: "Setup Environment"
        retryCountOnTaskFailure: 5
      # Windows clusters need kube-proxy restarted on every node before tests.
      - ${{ if contains(parameters.os, 'windows') }}:
          - script: |
              set -e
              kubectl apply -f test/integration/manifests/load/privileged-daemonset-windows.yaml
              kubectl rollout status -n kube-system ds privileged-daemonset
              kubectl get pod -n kube-system -l app=privileged-daemonset,os=windows -owide
              pods=`kubectl get pod -n kube-system -l app=privileged-daemonset,os=windows --no-headers | awk '{print $1}'`
              for pod in $pods; do
                kubectl exec -i -n kube-system $pod -- powershell "Restart-Service kubeproxy"
                kubectl exec -i -n kube-system $pod -- powershell "Get-Service kubeproxy"
              done
            workingDirectory: $(ACN_DIR)
            name: kubeproxy
            displayName: Restart Kubeproxy on Windows nodes
            retryCountOnTaskFailure: 3
      - ${{ if eq(parameters.datapath, true) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Datapath
              name: datapath
              ginkgoFocus: '(.*).Networking.should|(.*).Networking.Granular|(.*)kubernetes.api'
              ginkgoSkip: 'SCTP|Disruptive|Slow|hostNetwork|kube-proxy|IPv6'
              os: ${{ parameters.os }}
              processes: 8
              attempts: 10
      - ${{ if eq(parameters.dns, true) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: DNS
              name: dns
              ginkgoFocus: '\[sig-network\].DNS.should'
              ginkgoSkip: 'resolv|256 search'
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      - ${{ if eq(parameters.portforward, true) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Kubectl Portforward
              name: portforward
              ginkgoFocus: '\[sig-cli\].Kubectl.Port'
              ginkgoSkip: ''
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      # Service conformance: non-cilium CNIs run the full suite.
      - ${{ if and( eq(parameters.service, true), contains(parameters.cni, 'cni') ) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Service Conformance
              name: service
              ginkgoFocus: 'Services.*\[Conformance\].*'
              ginkgoSkip: ''
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      # Service conformance for cilium skips an unsupported test case.
      - ${{ if and( eq(parameters.service, true), contains(parameters.cni, 'cilium') ) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Service Conformance|Cilium
              name: service
              ginkgoFocus: 'Services.*\[Conformance\].*'
              ginkgoSkip: 'should serve endpoints on same port and different protocols' # Cilium does not support this feature. For more info on test: https://github.com/kubernetes/kubernetes/blame/e602e9e03cd744c23dde9fee09396812dd7bdd93/test/conformance/testdata/conformance.yaml#L1780-L1788
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      - ${{ if eq(parameters.hostport, true) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Host Port
              name: hostport
              ginkgoFocus: '\[sig-network\](.*)HostPort|\[sig-scheduling\](.*)hostPort'
              ginkgoSkip: 'SCTP|exists conflict' # Skip slow 5 minute test
              os: ${{ parameters.os }}
              processes: 1 # Has a short serial test
              attempts: 3
      - ${{ if and(eq(parameters.hybridWin, true), eq(parameters.os, 'windows')) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: Hybrid Network
              name: hybrid
              ginkgoFocus: '\[sig-windows\].Hybrid'
              ginkgoSkip: ''
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      # DualStack: non-cilium CNIs run the full suite.
      - ${{ if and( eq(parameters.dualstack, true), eq(contains(parameters.cni, 'cilium'), false) ) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: DualStack Test
              name: DualStack
              clusterName: ${{ parameters.clusterName }}
              ginkgoFocus: '\[Feature:IPv6DualStack\]'
              ginkgoSkip: 'SCTP|session affinity'
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3
      - ${{ if and( eq(parameters.dualstack, true), contains(parameters.cni, 'cilium') ) }}:
          - template: k8s-e2e.steps.yaml@ACNTools
            parameters:
              testName: DualStack Test|Cilium
              name: DualStack
              clusterName: ${{ parameters.clusterName }}
              ginkgoFocus: '\[Feature:IPv6DualStack\]'
              ginkgoSkip: 'SCTP|session affinity|should function for service endpoints using hostNetwork' # Cilium dualstack has a known issue with this test https://github.com/cilium/cilium/issues/25135
              os: ${{ parameters.os }}
              processes: 8
              attempts: 3

Просмотреть файл

@ -0,0 +1,67 @@
# Step template that invokes the upstream k8s E2E binaries (ginkgo/e2e.test,
# downloaded by the calling job) with a focus/skip pair. Windows runs taint
# Linux system nodes for the duration of the run (and vice versa) so the
# suite lands on the intended OS, then untaints on the way out.
parameters:
  testName: ""
  name: ""
  ginkgoFocus: ""
  ginkgoSkip: ""
  os: ""
  processes: "" # Number of parallel processes
  attempts: ""
steps:
  # NOTE: the ${{ lower(...) }} expressions render to the strings "true"/"false",
  # which bash then executes as commands — so the if-branches are decided at
  # template-expansion time, not at runtime.
  - script: |
      set -ex
      # ginkgoSkip cant handle only |LinuxOnly. Need to have check
      if ${{ lower(and(ge(length(parameters.ginkgoSkip), 1), eq(parameters.os, 'windows'))) }}
      then
        SKIP="|LinuxOnly"
      elif ${{ lower(eq(parameters.os, 'windows')) }}
      then
        SKIP="LinuxOnly"
      fi
      # Taint Linux nodes so that windows tests do not run on them
      if ${{ lower(eq(parameters.os, 'windows')) }}
      then
        kubectl rollout status -n kube-system deployment/konnectivity-agent --timeout=3m
        kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule
      fi
      # Taint Windows nodes so that Linux tests do not run on them
      if ${{ lower(eq(parameters.os, 'linux')) }}
      then
        kubectl taint nodes -l kubernetes.azure.com/mode=user node-role.kubernetes.io/control-plane:NoSchedule
      fi
      # Depreciating flags. Change once k8s minimum version supported is > 1.24
      # nodes -> procs
      # flakeAttempts -> flake-attempts
      # dryRun -> dry-run
      ./ginkgo --nodes=${{ parameters.processes }} \
      ./e2e.test -- \
      --num-nodes=2 \
      --provider=skeleton \
      --ginkgo.focus='${{ parameters.ginkgoFocus }}' \
      --ginkgo.skip="${{ parameters.ginkgoSkip }}$SKIP" \
      --ginkgo.flakeAttempts=${{ parameters.attempts }} \
      --ginkgo.v \
      --node-os-distro=${{ parameters.os }} \
      --kubeconfig=$HOME/.kube/config
      # Untaint Linux nodes once testing is complete
      if ${{ lower(eq(parameters.os, 'windows')) }}
      then
        kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule-
      fi
      # Untaint Windows nodes once testing is complete
      if ${{ lower(eq(parameters.os, 'linux')) }}
      then
        kubectl taint nodes -l kubernetes.azure.com/mode=user node-role.kubernetes.io/control-plane:NoSchedule-
      fi
    name: ${{ parameters.name }}
    displayName: k8s E2E - ${{ parameters.testName }}
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 5

Просмотреть файл

@ -0,0 +1,39 @@
# Step template: build one container image for a single (name, os, arch,
# os_version) combination via the repo Makefile, bracketed by ACR login/logout.
parameters:
  arch: ""
  name: ""
  os: ""
  os_version: ""
steps:
  - task: AzureCLI@2
    displayName: "Login"
    inputs:
      azureSubscription: $(ACR_ARM_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      inlineScript: |
        az acr login -n $(ACR)
  # Parameters are passed through env rather than inline ${{ }} so the script
  # body stays constant across template expansions.
  - script: |
      set -e
      if [ "$IN_OS" = 'windows' ]; then export BUILDX_ACTION='--push'; fi
      make "$IMGNAME" OS="$IN_OS" ARCH="$IN_ARCH" OS_VERSION="$IN_OS_VERSION"
    name: image_build
    displayName: Image Build
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    env:
      IMGNAME: '${{ parameters.name }}-image'
      IN_OS: '${{ parameters.os }}'
      IN_ARCH: '${{ parameters.arch }}'
      IN_OS_VERSION: '${{ parameters.os_version }}'
  - task: AzureCLI@2
    displayName: "Logout"
    inputs:
      azureSubscription: $(ACR_ARM_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      inlineScript: |
        docker logout

Просмотреть файл

@ -0,0 +1,54 @@
# Step template: assemble and push a multi-arch/multi-OS manifest list for one
# component, archive the images with skopeo into the staging directory, and
# attach an SBOM to the build drop.
parameters:
  name: ""
  platforms: ""
  os_versions: ""
steps:
  - task: AzureCLI@2
    displayName: "Login"
    inputs:
      azureSubscription: $(ACR_ARM_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      inlineScript: |
        az acr login -n $(ACR)
  - script: |
      set -e
      make ${{ parameters.name }}-manifest-build PLATFORMS="${{ parameters.platforms }}" OS_VERSIONS="${{ parameters.os_versions }}"
    workingDirectory: $(ACN_DIR)
    name: manifest_build
    displayName: Manifest Build
    retryCountOnTaskFailure: 3
  # XDG_RUNTIME_DIR is exported for skopeo's rootless container storage.
  - script: |
      set -ex
      echo "checking XDG_RUNTIME_DIR"
      echo $XDG_RUNTIME_DIR
      make ${{ parameters.name }}-manifest-push
      mkdir -p $(Build.ArtifactStagingDirectory)/images
      echo "setting XDG_RUNTIME_DIR"
      export XDG_RUNTIME_DIR=/run/user/$(id -u)
      echo $XDG_RUNTIME_DIR
      make ${{ parameters.name }}-skopeo-archive IMAGE_ARCHIVE_DIR=$(Build.ArtifactStagingDirectory)/images
    name: manifest_push
    displayName: Manifest Push
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
  - task: AzureCLI@2
    displayName: "Logout"
    inputs:
      azureSubscription: $(ACR_ARM_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      inlineScript: |
        docker logout
  - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0
    displayName: "Add SBOM Generator tool"
    inputs:
      BuildDropPath: "$(Build.ArtifactStagingDirectory)"

Просмотреть файл

@ -0,0 +1,69 @@
# Stage template: create a Swiftv2 multitenancy AKS cluster, then run the
# Swiftv2 E2E suite against it. Produces two stages: one named after
# clusterName (create) and one named after name (E2E).
parameters:
  name: ""
  displayName: ""
  clusterType: ""
  clusterName: ""
  vmSize: ""
  k8sVersion: ""
  dependsOn: ""
  nodePoolName: ""
  continueOnError: true
stages:
  - stage: ${{ parameters.clusterName }}
    displayName: Create Cluster - ${{ parameters.displayName }}
    dependsOn:
      - ${{ parameters.dependsOn }}
      - setup
    variables:
      # commitID is produced by the setup stage and suffixes the cluster name.
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - template: ../templates/create-cluster-swiftv2.jobs.yaml@ACNTools
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
          clusterType: ${{ parameters.clusterType }}
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          vmSize: ${{ parameters.vmSize }}
          k8sVersion: ${{ parameters.k8sVersion }}
          dependsOn: ${{ parameters.dependsOn }}
          continueOnError: ${{ parameters.continueOnError }}
          region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has specific region requirements
  - stage: ${{ parameters.name }}
    # NOTE(review): the stage name "mtacluster" is hard-coded here because
    # template parameters cannot be used inside dependency expressions.
    condition: and( succeeded(), not(eq(dependencies.mtacluster.result,'SucceededWithIssues')) ) # Can't use parameters in dependencies
    displayName: E2E - ${{ parameters.displayName }}
    dependsOn:
      - setup
      - publish
      - ${{ parameters.clusterName }}
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
      GOBIN: "$(GOPATH)/bin" # Go binaries path
      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
    jobs:
      - job: ${{ parameters.name }}
        displayName: Swiftv2 Multitenancy E2E Test Suite - (${{ parameters.name }})
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: swiftv2-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: linux

Просмотреть файл

@ -0,0 +1,78 @@
# Step template: bring up Swiftv2 pod-network resources (PodNetwork, PNI) on
# an existing multitenant cluster, schedule two multitenant pods on distinct
# nodes using the reserved IPs, then run the Go-based pod-to-pod test.
parameters:
  name: ""
  clusterName: ""
  continueOnError: true
steps:
  - bash: |
      go version
      go env
      mkdir -p '$(GOBIN)'
      mkdir -p '$(GOPATH)/pkg'
      mkdir -p '$(modulePath)'
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
    name: "GoEnv"
    displayName: "Set up the Go environment"
  - task: KubectlInstaller@0
    inputs:
      kubectlVersion: latest
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
        ls -lah
        pwd
        kubectl cluster-info
        kubectl get po -owide -A
        echo "Apply the pod network yaml to start the delegation"
        less test/integration/manifests/swiftv2/podnetwork.yaml
        envsubst '${SUBNET_TOKEN},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
        echo "Check the podnetwork yaml file"
        less test/integration/manifests/swiftv2/podnetwork.yaml
        kubectl get pn
        kubectl describe pn
        echo "Apply the pod network instance yaml to reserve IP"
        kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
        kubectl get pni
        kubectl describe pni
        export NODE_NAME_0="$(kubectl get nodes -o json | jq -r .items[0].metadata.name)"
        echo $NODE_NAME_0
        echo "Start the first pod using the reserved IP"
        envsubst '$NODE_NAME_0' < test/integration/manifests/swiftv2/mtpod0.yaml | kubectl apply -f -
        export NODE_NAME_1="$(kubectl get nodes -o json | jq -r .items[1].metadata.name)"
        echo $NODE_NAME_1
        echo "Start another pod using the reserved IP"
        envsubst '$NODE_NAME_1' < test/integration/manifests/swiftv2/mtpod1.yaml | kubectl apply -f -
        sleep 2m
        kubectl get pod -o wide -A
        sleep 2m
        echo "Check pods after 4 minutes"
        kubectl get po -owide -A
        kubectl describe pni
    name: "start_swiftv2_pods"
    displayName: "Start Swiftv2 Pods"
    continueOnError: ${{ parameters.continueOnError }}
    env:
      SUBNET_TOKEN: $(SUBNET_TOKEN)
  - script: |
      set -e
      kubectl get po -owide -A
      cd test/integration/swiftv2
      echo "TestSwiftv2PodToPod and will run it after migration from scripts."
      # Build tags are given once; the original passed -tags twice
      # ("-tags swiftv2" then "-tags=swiftv2,integration") and only the
      # last one takes effect, so the redundant first flag is removed.
      go test -count=1 swiftv2_test.go -timeout 3m -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration -v
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "Swiftv2_Tests_future_version"
    displayName: "Swiftv2 Tests through code"
    continueOnError: ${{ parameters.continueOnError }}

Просмотреть файл

@ -0,0 +1,574 @@
# Main ACN pipeline template. The trigger* parameters carry context from the
# triggering build; setup exports StorageID/commitID/Tag/npmVersion as stage
# outputs consumed by all downstream stages (via stagedependencies.setup.env).
parameters:
  - name: triggerBuildSourceBranch
    type: string
    default: ''
  - name: triggerBuildReason
    type: string
    default: ''
  - name: triggerBuildGitRef
    type: string
    default: ''
  - name: triggerBuildQueuedBy
    type: string
    default: ''
stages:
  - stage: setup
    displayName: ACN
    # Block build start until pre-build validation occurs.
    dependsOn: pre_build
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - job: env
        displayName: Setup
        pool:
          isCustom: true
          type: linux
          name: "$(BUILD_POOL_NAME_DEFAULT)"
        steps:
          - checkout: ACNTools
            clean: true
          - script: |
              # To use the variables below, you must make the respective stage's dependsOn have - setup or it will not retain context of this stage
              BUILD_NUMBER=$(Build.BuildNumber)
              echo "##vso[task.setvariable variable=StorageID;isOutput=true]$(echo ${BUILD_NUMBER//./-})"
              echo "##vso[task.setvariable variable=commitID;isOutput=true]$(echo $(make revision)-$(date "+%d%H%M"))"
              echo "##vso[task.setvariable variable=Tag;isOutput=true]$(make version)"
              echo "##vso[task.setvariable variable=npmVersion;isOutput=true]$(make npm-version)"
              cat /etc/os-release
              uname -a
              sudo chown -R $(whoami):$(whoami) .
              go version
              go env
              which go
              echo $PATH
            name: "EnvironmentalVariables"
            displayName: "Set environmental variables"
  # Unit tests run as their own stage(s), expanded from the shared template.
  - template: templates/unit-tests.stages.yaml@ACNTools
  # Everything below (build, containerize, publish, E2E, delete) is skipped
  # for pull-request refs.
  - ${{ if not(contains(parameters.triggerBuildSourceBranch, 'refs/pull')) }}:
    - stage: binaries
      displayName: Build Binaries
      dependsOn:
        - setup
        - test
      variables:
        ACN_DIR: $(Build.SourcesDirectory)
      jobs:
        - job: build
          displayName: Build Binaries
          variables:
            STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
            ob_outputDirectory: $(Build.ArtifactStagingDirectory)
            ob_git_checkout: true
          pool:
            isCustom: true
            type: linux
            name: "$(BUILD_POOL_NAME_DEFAULT)"
          steps:
            - checkout: ACNReviewChanges
              clean: true
            - script: |
                make ipv6-hp-bpf-lib
                make all-binaries-platforms
              name: "BuildAllPlatformBinaries"
              displayName: "Build all platform binaries"
              workingDirectory: $(ACN_DIR)
            # Collect the packaged artifacts (.tgz/.zip) under ./output/bins
            # and drop everything else from the output tree.
            - script: |
                mkdir -p ./output/bins
                cd ./output
                find . -name '*.tgz' -print -exec mv -t ./bins/ {} +
                find . -name '*.zip' -print -exec mv -t ./bins/ {} +
                shopt -s extglob
                rm -rf !("bins")
              name: "PrepareArtifacts"
              displayName: "Prepare Artifacts"
            - task: CopyFiles@2
              inputs:
                sourceFolder: "output"
                targetFolder: $(Build.ArtifactStagingDirectory)
              condition: succeeded()
    # Build every component image. amd64 (linux + windows) and linux/arm64 are
    # built by separate jobs on matching build pools; one matrix leg per
    # (component, os, os_version) combination.
    - stage: containerize
      displayName: Build Images
      dependsOn:
        - setup
        - test
      variables:
        ACN_DIR: $(Build.SourcesDirectory)
      jobs:
        - job: containerize_amd64
          displayName: Build Images
          pool:
            isCustom: true
            type: linux
            name: "$(BUILD_POOL_NAME_LINUX_AMD64)"
          strategy:
            maxParallel: 4
            # NOTE(review): linux legs define no os_version, so $(os_version)
            # will not resolve for them — confirm the Make target tolerates
            # the literal/empty value.
            matrix:
              azure_ipam_linux_amd64:
                Suffix: azure_ipam_linux_amd64
                arch: amd64
                name: azure-ipam
                os: linux
              azure_ipam_windows2019_amd64:
                Suffix: azure_ipam_windows2019_amd64
                arch: amd64
                name: azure-ipam
                os: windows
                os_version: ltsc2019
              azure_ipam_windows2022_amd64:
                Suffix: azure_ipam_windows2022_amd64
                arch: amd64
                name: azure-ipam
                os: windows
                os_version: ltsc2022
              cni_linux_amd64:
                Suffix: cni_linux_amd64
                arch: amd64
                name: cni
                os: linux
              cni_windows2019_amd64:
                Suffix: cni_windows2019_amd64
                arch: amd64
                name: cni
                os: windows
                os_version: ltsc2019
              cni_windows2022_amd64:
                Suffix: cni_windows2022_amd64
                arch: amd64
                name: cni
                os: windows
                os_version: ltsc2022
              cni_windows2025_amd64:
                Suffix: cni_windows2025_amd64
                arch: amd64
                name: cni
                os: windows
                os_version: ltsc2025
              cni_dropgz_linux_amd64:
                Suffix: cni_dropgz_linux_amd64
                arch: amd64
                name: cni-dropgz
                os: linux
              cni_dropgz_windows2019_amd64:
                Suffix: cni_dropgz_windows2019_amd64
                arch: amd64
                name: cni-dropgz
                os: windows
                os_version: ltsc2019
              cni_dropgz_windows2022_amd64:
                Suffix: cni_dropgz_windows2022_amd64
                arch: amd64
                name: cni-dropgz
                os: windows
                os_version: ltsc2022
              cns_linux_amd64:
                Suffix: cns_linux_amd64
                arch: amd64
                name: cns
                os: linux
              cns_windows2019_amd64:
                Suffix: cns_windows2019_amd64
                arch: amd64
                name: cns
                os: windows
                os_version: ltsc2019
              cns_windows2022_amd64:
                Suffix: cns_windows2022_amd64
                arch: amd64
                name: cns
                os: windows
                os_version: ltsc2022
              cns_windows2025_amd64:
                Suffix: cns_windows2025_amd64
                arch: amd64
                name: cns
                os: windows
                os_version: ltsc2025
              ipv6_hp_bpf_linux_amd64:
                Suffix: ipv6_hp_bpf_linux_amd64
                arch: amd64
                name: ipv6-hp-bpf
                os: linux
              npm_linux_amd64:
                Suffix: npm_linux_amd64
                arch: amd64
                name: npm
                os: linux
              npm_windows2022_amd64:
                Suffix: npm_windows2022_amd64
                arch: amd64
                name: npm
                os: windows
                os_version: ltsc2022
          variables:
            ob_git_checkout: true
            ob_artifactSuffix: $(Suffix) # this is needed to not collide published artifact containers
            ob_outputDirectory: $(System.ArtifactStagingDirectory)
          steps:
            - checkout: ACNReviewChanges
              clean: true
            - template: containers/container-template.steps.yaml@ACNTools
              parameters:
                arch: $(arch)
                name: $(name)
                os: $(os)
                os_version: $(os_version)
        - job: containerize_linux_arm64
          displayName: Build Images
          pool:
            isCustom: true
            type: linux
            name: "$(BUILD_POOL_NAME_LINUX_ARM64)"
          strategy:
            maxParallel: 4
            matrix:
              azure_ipam_linux_arm64:
                arch: arm64
                name: azure-ipam
                os: linux
                Suffix: azure-ipam-linux-arm64
              cni_linux_arm64:
                arch: arm64
                name: cni
                os: linux
                Suffix: cni-linux-arm64
              # NOTE(review): these two Suffix values lack the -linux-arm64
              # qualifier their siblings carry — confirm intended.
              cni_dropgz_linux_arm64:
                arch: arm64
                name: cni-dropgz
                os: linux
                Suffix: cni-dropgz
              cns_linux_arm64:
                arch: arm64
                name: cns
                os: linux
                Suffix: cns
              ipv6_hp_bpf_linux_arm64:
                arch: arm64
                name: ipv6-hp-bpf
                os: linux
                Suffix: ipv6-hp-bpf-linux-arm64
              npm_linux_arm64:
                arch: arm64
                name: npm
                os: linux
                Suffix: npm-linux-arm64
          variables:
            STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
            ob_outputDirectory: $(Build.ArtifactStagingDirectory)
            ob_git_checkout: true
            ob_artifactSuffix: $(Suffix)
          steps:
            - checkout: ACNReviewChanges
              clean: true
            - template: containers/container-template.steps.yaml@ACNTools
              parameters:
                arch: $(arch)
                name: $(name)
                os: $(os)
    # Stitch the per-arch images built above into multi-arch manifest lists,
    # push them, and archive the images; one matrix leg per component.
    - stage: publish
      displayName: Publish Multiarch Manifests
      dependsOn:
        - containerize
      variables:
        Packaging.EnableSBOMSigning: false
        ACN_DIR: $(Build.SourcesDirectory)
      jobs:
        - job: manifest
          displayName: Compile Manifests
          pool:
            isCustom: true
            type: linux
            name: "$(BUILD_POOL_NAME_DEFAULT)"
          strategy:
            maxParallel: 4
            matrix:
              azure_ipam:
                name: azure-ipam
                os_versions: ltsc2019 ltsc2022
                platforms: linux/amd64 linux/arm64 windows/amd64
                Suffix: azure-ipam
              cni:
                name: cni
                os_versions: ltsc2019 ltsc2022 ltsc2025
                platforms: linux/amd64 linux/arm64 windows/amd64
                Suffix: cni
              cni_dropgz:
                name: cni-dropgz
                os_versions: ltsc2019 ltsc2022
                platforms: linux/amd64 linux/arm64 windows/amd64
                Suffix: cni-dropgz
              cns:
                name: cns
                os_versions: ltsc2019 ltsc2022 ltsc2025
                platforms: linux/amd64 linux/arm64 windows/amd64
                Suffix: cns
              # ipv6-hp-bpf is linux-only, so it carries no os_versions.
              ipv6_hp_bpf:
                name: ipv6-hp-bpf
                platforms: linux/amd64 linux/arm64
                Suffix: ipv6-hp-bpf
              npm:
                name: npm
                os_versions: ltsc2022
                platforms: linux/amd64 linux/arm64 windows/amd64
                Suffix: npm
          variables:
            ob_outputDirectory: $(Build.ArtifactStagingDirectory)
            ob_git_checkout: true
            STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
            # this is needed to not collide published artifact containers
            #ob_artifactBaseName: drop_$(Job.StageName)_$(Job.JobName)_
            ob_artifactSuffix: $(Suffix)
            #artifactName: ${{ ob_artifactBaseName }}${{ name }}
          steps:
            - checkout: ACNReviewChanges
              clean: true
            - template: containers/manifest-template.steps.yaml@ACNTools
              parameters:
                name: $(name)
                os_versions: $(os_versions)
                platforms: $(platforms)
    # E2E stage templates. Each expands into a create-cluster stage plus a
    # test stage; every clusterName below must be mirrored in the delete
    # stage's matrix so the cluster is torn down afterwards.
    # Cilium Podsubnet E2E tests
    - template: singletenancy/cilium/cilium-e2e.jobs.yaml@ACNTools
      parameters:
        name: "cilium_e2e"
        displayName: Cilium
        clusterType: swift-byocni-nokubeproxy-up
        clusterName: "ciliume2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
    # Cilium Overlay E2E tests
    - template: singletenancy/cilium-overlay/cilium-overlay-e2e.jobs.yaml@ACNTools
      parameters:
        name: "cilium_overlay_e2e"
        displayName: Cilium on AKS Overlay
        clusterType: overlay-byocni-nokubeproxy-up
        clusterName: "cilovere2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
    # Cilium Dualstack Overlay E2E tests
    - template: singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.jobs.yaml@ACNTools
      parameters:
        name: "cilium_dualstackoverlay_e2e"
        displayName: Cilium on AKS DualStack Overlay
        os: linux
        clusterType: dualstack-byocni-nokubeproxy-up
        clusterName: "cildsovere2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
    # Cilium Overlay with hubble E2E tests
    - template: singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.jobs.yaml@ACNTools
      parameters:
        name: "cilium_h_overlay_e2e"
        displayName: Cilium on AKS Overlay with Hubble
        clusterType: overlay-byocni-nokubeproxy-up
        clusterName: "cilwhleovere2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
        testHubble: true
    # Azure Overlay E2E tests
    - template: singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.jobs.yaml@ACNTools
      parameters:
        name: "azure_overlay_e2e"
        displayName: Azure Overlay
        os: linux
        clusterType: overlay-byocni-up
        clusterName: "azovere2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
    # AKS Swift E2E tests
    - template: singletenancy/aks-swift/aks-swift-e2e.jobs.yaml@ACNTools
      parameters:
        name: "aks_swift_e2e"
        displayName: AKS Swift Ubuntu
        os: linux
        clusterType: swift-byocni-up
        clusterName: "swifte2e"
        vmSize: Standard_B2ms
        k8sVersion: ""
        dependsOn: "containerize"
    # AKS Swift Vnet Scale E2E tests
    - template: singletenancy/aks-swift/aks-swift-e2e.jobs.yaml@ACNTools
      parameters:
        name: "aks_swift_vnetscale_e2e"
        displayName: AKS Swift Vnet Scale Ubuntu
        os: linux
        clusterType: vnetscale-swift-byocni-up
        clusterName: "vscaleswifte2e"
        vmSize: Standard_B2ms
        k8sVersion: "1.28"
        dependsOn: "containerize"
    # CNIv1 E2E tests
    - template: singletenancy/aks/aks-e2e.jobs.yaml@ACNTools
      parameters:
        name: "aks_ubuntu_22_linux_e2e"
        displayName: AKS Ubuntu 22
        arch: "amd64"
        os: "linux"
        clusterType: linux-cniv1-up
        clusterName: "ubuntu22e2e"
        vmSize: Standard_B2s
        k8sVersion: 1.25
        scaleup: 100
        dependsOn: "containerize"
    - template: singletenancy/aks/aks-e2e.jobs.yaml@ACNTools
      parameters:
        name: "aks_windows_22_e2e"
        displayName: AKS Windows 2022
        arch: amd64
        os: windows
        clusterType: windows-cniv1-up
        clusterName: "win22e2e"
        vmSize: Standard_B2ms
        os_version: "ltsc2022"
        scaleup: 50
        dependsOn: "containerize"
    # CNI dual stack overlay E2E tests
    - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e.jobs.yaml@ACNTools
      parameters:
        name: "dualstackoverlay_e2e"
        displayName: AKS DualStack Overlay
        os: linux
        clusterType: dualstack-overlay-byocni-up
        clusterName: "dsovere2e"
        vmSize: Standard_B2ms
        dependsOn: "containerize"
    # Swiftv2 E2E tests with multitenancy cluster start up
    - template: multitenancy/swiftv2-e2e.jobs.yaml@ACNTools
      parameters:
        name: "swiftv2_e2e"
        displayName: Swiftv2 Multitenancy
        os: linux
        clusterType: swiftv2-multitenancy-cluster-up
        clusterName: "mtacluster"
        nodePoolName: "mtapool"
        vmSize: $(SWIFTV2_MT_CLUSTER_SKU)
        dependsOn: "containerize"
        dummyClusterName: "swiftv2dummy"
        dummyClusterType: "swiftv2-dummy-cluster-up"
        dummyClusterDisplayName: Swiftv2 Multitenancy Dummy Cluster
- stage: delete
displayName: Delete Clusters
condition: always()
dependsOn:
- setup
- azure_overlay_e2e
- aks_swift_e2e
- cilium_e2e
- cilium_overlay_e2e
- cilium_h_overlay_e2e
- aks_ubuntu_22_linux_e2e
- aks_swift_vnetscale_e2e
- aks_windows_22_e2e
- dualstackoverlay_e2e
- cilium_dualstackoverlay_e2e
- swiftv2_e2e
variables:
ACN_DIR: $(Build.SourcesDirectory)
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
jobs:
- job: delete
displayName: Delete Cluster
pool:
isCustom: true
type: linux
name: "$(BUILD_POOL_NAME_DEFAULT)"
strategy:
matrix:
cilium_e2e:
name: cilium_e2e
clusterName: "ciliume2e"
Suffix: cilium_e2e
cilium_overlay_e2e:
name: cilium_overlay_e2e
clusterName: "cilovere2e"
Suffix: cilium_overlay_e2e
cilium_h_overlay_e2e:
name: cilium_h_overlay_e2e
clusterName: "cilwhleovere2e"
Suffix: cilium_h_overlay_e2e
azure_overlay_e2e:
name: azure_overlay_e2e
clusterName: "azovere2e"
Suffix: azure_overlay_e2e
aks_swift_e2e:
name: aks_swift_e2e
clusterName: "swifte2e"
Suffix: aks_swift_e2e
aks_swift_vnetscale_e2e:
name: aks_swift_vnetscale_e2e
clusterName: "vscaleswifte2e"
Suffix: aks_swift_vnetscale_e2e
aks_ubuntu_22_linux_e2e:
name: aks_ubuntu_22_linux_e2e
clusterName: "ubuntu22e2e"
Suffix: aks_ubuntu_22_linux_e2e
aks_windows_22_e2e:
name: aks_windows_22_e2e
clusterName: "win22e2e"
Suffix: aks_windows_22_e2e
dualstackoverlay_e2e:
name: dualstackoverlay_e2e
clusterName: "dsovere2e"
Suffix: dualstackoverlay_e2e
cilium_dualstackoverlay_e2e:
name: cilium_dualstackoverlay_e2e
clusterName: "cildsovere2e"
Suffix: cilium_dualstackoverlay_e2e
swiftv2_e2e:
name: swiftv2_e2e
clusterName: "mtcluster"
Suffix: swiftv2_e2e
swiftv2_dummy_e2e:
name: swiftv2_dummy_e2e
clusterName: "swiftv2dummy"
Suffix: swiftv2_dummy_e2e
variables:
STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
ob_outputDirectory: $(Build.ArtifactStagingDirectory)
ob_git_checkout: true
ob_artifactSuffix: $(Suffix) # this is needed to not collide published artifact containers
steps:
- checkout: ACNReviewChanges
clean: true
- template: templates/delete-cluster.steps.yaml@ACNTools
parameters:
name: $(name)
clusterName: $(clusterName)-$(commitID)
region: $(REGION_AKS_CLUSTER_TEST)

Просмотреть файл

@ -0,0 +1,104 @@
# Stage template: create an AKS Swift (podsubnet) cluster, run the AKS Swift
# E2E suite plus the shared k8s conformance suites against it, and collect
# node logs on failure.
parameters:
  name: ""
  displayName: ""
  # os is referenced below (k8s-e2e template call and failure-log collection)
  # but was not declared, which Azure DevOps rejects; callers already pass it.
  os: ""
  clusterType: ""
  clusterName: ""
  vmSize: ""
  k8sVersion: ""
  dependsOn: ""
stages:
  - stage: ${{ parameters.clusterName }}
    displayName: Create Cluster - ${{ parameters.displayName }}
    dependsOn:
      - ${{ parameters.dependsOn }}
      - setup
    variables:
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - template: ../../templates/create-cluster.jobs.yaml@ACNTools
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
          clusterType: ${{ parameters.clusterType }}
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          vmSize: ${{ parameters.vmSize }}
          k8sVersion: ${{ parameters.k8sVersion }}
          dependsOn: ${{ parameters.dependsOn }}
          region: $(REGION_AKS_CLUSTER_TEST)
  - stage: ${{ parameters.name }}
    displayName: E2E - ${{ parameters.displayName }}
    dependsOn:
      - setup
      - publish
      - ${{ parameters.clusterName }}
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
      TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ]
      CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ]
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
      GOBIN: "$(GOPATH)/bin" # Go binaries path
      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
    # Only run when the manifests published for this build match the repo tag.
    condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION))
    jobs:
      - job: ${{ parameters.name }}
        displayName: Singletenancy AKS Swift Suite - (${{ parameters.name }})
        timeoutInMinutes: 120
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: aks-swift-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              scaleup: 100
      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
        parameters:
          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          os: ${{ parameters.os }}
          dependsOn: ${{ parameters.name }}
          datapath: true
          dns: true
          portforward: true
          hostport: true
          service: true
      - job: failedE2ELogs
        displayName: "Failure Logs"
        dependsOn:
          - ${{ parameters.name }}
          - cni_linux
        condition: failed()
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
        variables:
          ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: ../../templates/log-template.steps.yaml@ACNTools
            parameters:
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: ${{ parameters.os }}
              cni: cniv2

Просмотреть файл

@ -0,0 +1,103 @@
parameters:
name: ""
clusterName: ""
scaleup: ""
steps:
- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
name: "kubeconfig"
displayName: "Set Kubeconfig"
- script: |
ls -lah
pwd
kubectl cluster-info
kubectl get po -owide -A
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZURE_VNET=true CNS_VERSION=$(make cns-version) CNI_VERSION=$(make cni-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "aksswifte2e"
displayName: "Run AKS Swift E2E"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
kubectl get po -owide -A
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cniv2
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3
- script: |
echo "Run wireserver and metadata connectivity Tests"
bash test/network/wireserver_metadata_test.sh
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "WireserverMetadataConnectivityTests"
displayName: "Run Wireserver and Metadata Connectivity Tests"
# Verify CNS async-delete: the script takes CNS down, deletes pods, and checks
# they are cleaned up once CNS returns; afterwards remove the temporary
# `non-existing` nodeSelector the test may leave on the azure-cns daemonset.
- script: |
    cd hack/scripts
    chmod +x async-delete-test.sh
    ./async-delete-test.sh
    # Fix: the previous `if ! [ -z $(...) ]` left the substitution unquoted, so
    # a matching `kubectl get ds` line (multiple words) made `[` error out and
    # the patch branch never ran. Quote the substitution and test with -n.
    if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then
      kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
    fi
  workingDirectory: $(ACN_DIR)
  retryCountOnTaskFailure: 3
  name: "testAsyncDelete"
  displayName: "Verify Async Delete when CNS is down"

Просмотреть файл

@ -0,0 +1,107 @@
# Inputs for the AKS singletenancy (CNI v1) cluster-create + E2E stage pair.
# All values are supplied by the calling pipeline; empty defaults force the
# caller to pass them explicitly.
parameters:
  name: ""          # short identifier; becomes the E2E stage/job name
  displayName: ""   # human-readable stage title
  arch: ""          # image architecture passed to `make cni-image-name-and-tag`
  os: ""            # node OS under test: linux or windows
  clusterType: ""   # hack/aks cluster flavor to create
  clusterName: ""   # base cluster name; `-$(commitID)` is appended below
  vmSize: ""        # VM SKU for the node pool(s)
  k8sVersion: ""    # Kubernetes version for cluster creation
  os_version: ""    # Windows OS version (only used when os == windows)
  scaleup: ""       # pod scale target for the load test
  dependsOn: ""     # upstream stage this cluster-create stage waits on
stages:
- stage: ${{ parameters.clusterName }}
displayName: Create Cluster - ${{ parameters.displayName }}
dependsOn:
- ${{ parameters.dependsOn }}
- setup
variables:
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
ACN_DIR: $(Build.SourcesDirectory)
jobs:
- template: ../../templates/create-cluster.jobs.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
displayName: ${{ parameters.displayName }}
clusterType: ${{ parameters.clusterType }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
vmSize: ${{ parameters.vmSize }}
vmSizeWin: ${{ parameters.vmSize }} # Matching linux vmSize
k8sVersion: ${{ parameters.k8sVersion }}
dependsOn: ${{ parameters.dependsOn }}
region: $(REGION_AKS_CLUSTER_TEST)
- stage: ${{ parameters.name }}
displayName: E2E - ${{ parameters.displayName }}
dependsOn:
- setup
- publish
- ${{ parameters.clusterName }}
variables:
ACN_DIR: $(Build.SourcesDirectory)
GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
GOBIN: "$(GOPATH)/bin" # Go binaries path
modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
jobs:
- job: ${{ parameters.name }}
displayName: Singletenancy AKS - (${{ parameters.name }})
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: aks-e2e.steps.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
arch: ${{ parameters.arch }}
os: ${{ parameters.os }}
os_version: ${{ parameters.os_version }}
scaleup: ${{ parameters.scaleup }}
- template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
parameters:
sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
datapath: true
dns: true
portforward: true
hybridWin: true
service: true
hostport: true
dependsOn: ${{ parameters.name }}
- job: failedE2ELogs
displayName: "Failure Logs"
dependsOn:
- ${{ parameters.name }}
- cni_${{ parameters.os }}
condition: failed()
pool:
type: linux
variables:
ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
ob_git_checkout: true
steps:
- checkout: ACNTools
clean: true
- template: ../../templates/log-template.steps.yaml@ACNTools
parameters:
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
cni: cniv1

Просмотреть файл

@ -0,0 +1,86 @@
parameters:
name: ""
clusterName: ""
arch: ""
os: ""
os_version: ""
scaleup: ""
steps:
- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
echo "Upload CNI"
echo "Deploying on Linux nodes"
if [ "${{parameters.os}}" == "windows" ]; then
export CNI_IMAGE=$(make cni-image-name-and-tag OS='linux' ARCH=${{ parameters.arch }})
echo "CNI image: $CNI_IMAGE"
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
kubectl rollout status daemonset/azure-cni -n kube-system
echo "Deploying on windows nodes"
export CNI_IMAGE=$( make cni-image-name-and-tag OS='windows' ARCH=${{ parameters.arch }} OS_VERSION=${{ parameters.os_version }})
echo "CNI image: $CNI_IMAGE"
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f -
kubectl rollout status daemonset/azure-cni-windows -n kube-system
else
export CNI_IMAGE=$(make cni-image-name-and-tag OS=${{ parameters.os }} ARCH=${{ parameters.arch }})
echo "CNI image: $CNI_IMAGE"
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
kubectl rollout status daemonset/azure-cni -n kube-system
fi
name: "deployCNI"
displayName: "Deploy CNI"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- script: |
kubectl get pods -A -o wide
echo "Deploying test pods"
pushd test/integration/load
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=${{ parameters.os }} go test -count 1 -timeout 30m -tags load -run ^TestLoad$
popd
make test-validate-state OS_TYPE=${{ parameters.os }} CNI_TYPE=cniv1
kubectl delete ns load-test
workingDirectory: $(ACN_DIR)
displayName: "Validate State"
retryCountOnTaskFailure: 3

Просмотреть файл

@ -0,0 +1,199 @@
parameters:
name: ""
displayName: ""
clusterType: ""
clusterName: ""
vmSize: ""
k8sVersion: ""
dependsOn: ""
stages:
- stage: ${{ parameters.clusterName }}
displayName: Create Cluster - ${{ parameters.displayName }}
dependsOn:
- ${{ parameters.dependsOn }}
- setup
variables:
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
ACN_DIR: $(Build.SourcesDirectory)
jobs:
- template: ../../templates/create-cluster.jobs.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
displayName: ${{ parameters.displayName }}
clusterType: ${{ parameters.clusterType }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
vmSize: ${{ parameters.vmSize }}
k8sVersion: ${{ parameters.k8sVersion }}
dependsOn: ${{ parameters.dependsOn }}
region: $(REGION_AKS_CLUSTER_TEST)
- stage: ${{ parameters.name }}
displayName: E2E - ${{ parameters.displayName }}
dependsOn:
- setup
- publish
- ${{ parameters.clusterName }}
variables:
ACN_DIR: $(Build.SourcesDirectory)
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
GOBIN: "$(GOPATH)/bin" # Go binaries path
modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
jobs:
- job: ${{ parameters.name }}_linux
displayName: Azure CNI Overlay Test Suite | Linux - (${{ parameters.name }})
timeoutInMinutes: 120
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: azure-cni-overlay-e2e.steps.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: linux
scaleup: 100
- job: windows_nodepool
displayName: Add Windows Nodepool
dependsOn: ${{ parameters.name }}_linux
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID)
make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }}-$(commitID) VM_SIZE_WIN=${{ parameters.vmSize }}
echo "Windows node are successfully added to v4 Overlay Cluster"
kubectl cluster-info
kubectl get node -owide
kubectl get po -owide -A
name: "Add_Windows_Node"
displayName: "Add windows node on v4 overlay cluster"
- job: ${{ parameters.name }}_windows
displayName: Azure CNI Overlay Test Suite | Windows - (${{ parameters.name }})
timeoutInMinutes: 120
dependsOn: windows_nodepool
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: azure-cni-overlay-e2e.steps.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: windows
scaleup: 50
- template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
parameters:
sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
dependsOn: ${{ parameters.name }}_windows
datapath: true
dns: true
portforward: true
hostport: true
service: true
- template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
parameters:
sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: windows
dependsOn: cni_${{ parameters.os }}
datapath: true
dns: true
portforward: true
hostport: true
service: true
hybridWin: true
- job: failedE2ELogs_linux
displayName: "Linux Failure Logs"
dependsOn:
- ${{ parameters.name }}_linux
- CNI_linux
condition: failed()
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
variables:
ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_linux_Attempt_#$(System.StageAttempt)
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: ../../templates/log-template.steps.yaml@ACNTools
parameters:
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: linux
cni: cniv2
- job: failedE2ELogs_windows
displayName: "Windows Failure Logs"
dependsOn:
- ${{ parameters.name }}_windows
- CNI_windows
condition: failed()
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
variables:
ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_windows_Attempt_#$(System.StageAttempt)
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: ../../templates/log-template.steps.yaml@ACNTools
parameters:
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: windows
cni: cniv2

Просмотреть файл

@ -0,0 +1,152 @@
parameters:
name: ""
clusterName: ""
os: ""
scaleup: ""
steps:
- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
name: "kubeconfig"
displayName: "Set Kubeconfig"
- ${{ if eq(parameters.os, 'linux') }}:
- script: |
echo "Start Integration Tests on Overlay Cluster"
kubectl get po -owide -A
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZURE_CNI_OVERLAY=true VALIDATE_V4OVERLAY=true AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) CNI_VERSION=$(make cni-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 2
name: "integrationTest"
displayName: "Run CNS Integration Tests on AKS Overlay"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
kubectl get po -owide -A
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cniv2
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3
- script: |
echo "Run wireserver and metadata connectivity Tests"
bash test/network/wireserver_metadata_test.sh
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "WireserverMetadataConnectivityTests"
displayName: "Run Wireserver and Metadata Connectivity Tests"
- ${{ if eq(parameters.os, 'windows') }}:
- script: |
nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'`
for node in $nodeList; do
taint=`kubectl describe node $node | grep Taints | awk '{print $2}'`
if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then
kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule-
fi
done
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZURE_CNI_OVERLAY=true VALIDATE_V4OVERLAY=true CNS_VERSION=$(make cns-version) CNI_VERSION=$(make cni-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
name: "WindowsOverlayControlPlaneScaleTests"
displayName: "Windows v4Overlay ControlPlane Scale Tests"
retryCountOnTaskFailure: 2
- script: |
echo "IPv4 Overlay DataPath Test"
cd test/integration/datapath
sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$
workingDirectory: $(ACN_DIR)
name: "WindowsV4OverlayDatapathTests"
displayName: "Windows v4Overlay Datapath Tests"
retryCountOnTaskFailure: 3
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
kubectl get po -owide -A
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=windows go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=windows RESTART_CASE=true CNI_TYPE=cniv2
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3

Просмотреть файл

@ -0,0 +1,100 @@
# Inputs for the Cilium dualstack-overlay cluster-create + E2E stage pair.
parameters:
  name: ""          # short identifier; becomes the E2E stage/job name
  displayName: ""   # human-readable stage title
  clusterType: ""   # hack/aks cluster flavor to create
  clusterName: ""   # base cluster name; `-$(commitID)` is appended below
  vmSize: ""        # VM SKU for the node pool
  k8sVersion: ""    # Kubernetes version for cluster creation
  dependsOn: ""     # upstream stage this cluster-create stage waits on
  # Fix: `parameters.os` is consumed by the k8s-e2e and log-template
  # invocations below but was never declared, expanding to an empty string.
  # Declare it with a linux default (Cilium runs on Linux nodes only),
  # matching the sibling cilium-overlay jobs template.
  os: "linux"
stages:
- stage: ${{ parameters.clusterName }}
displayName: Create Cluster - ${{ parameters.displayName }}
dependsOn:
- ${{ parameters.dependsOn }}
- setup
variables:
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
ACN_DIR: $(Build.SourcesDirectory)
jobs:
- template: ../../templates/create-cluster.jobs.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
displayName: ${{ parameters.displayName }}
clusterType: ${{ parameters.clusterType }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
vmSize: ${{ parameters.vmSize }}
k8sVersion: ${{ parameters.k8sVersion }}
dependsOn: ${{ parameters.dependsOn }}
region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement
- stage: ${{ parameters.name }}
displayName: E2E - ${{ parameters.displayName }}
dependsOn:
- setup
- publish
- ${{ parameters.clusterName }}
variables:
GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
GOBIN: "$(GOPATH)/bin" # Go binaries path
modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
ACN_DIR: $(Build.SourcesDirectory)
jobs:
- job: ${{ parameters.name }}
displayName: Cilium Dualstack Overlay Test Suite - (${{ parameters.name }})
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: cilium-dualstackoverlay-e2e.steps.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
scaleup: 100
- template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
parameters:
sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
cni: cilium
dependsOn: ${{ parameters.name }}
dualstack: true
dns: true
portforward: true
service: true
- job: failedE2ELogs
displayName: "Failure Logs"
dependsOn:
- ${{ parameters.name }}
- cni_${{ parameters.os }}
condition: failed()
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
variables:
ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: ../../templates/log-template.steps.yaml@ACNTools
parameters:
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
cni: cilium

Просмотреть файл

@ -0,0 +1,169 @@
parameters:
name: ""
clusterName: ""
scaleup: ""
steps:
- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
ls -lah
pwd
kubectl cluster-info
kubectl get po -owide -A
echo "install Cilium ${CILIUM_DUALSTACK_VERSION}"
export DIR=${CILIUM_DUALSTACK_VERSION%.*}
echo "installing files from ${DIR}"
echo "deploy Cilium ConfigMap"
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml
# Passes Cilium image to daemonset and deployment
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files
export CILIUM_VERSION_TAG=${CILIUM_DUALSTACK_VERSION}
export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version)
echo "install Cilium ${CILIUM_DUALSTACK_VERSION} onto Overlay Cluster"
envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f -
envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f -
kubectl get po -owide -A
name: "installCilium"
displayName: "Install Cilium on AKS Dualstack Overlay"
- template: ../../templates/cilium-cli.steps.yaml@ACNTools
- script: |
echo "Start Azilium E2E Tests on Overlay Cluster"
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cilium_dualstack VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_OVERLAY=true AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "aziliumTest"
displayName: "Run Azilium E2E on AKS Overlay"
- script: |
kubectl get pods -A
echo "Waiting < 2 minutes for cilium to be ready"
# Ensure Cilium is ready Xm\Xs
cilium status --wait --wait-duration 2m
retryCountOnTaskFailure: 3
name: "CiliumStatus"
displayName: "Cilium Status"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
kubectl get po -owide -A
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cilium_dualstack
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3
- script: |
echo "Run Cilium Connectivity Tests"
cilium status
cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption,!no-unexpected-packet-drops' --force-deploy
ns=`kubectl get ns | grep cilium-test | awk '{print $1}'`
echo "##vso[task.setvariable variable=ciliumNamespace]$ns"
retryCountOnTaskFailure: 3
name: "ciliumConnectivityTests"
displayName: "Run Cilium Connectivity Tests"
- script: |
set -e
kubectl get po -owide -A
cd test/integration/datapath
echo "Dualstack Overlay Linux datapath IPv6 test"
go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true
echo "Dualstack Overlay Linux datapath IPv4 test"
go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "DualStack_Overlay_Linux_Tests"
displayName: "DualStack Overlay Linux Tests"
- script: |
echo "validate pod IP assignment and check systemd-networkd restart"
kubectl get pod -owide -A
# Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change.
# Saves 17 minutes
kubectl delete deploy -n $(ciliumNamespace) echo-external-node
cd test/integration/load
CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$
echo "delete cilium connectivity test resources and re-validate state"
kubectl delete ns $(ciliumNamespace)
kubectl get pod -owide -A
CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$
workingDirectory: $(ACN_DIR)
name: "validatePods"
displayName: "Validate Pods"
- script: |
echo "Run wireserver and metadata connectivity Tests"
bash test/network/wireserver_metadata_test.sh
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "WireserverMetadataConnectivityTests"
displayName: "Run Wireserver and Metadata Connectivity Tests"
# Verify CNS async-delete: the script takes CNS down, deletes pods, and checks
# they are cleaned up once CNS returns; afterwards remove the temporary
# `non-existing` nodeSelector the test may leave on the azure-cns daemonset.
- script: |
    cd hack/scripts
    chmod +x async-delete-test.sh
    ./async-delete-test.sh
    # Fix: the previous `if ! [ -z $(...) ]` left the substitution unquoted, so
    # a matching `kubectl get ds` line (multiple words) made `[` error out and
    # the patch branch never ran. Quote the substitution and test with -n.
    if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then
      kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
    fi
  workingDirectory: $(ACN_DIR)
  retryCountOnTaskFailure: 3
  name: "testAsyncDelete"
  displayName: "Verify Async Delete when CNS is down"

Просмотреть файл

@ -0,0 +1,104 @@
# Inputs for the Cilium overlay cluster-create + E2E stage pair.
parameters:
  name: ""          # short identifier; becomes the E2E stage/job name
  displayName: ""   # human-readable stage title
  clusterType: ""   # hack/aks cluster flavor to create
  clusterName: ""   # base cluster name; `-$(commitID)` is appended below
  vmSize: ""        # VM SKU for the node pool
  k8sVersion: ""    # Kubernetes version for cluster creation
  dependsOn: ""     # upstream stage this cluster-create stage waits on
  os: "linux"       # Cilium E2E runs on Linux nodes only
  testHubble: false # when true, the steps template also exercises Hubble
stages:
- stage: ${{ parameters.clusterName }}
displayName: Create Cluster - ${{ parameters.displayName }}
dependsOn:
- ${{ parameters.dependsOn }}
- setup
variables:
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
ACN_DIR: $(Build.SourcesDirectory)
jobs:
- template: ../../templates/create-cluster.jobs.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
displayName: ${{ parameters.displayName }}
clusterType: ${{ parameters.clusterType }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
vmSize: ${{ parameters.vmSize }}
k8sVersion: ${{ parameters.k8sVersion }}
dependsOn: ${{ parameters.dependsOn }}
region: $(REGION_AKS_CLUSTER_TEST)
- stage: ${{ parameters.name }}
displayName: E2E - ${{ parameters.displayName }}
dependsOn:
- setup
- publish
- ${{ parameters.clusterName }}
variables:
ACN_DIR: $(Build.SourcesDirectory)
commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
GOBIN: "$(GOPATH)/bin" # Go binaries path
modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
jobs:
- job: ${{ parameters.name }}
displayName: Cilium Overlay Test Suite - (${{ parameters.name }})
timeoutInMinutes: 120
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
demands:
- agent.os -equals Linux
- Role -equals $(CUSTOM_E2E_ROLE)
variables:
ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: cilium-overlay-e2e.steps.yaml@ACNTools
parameters:
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
testHubble: ${{ parameters.testHubble }}
scaleup: 100
- template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
parameters:
sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
cni: cilium
dependsOn: ${{ parameters.name }}
datapath: true
dns: true
portforward: true
service: true
- job: failedE2ELogs
displayName: "Failure Logs"
dependsOn:
- ${{ parameters.name }}
- cni_${{ parameters.os }}
condition: failed()
pool:
isCustom: true
type: linux
name: $(BUILD_POOL_NAME_DEFAULT)
variables:
ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
ob_git_checkout: true
steps:
- checkout: ACNReviewChanges
clean: true
- template: ../../templates/log-template.steps.yaml@ACNTools
parameters:
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: ${{ parameters.os }}
cni: cilium

Просмотреть файл

@ -0,0 +1,212 @@
parameters:
  name: ""
  clusterName: ""
  testHubble: false
  scaleup: ""

# Steps template: install a Hubble-enabled Cilium build on an AKS Overlay
# cluster, then run the ACN E2E suites against it: load/scale tests,
# node-restart validation, Cilium/Hubble connectivity, state-file validation,
# and the CNS async-delete check.
steps:
  # Prepare the Go workspace used by the make targets and go tests below.
  - bash: |
      go version
      go env
      mkdir -p '$(GOBIN)'
      mkdir -p '$(GOPATH)/pkg'
      mkdir -p '$(modulePath)'
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
    name: "GoEnv"
    displayName: "Set up the Go environment"
  - task: KubectlInstaller@0
    inputs:
      kubectlVersion: latest
  # Fetch kubeconfig and deploy the Hubble-enabled Cilium manifests matching
  # CILIUM_HUBBLE_VERSION_TAG (minor-version directory derived via ${VAR%.*}).
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
        ls -lah
        export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
        export DIR=${CILIUM_VERSION_TAG%.*}
        echo "installing files from ${DIR}"
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files
        envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f -
        envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f -
        # Use different file directories for nightly and current cilium version
    name: "installCilium"
    displayName: "Install Cilium on AKS Overlay"
  - template: ../../templates/cilium-cli.steps.yaml@ACNTools
  - script: |
      echo "Start Azilium E2E Tests on Overlay Cluster"
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]
      then
          CNS=$(CNS_VERSION) IPAM=$(AZURE_IPAM_VERSION) && echo "Running nightly"
      else
          CNS=$(make cns-version) IPAM=$(make azure-ipam-version)
      fi
      sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_OVERLAY=true AZURE_IPAM_VERSION=${IPAM} CNS_VERSION=${CNS} CLEANUP=true
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "aziliumTest"
    displayName: "Run Azilium E2E on AKS Overlay"
  - script: |
      kubectl get pods -A
      echo "Waiting < 2 minutes for cilium to be ready"
      # Ensure Cilium is ready Xm\Xs
      cilium status --wait --wait-duration 2m
    retryCountOnTaskFailure: 3
    name: "CiliumStatus"
    displayName: "Cilium Status"
  # Reboot every VMSS instance backing the cluster so later steps can verify
  # that pods recover and IP state stays consistent across node restarts.
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        kubectl get po -owide -A
        clusterName=${{ parameters.clusterName }}
        echo "Restarting nodes"
        for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
          make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
        done
    displayName: "Restart Nodes"
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        cd test/integration/load
        # Scale Cluster Up/Down to confirm functioning CNS
        ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
        kubectl get pods -owide -A
        cd ../../..
        echo "Validating Node Restart"
        make test-validate-state OS_TYPE=linux RESTART_CASE=true
        kubectl delete ns load-test
    displayName: "Validate Node Restart"
    retryCountOnTaskFailure: 3
  - script: |
      echo "Run Cilium Connectivity Tests"
      cilium status
      cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy
      ns=`kubectl get ns | grep cilium-test | awk '{print $1}'`
      echo "##vso[task.setvariable variable=ciliumNamespace]$ns"
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "ciliumConnectivityTests"
    displayName: "Run Cilium Connectivity Tests"
  # Hubble checks only run when the caller opts in via testHubble.
  - ${{ if eq( parameters['testHubble'], true) }}:
    - script: |
        echo "enable Hubble metrics server"
        kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
        kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml
        kubectl rollout restart ds cilium -n kube-system
        echo "wait <3 minutes for pods to be ready after restart"
        kubectl rollout status ds cilium -n kube-system --timeout=3m
        kubectl get pods -Aowide
        echo "verify Hubble metrics endpoint is usable"
        go test ./test/integration/networkobservability -v -tags=networkobservability
      workingDirectory: $(ACN_DIR)
      retryCountOnTaskFailure: 3
      name: "HubbleConnectivityTests"
      displayName: "Run Hubble Connectivity Tests"
  - script: |
      echo "validate pod IP assignment and check systemd-networkd restart"
      kubectl get pod -owide -A
      # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change.
      # Saves 17 minutes
      kubectl delete deploy -n $(ciliumNamespace) echo-external-node
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        echo "Check cilium identities in $(ciliumNamespace) namespace during nightly run"
        echo "expect the identities to be deleted when the namespace is deleted"
        kubectl get ciliumidentity | grep cilium-test
      fi
      make test-validate-state
      echo "delete cilium connectivity test resources and re-validate state"
      kubectl delete ns $(ciliumNamespace)
      kubectl get pod -owide -A
      make test-validate-state
    workingDirectory: $(ACN_DIR)
    name: "validatePods"
    displayName: "Validate Pods"
  - script: |
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        kubectl get pod -owide -n $(ciliumNamespace)
        echo "wait for pod and cilium identity deletion in cilium-test namespace"
        while true; do
          pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null)
          if [[ -z "$pods" ]]; then
            echo "No pods found"
            break
          fi
          sleep 2s
        done
        sleep 20s
        echo "Verify cilium identities are deleted from cilium-test"
        # NOTE(review): grep output is piped to jq, which expects valid JSON —
        # confirm this check behaves as intended when identities remain.
        checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')"
        if [[ -n $checkIdentity ]]; then
          echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace"
          exit 1
        else
          printf -- "Identities deleted from $(ciliumNamespace) namespace\n"
        fi
      else
        echo "skip cilium identities check for PR pipeline"
      fi
    name: "CiliumIdentities"
    displayName: "Verify Cilium Identities Deletion"
  - script: |
      echo "Run wireserver and metadata connectivity Tests"
      bash test/network/wireserver_metadata_test.sh
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "WireserverMetadataConnectivityTests"
    displayName: "Run Wireserver and Metadata Connectivity Tests"
  - script: |
      cd hack/scripts
      chmod +x async-delete-test.sh
      ./async-delete-test.sh
      # Quote the command substitution: the unquoted form word-splits the
      # multi-field `kubectl get ds | grep` row inside [ ] and fails with a
      # "binary operator expected" error. [ -n "..." ] is the positive form.
      if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then
        kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
      fi
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "testAsyncDelete"
    displayName: "Verify Async Delete when CNS is down"
  - script: |
      ARTIFACT_DIR=$(Build.ArtifactStagingDirectory)/test-output/
      echo $ARTIFACT_DIR
      sudo rm -rf $ARTIFACT_DIR
      sudo rm -rf test/integration/logs
    workingDirectory: $(ACN_DIR)
    name: "Cleanupartifactdir"
    displayName: "Cleanup artifact dir"
    condition: always()

Просмотреть файл

@ -0,0 +1,102 @@
parameters:
  name: ""
  displayName: ""
  clusterType: ""
  clusterName: ""
  vmSize: ""
  k8sVersion: ""
  dependsOn: ""
  os: "linux"

# Stage template: one stage to create the AKS cluster, one stage to run the
# Cilium Overlay E2E suite plus the shared k8s E2E jobs against it, with a
# log-collection job that only runs on failure.
stages:
  - stage: ${{ parameters.clusterName }}
    displayName: Create Cluster - ${{ parameters.displayName }}
    dependsOn:
      - ${{ parameters.dependsOn }}
      - setup
    variables:
      # commitID comes from the setup stage and suffixes the cluster name.
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - template: ../../templates/create-cluster.jobs.yaml@ACNTools
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
          clusterType: ${{ parameters.clusterType }}
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          vmSize: ${{ parameters.vmSize }}
          k8sVersion: ${{ parameters.k8sVersion }}
          dependsOn: ${{ parameters.dependsOn }}
          region: $(REGION_AKS_CLUSTER_TEST)
  - stage: ${{ parameters.name }}
    displayName: E2E - ${{ parameters.displayName }}
    dependsOn:
      - setup
      - publish
      - ${{ parameters.clusterName }}
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
      GOBIN: "$(GOPATH)/bin" # Go binaries path
      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
    jobs:
      - job: ${{ parameters.name }}
        displayName: Cilium Overlay Test Suite - (${{ parameters.name }})
        timeoutInMinutes: 120
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          # Checks out the git ref under review rather than the default branch.
          - checkout: ACNReviewChanges
            clean: true
          - template: cilium-overlay-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              scaleup: 100
      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
        parameters:
          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          os: ${{ parameters.os }}
          cni: cilium
          dependsOn: ${{ parameters.name }}
          datapath: true
          dns: true
          portforward: true
          service: true
      - job: failedE2ELogs
        displayName: "Failure Logs"
        dependsOn:
          # NOTE(review): the k8s-e2e template declares its job as
          # CNI_${{ parameters.os }}; this reference uses lowercase cni_ —
          # presumably dependsOn matching is case-insensitive; confirm.
          - ${{ parameters.name }}
          - cni_${{ parameters.os }}
        condition: failed()
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
        variables:
          ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: ../../templates/log-template.steps.yaml@ACNTools
            parameters:
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: ${{ parameters.os }}
              cni: cilium

Просмотреть файл

@ -0,0 +1,245 @@
parameters:
  name: ""
  clusterName: ""
  testHubble: false
  scaleup: ""

# Steps template: install Cilium on an AKS Overlay cluster (nightly manifests
# or the pinned minor version), then run load/scale, node-restart,
# connectivity, Hubble, CNS-restart, and async-delete E2E validations.
steps:
  # Prepare the Go workspace used by the make targets and go tests below.
  - bash: |
      go version
      go env
      mkdir -p '$(GOBIN)'
      mkdir -p '$(GOPATH)/pkg'
      mkdir -p '$(modulePath)'
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
    name: "GoEnv"
    displayName: "Set up the Go environment"
  - task: KubectlInstaller@0
    inputs:
      kubectlVersion: latest
  # Nightly runs install from the cilium-nightly manifests; all other runs
  # install the minor-version directory derived from CILIUM_VERSION_TAG.
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
        ls -lah
        pwd
        kubectl cluster-info
        kubectl get po -owide -A
        if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
          FILE_PATH=-nightly
          echo "Running nightly"
          echo "deploy Cilium ConfigMap"
          kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-config.yaml
          # Passes Cilium image to daemonset and deployment
          envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f -
          envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f -
          # Use different file directories for nightly and current cilium version
          kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-agent
          kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator
        else
          echo "install Cilium ${CILIUM_VERSION_TAG}"
          export DIR=${CILIUM_VERSION_TAG%.*}
          echo "installing files from ${DIR}"
          echo "deploy Cilium ConfigMap"
          kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml
          # Passes Cilium image to daemonset and deployment
          kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files
          kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files
          envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f -
          envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f -
        fi
        kubectl get po -owide -A
    name: "installCilium"
    displayName: "Install Cilium on AKS Overlay"
  - template: ../../templates/cilium-cli.steps.yaml@ACNTools
  - script: |
      echo "Start Azilium E2E Tests on Overlay Cluster"
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]
      then
          CNS=$(CNS_VERSION) IPAM=$(AZURE_IPAM_VERSION) && echo "Running nightly"
      else
          CNS=$(make cns-version) IPAM=$(make azure-ipam-version)
      fi
      kubectl get po -owide -A
      sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_OVERLAY=true AZURE_IPAM_VERSION=${IPAM} CNS_VERSION=${CNS} CLEANUP=true
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "aziliumTest"
    displayName: "Run Azilium E2E on AKS Overlay"
  - script: |
      kubectl get po -owide -A
      echo "Waiting < 2 minutes for cilium to be ready"
      # Ensure Cilium is ready Xm\Xs
      cilium status --wait --wait-duration 2m
    retryCountOnTaskFailure: 3
    name: "CiliumStatus"
    displayName: "Cilium Status"
  # Reboot every VMSS instance backing the cluster so later steps can verify
  # that pods recover and IP state stays consistent across node restarts.
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        kubectl get po -owide -A
        clusterName=${{ parameters.clusterName }}
        echo "Restarting nodes"
        for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
          make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
        done
    displayName: "Restart Nodes"
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        cd test/integration/load
        # Scale Cluster Up/Down to confirm functioning CNS
        ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
        kubectl get pods -owide -A
        cd ../../..
        echo "Validating Node Restart"
        make test-validate-state OS_TYPE=linux RESTART_CASE=true
        kubectl delete ns load-test
    displayName: "Validate Node Restart"
    retryCountOnTaskFailure: 3
  - script: |
      echo "Run Cilium Connectivity Tests"
      cilium status
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]
      then
        cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption,!check-log-errors' --force-deploy
      else
        cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy
      fi
      ns=`kubectl get ns | grep cilium-test | awk '{print $1}'`
      echo "##vso[task.setvariable variable=ciliumNamespace]$ns"
    retryCountOnTaskFailure: 3
    name: "ciliumConnectivityTests"
    displayName: "Run Cilium Connectivity Tests"
  # Hubble checks only run when the caller opts in via testHubble.
  - ${{ if eq( parameters['testHubble'], true) }}:
    - script: |
        echo "enable Hubble metrics server"
        kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
        kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml
        kubectl rollout restart ds cilium -n kube-system
        echo "wait <3 minutes for pods to be ready after restart"
        kubectl rollout status ds cilium -n kube-system --timeout=3m
        kubectl get pods -Aowide
        echo "verify Hubble metrics endpoint is usable"
        go test ./test/integration/networkobservability -count=1 -v -tags=networkobservability
      workingDirectory: $(ACN_DIR)
      retryCountOnTaskFailure: 3
      name: "HubbleConnectivityTests"
      displayName: "Run Hubble Connectivity Tests"
  - script: |
      echo "validate pod IP assignment and check systemd-networkd restart"
      kubectl get pod -owide -A
      # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change.
      # Saves 17 minutes
      kubectl delete deploy -n $(ciliumNamespace) echo-external-node
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        echo "Check cilium identities in $(ciliumNamespace) namespace during nightly run"
        echo "expect the identities to be deleted when the namespace is deleted"
        kubectl get ciliumidentity | grep cilium-test
      fi
      make test-validate-state
      echo "delete cilium connectivity test resources and re-validate state"
      kubectl delete ns $(ciliumNamespace)
      kubectl get pod -owide -A
      make test-validate-state
    workingDirectory: $(ACN_DIR)
    name: "validatePods"
    displayName: "Validate Pods"
  - script: |
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        kubectl get pod -owide -n $(ciliumNamespace)
        echo "wait for pod and cilium identity deletion in $(ciliumNamespace) namespace"
        while true; do
          pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null)
          if [[ -z "$pods" ]]; then
            echo "No pods found"
            break
          fi
          sleep 2s
        done
        sleep 20s
        echo "Verify cilium identities are deleted from $(ciliumNamespace)"
        # NOTE(review): grep output is piped to jq, which expects valid JSON —
        # confirm this check behaves as intended when identities remain.
        checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')"
        if [[ -n $checkIdentity ]]; then
          echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace"
          exit 1
        else
          printf -- "Identities deleted from $(ciliumNamespace) namespace\n"
        fi
      else
        echo "skip cilium identities check for PR pipeline"
      fi
    name: "CiliumIdentities"
    displayName: "Verify Cilium Identities Deletion"
  - script: |
      echo "validate pod IP assignment before CNS restart"
      kubectl get pod -owide -A
      make test-validate-state
      echo "restart CNS"
      kubectl rollout restart ds azure-cns -n kube-system
      kubectl rollout status ds azure-cns -n kube-system
      kubectl get pod -owide -A
      echo "validate pods after CNS restart"
      make test-validate-state
    workingDirectory: $(ACN_DIR)
    name: "restartCNS"
    displayName: "Restart CNS and validate pods"
  - script: |
      echo "Run wireserver and metadata connectivity Tests"
      bash test/network/wireserver_metadata_test.sh
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "WireserverMetadataConnectivityTests"
    displayName: "Run Wireserver and Metadata Connectivity Tests"
  - script: |
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        echo "Running nightly, skip async delete test"
      else
        cd hack/scripts
        chmod +x async-delete-test.sh
        ./async-delete-test.sh
        # Quote the command substitution: the unquoted form word-splits the
        # multi-field `kubectl get ds | grep` row inside [ ] and fails with a
        # "binary operator expected" error. [ -n "..." ] is the positive form.
        if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then
          kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
        fi
      fi
    workingDirectory: $(ACN_DIR)
    name: "testAsyncDelete"
    displayName: "Verify Async Delete when CNS is down"

Просмотреть файл

@ -0,0 +1,105 @@
parameters:
  name: ""
  displayName: ""
  clusterType: ""
  clusterName: ""
  vmSize: ""
  k8sVersion: ""
  dependsOn: ""
  os: "linux"

# Stage template: create an AKS cluster, then run the Cilium E2E suite and the
# shared k8s E2E jobs against it. The E2E stage is gated on the built image
# tag matching the published manifest tag.
stages:
  - stage: ${{ parameters.clusterName }}
    displayName: Create Cluster - ${{ parameters.displayName }}
    dependsOn:
      - ${{ parameters.dependsOn }}
      - setup
    variables:
      # commitID comes from the setup stage and suffixes the cluster name.
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - template: ../../templates/create-cluster.jobs.yaml@ACNTools
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
          clusterType: ${{ parameters.clusterType }}
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          vmSize: ${{ parameters.vmSize }}
          k8sVersion: ${{ parameters.k8sVersion }}
          dependsOn: ${{ parameters.dependsOn }}
          region: $(REGION_AKS_CLUSTER_TEST)
  - stage: ${{ parameters.name }}
    displayName: E2E - ${{ parameters.displayName }}
    dependsOn:
      - setup
      - publish
      - ${{ parameters.clusterName }}
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
      TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ]
      CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ]
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
      GOBIN: "$(GOPATH)/bin" # Go binaries path
      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
    # Only run E2E when the freshly built tag equals the published manifest tag.
    condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION))
    jobs:
      - job: ${{ parameters.name }}
        displayName: Cilium Test Suite - (${{ parameters.name }})
        timeoutInMinutes: 120
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          # Checks out the git ref under review rather than the default branch.
          - checkout: ACNReviewChanges
            clean: true
          - template: cilium-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              scaleup: 100
      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
        parameters:
          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          os: ${{ parameters.os }}
          cni: cilium
          dependsOn: ${{ parameters.name }}
          datapath: true
          dns: true
          portforward: true
          service: true
      - job: failedE2ELogs
        displayName: "Failure Logs"
        condition: failed()
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
        dependsOn:
          # NOTE(review): the k8s-e2e template declares its job as
          # CNI_${{ parameters.os }}; this reference uses lowercase cni_ —
          # presumably dependsOn matching is case-insensitive; confirm.
          - ${{ parameters.name }}
          - cni_${{ parameters.os }}
        variables:
          ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_Attempt_#$(System.StageAttempt)
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: ../../templates/log-template.steps.yaml@ACNTools
            parameters:
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: ${{ parameters.os }}
              cni: cilium

Просмотреть файл

@ -0,0 +1,152 @@
parameters:
  name: ""
  clusterName: ""
  scaleup: ""

# Steps template: install a pinned Cilium version on an AKS cluster, then run
# the Azilium E2E suite, node-restart/scale validation, connectivity tests,
# state validation, wireserver checks, and the CNS async-delete test.
steps:
  # Prepare the Go workspace used by the make targets and go tests below.
  - bash: |
      go version
      go env
      mkdir -p '$(GOBIN)'
      mkdir -p '$(GOPATH)/pkg'
      mkdir -p '$(modulePath)'
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
    name: "GoEnv"
    displayName: "Set up the Go environment"
  - task: KubectlInstaller@0
    inputs:
      kubectlVersion: latest
  # Fetch kubeconfig and deploy the Cilium manifests for the minor version
  # derived from CILIUM_VERSION_TAG (via ${VAR%.*}).
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
        ls -lah
        pwd
        kubectl cluster-info
        kubectl get po -owide -A
        echo "install Cilium ${CILIUM_VERSION_TAG}"
        export DIR=${CILIUM_VERSION_TAG%.*}
        echo "installing files from ${DIR}"
        echo "deploy Cilium ConfigMap"
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml
        # Passes Cilium image to daemonset and deployment
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files
        kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files
        envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f -
        envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f -
        kubectl get po -owide -A
    name: "installCilium"
    displayName: "Install Cilium"
  - template: ../../templates/cilium-cli.steps.yaml@ACNTools
  - script: |
      echo "Start Azilium E2E Tests"
      kubectl get po -owide -A
      sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZILIUM=true AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) CLEANUP=true
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "aziliumTest"
    displayName: "Run Azilium E2E"
  - script: |
      kubectl get po -owide -A
      echo "Waiting < 2 minutes for cilium to be ready"
      # Ensure Cilium is ready Xm\Xs
      cilium status --wait --wait-duration 2m
    retryCountOnTaskFailure: 3
    name: "CiliumStatus"
    displayName: "Cilium Status"
  # Reboot every VMSS instance backing the cluster so later steps can verify
  # that pods recover and IP state stays consistent across node restarts.
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        kubectl get po -owide -A
        clusterName=${{ parameters.clusterName }}
        echo "Restarting nodes"
        for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do
          make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val}
        done
    displayName: "Restart Nodes"
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        cd test/integration/load
        # Scale Cluster Up/Down to confirm functioning CNS
        ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
        kubectl get pods -owide -A
        cd ../../..
        echo "Validating Node Restart"
        make test-validate-state OS_TYPE=linux RESTART_CASE=true
        kubectl delete ns load-test
    displayName: "Validate Node Restart"
    retryCountOnTaskFailure: 3
  - script: |
      echo "Run Cilium Connectivity Tests"
      cilium status
      cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy
      ns=`kubectl get ns | grep cilium-test | awk '{print $1}'`
      echo "##vso[task.setvariable variable=ciliumNamespace]$ns"
    retryCountOnTaskFailure: 3
    name: "ciliumConnectivityTests"
    displayName: "Run Cilium Connectivity Tests"
  - script: |
      echo "validate pod IP assignment and check systemd-networkd restart"
      kubectl get pod -owide -A
      # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change.
      # Saves 17 minutes
      kubectl delete deploy -n $(ciliumNamespace) echo-external-node
      make test-validate-state
      echo "delete cilium connectivity test resources and re-validate state"
      kubectl delete ns $(ciliumNamespace)
      kubectl get pod -owide -A
      make test-validate-state
    workingDirectory: $(ACN_DIR)
    name: "validatePods"
    displayName: "Validate Pods"
  - script: |
      echo "Run wireserver and metadata connectivity Tests"
      bash test/network/wireserver_metadata_test.sh
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "WireserverMetadataConnectivityTests"
    displayName: "Run Wireserver and Metadata Connectivity Tests"
  - script: |
      cd hack/scripts
      chmod +x async-delete-test.sh
      ./async-delete-test.sh
      # Quote the command substitution: the unquoted form word-splits the
      # multi-field `kubectl get ds | grep` row inside [ ] and fails with a
      # "binary operator expected" error. [ -n "..." ] is the positive form.
      if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then
        kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
      fi
    workingDirectory: $(ACN_DIR)
    retryCountOnTaskFailure: 3
    name: "testAsyncDelete"
    displayName: "Verify Async Delete when CNS is down"

Просмотреть файл

@ -0,0 +1,201 @@
parameters:
  name: ""
  displayName: ""
  clusterType: ""
  clusterName: ""
  vmSize: ""
  k8sVersion: ""
  dependsOn: ""

# Stage template: create a dual-stack overlay AKS cluster, run the Linux E2E
# suite, add a Windows nodepool, run the Windows E2E suite, then the shared
# k8s E2E jobs for both OSes; failure-log jobs collect diagnostics on failure.
stages:
  - stage: ${{ parameters.clusterName }}
    displayName: Create Cluster - ${{ parameters.displayName }}
    dependsOn:
      - ${{ parameters.dependsOn }}
      - setup
    variables:
      # commitID comes from the setup stage and suffixes the cluster name.
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
      ACN_DIR: $(Build.SourcesDirectory)
    jobs:
      - template: ../../templates/create-cluster.jobs.yaml@ACNTools
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
          clusterType: ${{ parameters.clusterType }}
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          vmSize: ${{ parameters.vmSize }}
          k8sVersion: ${{ parameters.k8sVersion }}
          dependsOn: ${{ parameters.dependsOn }}
          region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement
  - stage: ${{ parameters.name }}
    condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies
    displayName: E2E - ${{ parameters.displayName }}
    dependsOn:
      - setup
      - publish
      - ${{ parameters.clusterName }}
    variables:
      ACN_DIR: $(Build.SourcesDirectory)
      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
      GOBIN: "$(GOPATH)/bin" # Go binaries path
      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
    jobs:
      - job: ${{ parameters.name }}_linux
        displayName: DualStack Overlay Test Suite | Linux - (${{ parameters.name }})
        timeoutInMinutes: 120
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          # Checks out the git ref under review rather than the default branch.
          - checkout: ACNReviewChanges
            clean: true
          - template: dualstackoverlay-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: linux
              scaleup: 100
      # Windows nodepool is only added after the Linux suite has passed.
      - job: windows_nodepool
        displayName: Add Windows Nodepool
        dependsOn: ${{ parameters.name }}_linux
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - task: AzureCLI@2
            inputs:
              azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
              scriptLocation: "inlineScript"
              scriptType: "bash"
              workingDirectory: $(ACN_DIR)
              addSpnToEnvironment: true
              inlineScript: |
                set -e
                make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID)
                make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }}-$(commitID) VM_SIZE_WIN=${{ parameters.vmSize }}
                echo "Windows nodes have been successfully added to DualStack Overlay Cluster"
                kubectl cluster-info
                kubectl get node -owide
                kubectl get po -owide -A
            name: "Add_Windows_Node"
            displayName: "Add windows node"
      - job: ${{ parameters.name }}_windows
        displayName: DualStack Overlay Test Suite | Windows - (${{ parameters.name }})
        timeoutInMinutes: 120
        dependsOn: windows_nodepool
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
          demands:
            - agent.os -equals Linux
            - Role -equals $(CUSTOM_E2E_ROLE)
        variables:
          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: dualstackoverlay-e2e.steps.yaml@ACNTools
            parameters:
              name: ${{ parameters.name }}
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: windows
              scaleup: 50
      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
        parameters:
          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          os: linux
          dependsOn: ${{ parameters.name }}_windows
          dualstack: true
          dns: true
          portforward: true
          hostport: true
          service: true
      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml@ACNTools
        parameters:
          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
          clusterName: ${{ parameters.clusterName }}-$(commitID)
          os: windows
          dependsOn: cni_linux
          dualstack: true
          dns: true
          portforward: true
          service: true
          hostport: true
          hybridWin: true
      - job: failedE2ELogs_linux
        displayName: "Linux Failure Logs"
        dependsOn:
          - CNI_linux # k8s tests
          - ${{ parameters.name }}_linux
          - cni_linux
        condition: failed()
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
        variables:
          ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_linux_Attempt_#$(System.StageAttempt)
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: ../../templates/log-template.steps.yaml@ACNTools
            parameters:
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: linux
              cni: cniv2
      - job: failedE2ELogs_windows
        displayName: "Windows Failure Logs"
        dependsOn:
          - CNI_windows # k8s tests
          - ${{ parameters.name }}_windows
          - cni_windows
        condition: failed()
        pool:
          isCustom: true
          type: linux
          name: $(BUILD_POOL_NAME_DEFAULT)
        variables:
          ob_outputDirectory: $(ACN_DIR)/${{ parameters.clusterName }}-$(commitID)_FailedE2ELogs_windows_Attempt_#$(System.StageAttempt)
          ob_git_checkout: true
        steps:
          - checkout: ACNReviewChanges
            clean: true
          - template: ../../templates/log-template.steps.yaml@ACNTools
            parameters:
              clusterName: ${{ parameters.clusterName }}-$(commitID)
              os: windows
              cni: cniv2

Просмотреть файл

@ -0,0 +1,157 @@
parameters:
name: ""
clusterName: ""
cni: "dualstack"
os: ""
scaleup: ""
steps:
- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
name: "kubeconfig"
displayName: "Set Kubeconfig"
- ${{ if eq(parameters.os, 'linux') }}:
- script: |
kubectl cluster-info
kubectl get node
kubectl get po -owide -A
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=dualstack VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "integrationTest"
displayName: "Run CNS Integration Tests on AKS DualStack Overlay"
- script: |
set -e
kubectl get po -owide -A
cd test/integration/datapath
echo "Dualstack Overlay Linux datapath IPv6 test"
go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true
echo "Dualstack Overlay Linux datapath IPv4 test"
go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
workingDirectory: $(ACN_DIR)
retryCountOnTaskFailure: 3
name: "DualStack_Overlay_Linux_Tests"
displayName: "DualStack Overlay Linux Tests"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=dualstack
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3
- ${{ if eq(parameters.os, 'windows') }}:
- script: |
nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'`
for node in $nodeList; do
taint=`kubectl describe node $node | grep Taints | awk '{print $2}'`
if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then
kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule-
fi
done
sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true
workingDirectory: $(ACN_DIR)
name: "WindowsDualStackOverlayControlPlaneScaleTests"
displayName: "Windows DualStack Overlay ControlPlane Scale Tests"
retryCountOnTaskFailure: 3
- script: |
echo "DualStack Overlay DataPath Test"
cd test/integration/datapath
sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$
workingDirectory: $(ACN_DIR)
name: "WindowsDualStackOverlayDatapathTests"
displayName: "Windows DualStack Overlay Datapath Tests"
retryCountOnTaskFailure: 3
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
set -e
clusterName=${{ parameters.clusterName }}
echo "Restarting nodes"
for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do
make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val}
done
displayName: "Restart Nodes"
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
cd test/integration/load
# Scale Cluster Up/Down to confirm functioning CNS
ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=windows go test -count 1 -timeout 30m -tags load -run ^TestLoad$
kubectl get pods -owide -A
cd ../../..
echo "Validating Node Restart"
make test-validate-state OS_TYPE=windows RESTART_CASE=true CNI_TYPE=cniv2
kubectl delete ns load-test
displayName: "Validate Node Restart"
retryCountOnTaskFailure: 3

Просмотреть файл

@ -0,0 +1,176 @@
parameters:
- name: mainRepoName
type: string
- name: mainRepoRef
type: string
- name: mainRepoCommit
type: string
- name: mainRepoType
type: string
jobs:
- job: trigger
displayName: Test ACN Pull Request Changes
# 4 hour timeout
timeoutInMinutes: 240
steps:
- checkout: azure-container-networking
clean: true
# Guard step: verifies that the primary (extended) template comes from the
# trusted repo, ref, and repo type — i.e. master/feature branch content, NOT
# the PR changes under test. Fails the job if any check does not match.
# Fix: the repo-name and repo-type failure branches previously echoed
# $TMPL_REPO_REF, hiding the value that actually failed validation.
- bash: |
    set -e
    # NOTE(review): IS_TRUE is not set in this step's env — presumably supplied
    # by a variable group; an empty regex matches anything, so confirm.
    [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ $IS_TRUE ]] && set -x || set +x

    # Verify Branch Name
    if [[ $TMPL_REPO_REF =~ $ACCEPTED_REPO_REFS ]]; then
      echo >&2 "##[info]Verification passed."
    else
      echo >&2 "##[error]Verification failed (ref: "$TMPL_REPO_REF")."
      exit 1
    fi

    # Verify Repo Name
    if [[ $TMPL_REPO_NAME =~ $ACCEPTED_REPO_NAME ]]; then
      echo >&2 "##[info]Verification passed."
    else
      # Report the offending repo name, not the ref.
      echo >&2 "##[error]Verification failed (name: "$TMPL_REPO_NAME")."
      exit 1
    fi

    # Verify Repo Type
    if [[ $TMPL_REPO_TYPE =~ $ACCEPTED_REPO_TYPE ]]; then
      echo >&2 "##[info]Verification passed."
    else
      # Report the offending repo type, not the ref.
      echo >&2 "##[error]Verification failed (type: "$TMPL_REPO_TYPE")."
      exit 1
    fi
  displayName: "[Check]Primary Template Extends Master NOT Changes Under Test"
  env:
    TMPL_REPO_REF: '${{ parameters.mainRepoRef }}'
    TMPL_REPO_NAME: '${{ parameters.mainRepoName }}'
    TMPL_REPO_TYPE: '${{ parameters.mainRepoType }}'
    ACCEPTED_REPO_REFS: '^refs/heads/feature/ob-onboard-0$'
    ACCEPTED_REPO_NAME: '^Azure/azure-container-networking$'
    ACCEPTED_REPO_TYPE: '^github$'
- bash: |
set -e
[[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ $IS_TRUE ]] && set -x || set +x
# Get Build Reason
ACN_BUILD_REASON=$(echo -n "$BUILD_REASON")
# Get ACN Git Ref
ACN_BUILD_AZURE_ACN_GIT_REF="$BUILD_SOURCEBRANCH"
# Get Queuer
ACN_BUILD_QUEUEDBY="$BUILD_QUEUEDBY"
# Get Source Branch
ACN_BUILD_SOURCE_BRANCH="$BUILD_SOURCEBRANCH"
# Get System PR Queue Variables
ACN_BUILD_EXTRAPARAMETERS=$(jq -n \
--arg PRID "$SYSTEM_PULLREQUEST_PULLREQUESTID" \
--arg PRNUM "$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" \
--arg MERGEDAT "$SYSTEM_PULLREQUEST_MERGEDAT" \
--arg SRCBRANCH "$SYSTEM_PULLREQUEST_SOURCEBRANCH" \
--arg TARGETBRANCH "$SYSTEM_PULLREQUEST_TARGETBRANCH" \
--arg TARGETBRANCHNAME "$SYSTEM_PULLREQUEST_TARGETBRANCHNAME" \
--arg SRCREPOURI "$SYSTEM_PULLREQUEST_SOURCEREPOSITORYURI" \
--arg SRCCOMMITID "$SYSTEM_PULLREQUEST_SOURCECOMMITID" \
--arg ISFORK "$SYSTEM_PULLREQUEST_ISFORK" \
'{
"pullRequestId": $PRID,
"pullRequestNumber": $PRNUM,
"mergedAt": $MERGEDAT,
"sourceBranch": $SRCBRANCH,
"targetBranch": $TARGETBRANCH,
"targetBranchName": $TARGETBRANCHNAME,
"sourceRepositoryUri": $SRCREPOURI,
"sourceCommitID": $SRCCOMMITID,
"isFork": $ISFORK
}')
echo "$ACN_BUILD_EXTRAPARAMETERS" | jq .
ACN_BUILD_PARAMETERS=$(jq -rcn \
--arg REASON "$ACN_BUILD_REASON" \
--arg REF "$ACN_BUILD_AZURE_ACN_GIT_REF" \
--arg BRANCH "$ACN_BUILD_SOURCE_BRANCH" \
--arg QUEUEDBY "$ACN_BUILD_QUEUEDBY" \
--argjson EXTRA "$ACN_BUILD_EXTRAPARAMETERS" \
'{ "reason": $REASON, "ref": $REF, "queuedBy": $QUEUEDBY, "sourceBranch": $BRANCH, "extra": $EXTRA }')
echo "$ACN_BUILD_PARAMETERS" | jq .
ACN_BUILD_PARAMETERS="TriggerDetails: $ACN_BUILD_PARAMETERS"
echo >&2 "Triggering Pull Request build for ${BUILD_SOURCEBRANCH}."
echo >&2 "##vso[task.setvariable variable=templateParameters]$ACN_BUILD_PARAMETERS"
displayName: Retrieve PR Source Details
- task: TriggerBuild@4
name: trigger
displayName: Trigger Compliant Build
# 3 hour timeout
timeoutInMinutes: 180
inputs:
definitionIsInCurrentTeamProject: false
tfsServer: $(ADO_COMPLIANT_BUILD_PROJECT_URI)
teamProject: $(ADO_COMPLIANT_BUILD_ORG)
buildDefinition: $(ADO_COMPLIANT_PIPELINE_ID)
queueBuildForUserThatTriggeredBuild: true
useSameBranch: false
# master
branchToUse: feature/ob-onboard-0
authenticationMethod: $(ADO_AUTH_METHOD)
password: $(ADO_AUTHORIZATION)
storeInEnvironmentVariable: true
waitForQueuedBuildsToFinish: true
treatPartiallySucceededBuildAsSuccessful: false
downloadBuildArtifacts: false
failTaskIfBuildsNotSuccessful: true
# Refresh every 10 min
# Seconds
waitForQueuedBuildsToFinishRefreshTime: 600
ignoreSslCertificateErrors: false
templateParameters: $(templateParameters)
## Report Build Results
# - task: GitHubComment@0
# displayName: "Post PR Comment"
# condition: canceled()
# inputs:
# gitHubConnection: $(ADO_AUTHORIZATION)
# id: '$(System.PullRequest.PullRequestId)'
# comment: |
# 'The build (id: "$(TRIGGERED_BUILDID)") was canceled.'
# env:
# TRIGGERED_BUILDID: $(TriggeredBuildIds)
#
# - task: GitHubComment@0
# displayName: "Post PR Comment"
# condition: failed()
# inputs:
# gitHubConnection: $(ADO_AUTHORIZATION)
# repositoryName: '$(Build.Repository.Name)'
# id: '$(System.PullRequest.PullRequestId)'
# comment: |
# 'The build (id: "$(TRIGGERED_BUILDID)") failed. Please verify your changes.'
# env:
# TRIGGERED_BUILDID: $(TriggeredBuildIds)
#
# - task: GitHubComment@0
# displayName: "Post PR Comment"
# condition: succeeded()
# inputs:
# gitHubConnection: $(ADO_AUTHORIZATION)
# repositoryName: '$(Build.Repository.Name)'
# id: '$(System.PullRequest.PullRequestId)'
# comment: |
# 'The build (id: "$(TRIGGERED_BUILDID)") succeeded!'
# env:
# TRIGGERED_BUILDID: $(TriggeredBuildIds)

Просмотреть файл

@ -0,0 +1,24 @@
# Installs the Cilium CLI on the build agent, selecting a CLI release that is
# compatible with the cluster's Cilium agent version (CILIUM_VERSION_TAG env).
steps:
- script: |
    echo "install cilium CLI"
    if [[ ${CILIUM_VERSION_TAG} =~ ^1.1[1-3].[0-9]{1,2} ]]; then
      # Agent 1.11-1.13 -> latest CLI from the stable-v0.14 line.
      echo "Cilium Agent Version ${BASH_REMATCH[0]}"
      CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable-v0.14.txt)
    elif [[ ${CILIUM_VERSION_TAG} =~ ^1.14.[0-9]{1,2} ]]; then
      # Agent 1.14 -> pinned CLI version.
      echo "Cilium Agent Version ${BASH_REMATCH[0]}"
      CILIUM_CLI_VERSION=v0.15.22
    else
      # Any other agent version -> latest stable CLI.
      echo "Cilium Agent Version ${CILIUM_VERSION_TAG}"
      CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
    fi
    CLI_ARCH=amd64
    if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
    # Download tarball + checksum, verify, install to /usr/local/bin, clean up.
    curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
    sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
    sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
    rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
    cilium status
    cilium version
  workingDirectory: $(ACN_DIR)
  name: "installCiliumCLI"
  displayName: "Install Cilium CLI"

Просмотреть файл

@ -0,0 +1,47 @@
# Creates an AKS cluster for ACN test runs.
# Fix: the job and steps below reference parameters.name, clusterType,
# clusterName, vmSize and region, none of which were declared — Azure
# Pipelines rejects references to undeclared template parameters. Declaring
# them with empty defaults is backward-compatible for existing callers.
parameters:
  name: ""
  clusterType: ""
  clusterName: "" # Recommended to pass in unique identifier
  vmSize: ""
  region: ""
  os: linux
  continueOnError: true

jobs:
- job: ${{ parameters.name }}
  displayName: Cluster - ${{ parameters.name }}
  pool:
    isCustom: true
    type: linux
    name: $(BUILD_POOL_NAME_DEFAULT)
  variables:
    ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
    ob_git_checkout: true
  steps:
  # Build the cluster from the PR changes under review.
  - checkout: ACNReviewChanges
    clean: true
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        echo "Check az version"
        az version
        # dualstack cluster types require the aks-preview az extension.
        if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
        then
          echo "Install az cli extension preview"
          az extension add --name aks-preview
          az extension update --name aks-preview
        fi
        mkdir -p ~/.kube/
        make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
        make -C ./hack/aks ${{ parameters.clusterType }} \
          AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
          CLUSTER=${{ parameters.clusterName }} \
          VM_SIZE=${{ parameters.vmSize }} OS=${{parameters.os}}
        echo "Cluster successfully created"
    displayName: Cluster - ${{ parameters.clusterType }}
    continueOnError: ${{ parameters.continueOnError }}

Просмотреть файл

@ -0,0 +1,58 @@
# Creates an AKS cluster (optionally with a Windows nodepool) for build
# validation E2E runs.
parameters:
  name: ""
  displayName: ""
  clusterType: ""
  clusterName: "" # Recommended to pass in unique identifier
  vmSize: ""
  vmSizeWin: ""
  k8sVersion: ""
  osSkuWin: "Windows2022" # Currently we only support Windows2022
  dependsOn: ""
  region: ""
  os: linux

jobs:
- job: ${{ parameters.name }}
  displayName: Cluster - ${{ parameters.name }}
  pool:
    isCustom: true
    type: linux
    name: $(BUILD_POOL_NAME_DEFAULT)
  variables:
    ob_outputDirectory: $(Build.ArtifactStagingDirectory)/output
    ob_git_checkout: true
  steps:
  # Build the cluster from the PR changes under review.
  - checkout: ACNReviewChanges
    clean: true
  - task: AzureCLI@2
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      workingDirectory: $(ACN_DIR)
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        echo "Check az version"
        az version
        # dualstack cluster types require the aks-preview az extension.
        if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
        then
          echo "Install az cli extension preview"
          az extension add --name aks-preview
          az extension update --name aks-preview
        fi
        mkdir -p ~/.kube/
        make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
        # NOTE(review): WINDOWS_USERNAME/WINDOWS_PASSWORD are not set in this
        # step — presumably injected via a variable group; confirm.
        make -C ./hack/aks ${{ parameters.clusterType }} \
          AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \
          CLUSTER=${{ parameters.clusterName }} \
          VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
          OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
          WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
        echo "Cluster successfully created"
    displayName: Cluster - ${{ parameters.clusterType }}
    # dualstack creation is flaky/preview — do not hard-fail the stage.
    continueOnError: ${{ contains(parameters.clusterType, 'dualstack') }}

Просмотреть файл

@ -0,0 +1,52 @@
# Parameters for the multitenant-cluster creation steps.
# Fix: the AzureCLI step below references parameters.clusterType, clusterName,
# vmSize, os and continueOnError, none of which were declared — Azure
# Pipelines rejects references to undeclared template parameters. Declaring
# them with defaults is backward-compatible for existing callers.
parameters:
- name: region
  type: string
- name: project_select
  values:
  - cni
- name: project_config
  default:
  - swift-byocni-nokubeproxy-up
  - overlay-byocni-nokubeproxy-up
  - dualstack-byocni-nokubeproxy-up
  - overlay-byocni-nokubeproxy-up # NOTE(review): duplicate entry — confirm intended
  - overlay-byocni-up
  - swift-byocni-up
  - vnetscale-swift-byocni-up
  - linux-cniv1-up
  - windows-cniv1-up
  - dualstack-overlay-byocni-up
  - swiftv2-multitenancy-cluster-up
- name: clusterType
  type: string
  default: ""
- name: clusterName
  type: string
  default: ""
- name: vmSize
  type: string
  default: ""
- name: os
  type: string
  default: linux
- name: continueOnError
  type: boolean
  default: true
# Creates a multitenant AKS cluster for ACN test runs.
# Fix: the VM_SIZE line previously ended with a trailing backslash, which made
# the following `echo "Cluster successfully created"` line a continuation
# argument of the `make` invocation instead of a separate command (compare the
# sibling create-cluster templates, which have no trailing backslash there).
steps:
- task: AzureCLI@2
  inputs:
    azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
    scriptLocation: "inlineScript"
    scriptType: "bash"
    workingDirectory: $(ACN_DIR)
    addSpnToEnvironment: true
    inlineScript: |
      set -e
      echo "Check az version"
      az version
      # dualstack cluster types require the aks-preview az extension.
      if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
      then
        echo "Install az cli extension preview"
        az extension add --name aks-preview
        az extension update --name aks-preview
      fi
      mkdir -p ~/.kube/
      make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
      make -C ./hack/aks ${{ parameters.clusterType }} \
        AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
        CLUSTER=${{ parameters.clusterName }} \
        VM_SIZE=${{ parameters.vmSize }} OS=${{parameters.os}}
      echo "Cluster successfully created"
  displayName: Multitenant Cluster - ${{ parameters.clusterType }}
  continueOnError: ${{ parameters.continueOnError }}

Просмотреть файл

@ -0,0 +1,21 @@
# Tears down the AKS cluster (and its resources) created for a test run.
parameters:
  name: ""
  clusterName: ""
  region: ""

steps:
- task: AzureCLI@2
  inputs:
    azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
    scriptLocation: "inlineScript"
    scriptType: "bash"
    addSpnToEnvironment: true
    inlineScript: |
      # NOTE(review): no `set -e` here — teardown appears to be best-effort;
      # confirm whether individual failures should fail the step.
      echo "Deleting cluster"
      make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
      make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
      make -C ./hack/aks down AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }}
      echo "Cluster and resources down"
  name: delete
  displayName: Delete - ${{ parameters.name }}

Просмотреть файл

@ -0,0 +1,284 @@
# -- Captures --
# CNS, CNI, and Cilium Logs
# CNS, CNI, and Cilium State files
# Daemonset and Deployment Images
# Node Status
# kube-system namespace logs
# Non-ready pods on failure
# -- Controlled by --
# CNI and OS | ${{ parameters.cni }} and ${{ parameters.os }}
# CNS ConfigMap | "ManageEndpointState"
# -- Generates --
# Logs on a per-node basis
# Outputs a singular unique artifact per template call | ${{ parameters.clusterName }}_${{ parameters.jobName }}_Attempt_#$(System.StageAttempt)
# Each artifact is divided into sub-directories
# -- Intent --
# Provide thorough debugging information to understand why CNI test scenarios are failing without having to blindly reproduce
parameters:
clusterName: ""
logType: "failure"
os: ""
cni: ""
jobName: "FailedE2ELogs"
steps:
- task: KubectlInstaller@0
inputs:
kubectlVersion: latest
- task: AzureCLI@2
inputs:
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
workingDirectory: $(ACN_DIR)
addSpnToEnvironment: true
inlineScript: |
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
acnLogs=$OB_OUTPUTDIRECTORY
mkdir -p $acnLogs
echo "Root Directory created: $acnLogs"
echo "##vso[task.setvariable variable=acnLogs]$acnLogs"
kubectl get pods -n kube-system -owide
podList=`kubectl get pods -n kube-system --no-headers | awk '{print $1}'`
mkdir -p $acnLogs/kube-system
echo "Directory created: $acnLogs/kube-system"
for pod in $podList; do
kubectl logs -n kube-system $pod > $acnLogs/kube-system/$pod-logs.txt
echo "$acnLogs/kube-system/$pod-logs.txt"
done
displayName: Kube-System Logs
condition: always()
continueOnError: true # Tends to fail after node restart due to pods still restarting. This should not block other tests or logs from running.
- bash: |
kubectl describe nodes
displayName: Node Status
condition: always()
- bash: |
kubectl get ds -A -owide
echo "Capture daemonset images being used"
dsList=`kubectl get ds -A | grep kube-system | awk '{print $2}'`
for ds in $dsList; do
echo "$ds"
kubectl describe ds -n kube-system $ds | grep Image
done
displayName: Daemonset Images
condition: always()
- bash: |
kubectl get deploy -A -owide
echo "Capture deployment images being used"
deployList=`kubectl get deploy -A | grep kube-system | awk '{print $2}'`
for deploy in $deployList; do
echo "$deploy"
kubectl describe deploy -n kube-system $deploy | grep Image
done
displayName: Deployment Images
condition: always()
- ${{ if eq(parameters.logType, 'failure') }}:
- bash: |
kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
podList=`kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName --no-headers | grep -v Running | awk '{print $1}'`
array=($podList)
if [ -z ${array[0]} ]; then
echo "There are no kube-system pods in a non-ready state."
else
mkdir -p $acnLogs/${{ parameters.os }}non-ready
echo "Directory created: $acnLogs/${{ parameters.os }}non-ready"
echo "Capturing failed pods"
for pod in $podList; do
kubectl describe pod -n kube-system $pod > $acnLogs/${{ parameters.os }}non-ready/$pod.txt
echo "$acnLogs/${{ parameters.os }}non-ready/$pod.txt"
done
fi
displayName: Failure Logs
condition: always()
workingDirectory: $(ACN_DIR)
- ${{ if eq(parameters.os, 'linux') }}:
- bash: |
echo "Ensure that privileged pod exists on each node"
kubectl apply -f test/integration/manifests/load/privileged-daemonset.yaml
kubectl rollout status ds -n kube-system privileged-daemonset
echo "------ Log work ------"
kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide
echo "Capture logs from each linux node. Files located in var/logs/*."
podList=`kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide --no-headers | awk '{print $1}'`
for pod in $podList; do
index=0
files=(`kubectl exec -i -n kube-system $pod -- find ./var/log -maxdepth 2 -name "azure-*" -type f`)
fileBase=(`kubectl exec -i -n kube-system $pod -- find ./var/log -maxdepth 2 -name "azure-*" -type f -printf "%f\n"`)
node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
mkdir -p $(acnLogs)/"$node"_logs/log-output/
echo "Directory created: $(acnLogs)/"$node"_logs/"
for file in ${files[*]}; do
kubectl exec -i -n kube-system $pod -- cat $file > $(acnLogs)/"$node"_logs/log-output/${fileBase[$index]}
echo "Azure-*.log, ${fileBase[$index]}, captured: $(acnLogs)/"$node"_logs/log-output/${fileBase[$index]}"
((index++))
done
if [ ${{ parameters.cni }} = 'cilium' ]; then
file="cilium-cni.log"
kubectl exec -i -n kube-system $pod -- cat var/log/$file > $(acnLogs)/"$node"_logs/log-output/$file
echo "Cilium log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
fi
done
if ! [ ${{ parameters.cni }} = 'cilium' ]; then
echo "------ Privileged work ------"
kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide
echo "Capture State Files from privileged pods"
for pod in $podList; do
node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
mkdir -p $(acnLogs)/"$node"_logs/privileged-output/
echo "Directory created: $(acnLogs)/"$node"_logs/privileged-output/"
file="azure-vnet.json"
kubectl exec -i -n kube-system $pod -- cat /var/run/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
echo "CNI State, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
if [ ${{ parameters.cni }} = 'cniv1' ]; then
file="azure-vnet-ipam.json"
kubectl exec -i -n kube-system $pod -- cat /var/run/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
echo "CNIv1 IPAM, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
fi
done
fi
if [ ${{ parameters.cni }} = 'cilium' ] || [ ${{ parameters.cni }} = 'cniv2' ]; then
echo "------ CNS work ------"
kubectl get pods -n kube-system -l k8s-app=azure-cns
echo "Capture State Files from CNS pods"
cnsPods=`kubectl get pods -n kube-system -l k8s-app=azure-cns --no-headers | awk '{print $1}'`
for pod in $cnsPods; do
managed=`kubectl exec -i -n kube-system $pod -- cat etc/azure-cns/cns_config.json | jq .ManageEndpointState`
node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
mkdir -p $(acnLogs)/"$node"_logs/CNS-output/
echo "Directory created: $(acnLogs)/"$node"_logs/CNS-output/"
file="cnsCache.txt"
kubectl exec -i -n kube-system $pod -- curl localhost:10090/debug/ipaddresses -d {\"IPConfigStateFilter\":[\"Assigned\"]} > $(acnLogs)/"$node"_logs/CNS-output/$file
echo "CNS cache, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
file="azure-cns.json"
kubectl exec -i -n kube-system $pod -- cat /var/lib/azure-network/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
echo "CNS State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
if [[ $managed =~ true ]]; then
file="azure-endpoints.json"
kubectl exec -i -n kube-system $pod -- cat /var/run/azure-cns/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
echo "CNS Managed State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
fi
done
fi
if [ ${{ parameters.cni }} = 'cilium' ]; then
echo "------ Cilium work ------"
kubectl get pods -n kube-system -l k8s-app=cilium
echo "Capture State Files from Cilium pods"
ciliumPods=`kubectl get pods -n kube-system -l k8s-app=cilium --no-headers | awk '{print $1}'`
for pod in $ciliumPods; do
node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
mkdir -p $(acnLogs)/"$node"_logs/Cilium-output/
echo "Directory created: $(acnLogs)/"$node"_logs/Cilium-output/"
file="cilium-endpoint.json"
kubectl exec -i -n kube-system $pod -- cilium endpoint list -o json > $(acnLogs)/"$node"_logs/Cilium-output/$file
echo "Cilium, $file, captured: $(acnLogs)/"$node"_logs/Cilium-output/$file"
done
fi
workingDirectory: $(ACN_DIR)
displayName: Linux Logs
condition: always()
# Windows node log collection: uses a privileged daemonset to pull CNI/CNS
# logs and state files from each Windows node into $(acnLogs).
# Fix: the `managed=` line previously ran `kubectl exec ... -n kube-system pod`
# (the literal string "pod" instead of the loop variable $pod), so the CNS
# config could never be read and ManageEndpointState detection always failed.
- ${{ if eq(parameters.os, 'windows') }}:
  - bash: |
      echo "Ensure that privileged pod exists on each node"
      kubectl apply -f test/integration/manifests/load/privileged-daemonset-windows.yaml
      kubectl rollout status ds -n kube-system privileged-daemonset

      echo "------ Log work ------"
      kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide
      echo "Capture logs from each windows node. Files located in \k"
      podList=`kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide --no-headers | awk '{print $1}'`
      for pod in $podList; do
        files=`kubectl exec -i -n kube-system $pod -- powershell "ls ../../k/azure*.log*" | grep azure | awk '{print $6}'`
        node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
        mkdir -p $(acnLogs)/"$node"_logs/log-output/
        echo "Directory created: $(acnLogs)/"$node"_logs/log-output/"
        for file in $files; do
          kubectl exec -i -n kube-system $pod -- powershell "cat ../../k/$file" > $(acnLogs)/"$node"_logs/log-output/$file
          echo "Azure-*.log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
        done
        if [ ${{ parameters.cni }} = 'cniv2' ]; then
          file="azure-cns.log"
          kubectl exec -i -n kube-system $pod -- cat k/azurecns/$file > $(acnLogs)/"$node"_logs/log-output/$file
          echo "CNS Log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
        fi
      done

      echo "------ Privileged work ------"
      kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide
      echo "Capture State Files from privileged pods"
      for pod in $podList; do
        node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
        mkdir -p $(acnLogs)/"$node"_logs/privileged-output/
        echo "Directory created: $(acnLogs)/"$node"_logs/privileged-output/"
        file="azure-vnet.json"
        kubectl exec -i -n kube-system $pod -- powershell cat ../../k/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
        echo "CNI State, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
        if [ ${{ parameters.cni }} = 'cniv1' ]; then
          file="azure-vnet-ipam.json"
          kubectl exec -i -n kube-system $pod -- powershell cat ../../k/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
          echo "CNI IPAM, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
        fi
      done

      if [ ${{ parameters.cni }} = 'cniv2' ]; then
        echo "------ CNS work ------"
        kubectl get pods -n kube-system -l k8s-app=azure-cns-win --no-headers
        echo "Capture State Files from CNS pods"
        cnsPods=`kubectl get pods -n kube-system -l k8s-app=azure-cns-win --no-headers | awk '{print $1}'`
        for pod in $cnsPods; do
          # Was `... -n kube-system pod --` — must dereference the loop var.
          managed=`kubectl exec -i -n kube-system $pod -- powershell cat etc/azure-cns/cns_config.json | jq .ManageEndpointState`
          node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
          mkdir -p $(acnLogs)/"$node"_logs/CNS-output/
          echo "Directory created: $(acnLogs)/"$node"_logs/CNS-output/"
          file="cnsCache.txt"
          kubectl exec -i -n kube-system $pod -- powershell 'Invoke-WebRequest -Uri 127.0.0.1:10090/debug/ipaddresses -Method Post -ContentType application/x-www-form-urlencoded -Body "{`"IPConfigStateFilter`":[`"Assigned`"]}" -UseBasicParsing | Select-Object -Expand Content' > $(acnLogs)/"$node"_logs/CNS-output/$file
          echo "CNS cache, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
          file="azure-cns.json"
          kubectl exec -i -n kube-system $pod -- powershell cat k/azurecns/azure-cns.json > $(acnLogs)/"$node"_logs/CNS-output/$file
          echo "CNS State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
          if [ $managed = "true" ]; then
            file="azure-endpoints.json"
            kubectl exec -i -n kube-system $pod -- cat k/azurecns/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
            echo "CNS Managed State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
          fi
        done
      fi
    workingDirectory: $(ACN_DIR)
    displayName: Windows Logs
    condition: always()
# - publish: $(System.DefaultWorkingDirectory)/${{ parameters.clusterName }}_${{ parameters.logType }}_Attempt_#$(System.StageAttempt)
# condition: always()
# artifact: ${{ parameters.clusterName }}_${{ parameters.os }}${{ parameters.jobName }}_Attempt_#$(System.StageAttempt)
# name: acnLogs_${{ parameters.logType }}
# displayName: Publish Cluster logs

Просмотреть файл

@ -0,0 +1,66 @@
stages:
- stage: test
displayName: Test ACN
dependsOn:
- setup
jobs:
- job: test
displayName: Run Tests
variables:
STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
ob_git_checkout: true
pool:
isCustom: true
type: linux
name: "$(BUILD_POOL_NAME_DEFAULT)"
steps:
# Test changes under review
- checkout: ACNReviewChanges
clean: true
- script: |
cd azure-container-networking
make tools
# run test, echo exit status code to fd 3, pipe output from test to tee, which splits output to stdout and go-junit-report (which converts test output to report.xml), stdout from tee is redirected to fd 4. Take output written to fd 3 (which is the exit code of test), redirect to stdout, pipe to read from stdout then exit with that status code. Read all output from fd 4 (output from tee) and write to top stdout
{ { { {
sudo -E env "PATH=$PATH" make test-all;
echo $? >&3;
} | tee >(build/tools/bin/go-junit-report > report.xml) >&4;
} 3>&1;
} | { read xs; exit $xs; }
} 4>&1
retryCountOnTaskFailure: 3
name: "Test"
displayName: "Run Tests"
- stage: test_windows
displayName: Test ACN Windows
dependsOn:
- setup
jobs:
- job: test
displayName: Run Tests
variables:
STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
ob_git_checkout: true
pool:
isCustom: true
type: windows
name: "$(BUILD_POOL_NAME_DEFAULT_WINDOWS_ALT)"
steps:
# Test changes under review
- checkout: ACNReviewChanges
clean: true
- script: |
cd azure-container-networking/
cd npm/
go test ./...
cd ../cni/
go test ./...
cd ../platform/
go test ./...
retryCountOnTaskFailure: 3
name: "TestWindows"
displayName: "Run Windows Tests"

52
.pipelines/trigger.yaml Normal file
Просмотреть файл

@ -0,0 +1,52 @@
# Entry pipeline: currently manual-only (trigger/pr disabled below); forwards
# the reviewed repo's identity to the compliant build via the trigger template
# hosted in the azure-container-networking repo.
trigger: none
#pr:
#  branches:
#    include:
#    - master
#    - release/*
#  paths:
#    exclude:
#    - ".devcontainer"
#    - ".hooks"
#    - ".vscode"
#    - ".github"
#    - docs
#
#
#trigger:
#  branches:
#    include:
#    - gh-readonly-queue/master/*
#  tags:
#    include:
#    - "*"

resources:
  repositories:
  # Read-only reference to the ACN repo; the trigger template is pulled from
  # the pinned feature branch, not from the PR under review.
  - repository: azure-container-networking
    type: github
    name: Azure/azure-container-networking
    endpoint: 'Azure-ACN RO Service Connection'
    ref: refs/heads/feature/ob-onboard-0

variables:
  # NOTE(review): runtime expressions over resources.repositories — confirm
  # these resolve at queue time in this ADO org/project.
  REPO_REF: $[ resources.repositories['azure-container-networking'].ref ]
  REPO_COMMIT: $[ resources.repositories['azure-container-networking'].version ]
  REPO_NAME: $[ resources.repositories['azure-container-networking'].name ]
  REPO_TYPE: $[ resources.repositories['azure-container-networking'].type ]
  CHANGESET_COMMIT: $[ resources.repositories['self'].version ]

pool:
  vmImage: ubuntu-latest

extends:
  template: /.pipelines/template.trigger.jobs.yaml@azure-container-networking
  parameters:
    mainRepoRef: $(REPO_REF)
    mainRepoCommit: $(REPO_COMMIT)
    mainRepoName: $(REPO_NAME)
    mainRepoType: $(REPO_TYPE)