Mirror of https://github.com/Azure/orkestra.git
Add E2E testing to Orkestra (#177)
This commit is contained in:
Parent
dd9dc4a76d
Commit
d665d40526
3
Makefile
3
Makefile
|
@ -83,3 +83,6 @@ CONTROLLER_GEN=$(GOBIN)/controller-gen
|
|||
else
|
||||
CONTROLLER_GEN=$(shell which controller-gen)
|
||||
endif
|
||||
|
||||
# Run the end-to-end validation suite against the currently configured cluster.
test-e2e:
	./testing/validation.sh
|
|
@ -0,0 +1,13 @@
|
|||
# Grants the Brigade worker service account cluster-admin rights so E2E test
# jobs can run arbitrary kubectl/helm commands against the cluster.
# NOTE(review): cluster-admin is very broad — acceptable only on throwaway
# test clusters, never in shared environments.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: brigade-crb
  # ClusterRoleBindings are cluster-scoped; this namespace field is ignored.
  namespace: default
subjects:
- kind: ServiceAccount
  namespace: default
  name: brigade-worker
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
|
|
@ -0,0 +1,50 @@
|
|||
// Brigade pipeline for Orkestra E2E testing. On the "exec" event it launches a
// single Ubuntu job that installs the required tooling, deploys Orkestra plus
// the bookinfo example, and then runs the repo's validation target.
const { events, Job } = require('brigadier');

events.on("exec", (brigadeEvent, project) => {
  console.log("Running on exec");

  // One job performs the whole E2E run inside a stock Ubuntu container.
  const runner = new Job("test-runner");
  runner.timeout = 1500000; // milliseconds (25 minutes)
  runner.image = "ubuntu";
  runner.shell = "bash";

  runner.tasks = [
    // Base packages.
    "apt-get update -y",
    "apt-get upgrade -y",
    "apt-get install curl -y",
    "apt-get install sudo -y",
    "apt-get install git -y",
    "apt-get install make -y",
    "apt-get install wget -y",
    "apt-get install jq -y",
    "apt-get install sed -y",
    // kubectl
    "curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.17/bin/linux/amd64/kubectl",
    "chmod +x ./kubectl",
    "sudo mv ./kubectl /usr/local/bin/kubectl",
    "echo installed kubectl",
    // helm
    "curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3",
    "chmod 700 get_helm.sh",
    "./get_helm.sh",
    "echo installed helm",
    // go toolchain (needed by the repo's make targets)
    "wget -c https://golang.org/dl/go1.16.3.linux-amd64.tar.gz",
    "tar -C /usr/local -xzf go1.16.3.linux-amd64.tar.gz",
    "export PATH=$PATH:/usr/local/go/bin",
    "go version",
    // argo CLI
    "curl -sLO https://github.com/argoproj/argo/releases/download/v3.0.2/argo-linux-amd64.gz",
    "gunzip argo-linux-amd64.gz",
    "chmod +x argo-linux-amd64",
    "mv ./argo-linux-amd64 /usr/local/bin/argo",
    "argo version",
    // Deploy Orkestra + bookinfo, wait for the workflow, then validate.
    "git clone https://github.com/Azure/orkestra",
    "echo cloned orkestra",
    "cd orkestra",
    "git checkout remotes/origin/danaya/addtesting",
    "kubectl apply -k ./config/crd",
    "helm install --wait orkestra chart/orkestra/ --namespace orkestra --create-namespace",
    "kubectl apply -f examples/simple/bookinfo.yaml",
    "sleep 30",
    "argo wait bookinfo -n orkestra",
    "make test-e2e"
  ];

  runner.run();
});
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"dependencies": {
|
||||
"@brigadecore/brigade-utils": "0.5.0"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
# Test-runner image for Orkestra E2E jobs: Debian plus kubectl, helm, go,
# the argo CLI and kubebuilder, with the orkestra repo cloned and prepared.
FROM debian:latest

RUN apt-get update -y && \
    apt-get upgrade -y && \
    # All tooling packages in one invocation (runs as root, so no sudo needed).
    apt-get install -y sudo curl git make wget jq && \
    # install kubectl
    curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.17/bin/linux/amd64/kubectl && \
    chmod +x ./kubectl && \
    mv ./kubectl /usr/local/bin/kubectl && \
    # install golang
    wget -c https://golang.org/dl/go1.16.3.linux-amd64.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go1.16.3.linux-amd64.tar.gz && \
    rm go1.16.3.linux-amd64.tar.gz && \
    # export only affects the remainder of this RUN; the ENV below persists it.
    export PATH=$PATH:/usr/local/go/bin && \
    # install argo
    curl -sLO https://github.com/argoproj/argo/releases/download/v3.0.2/argo-linux-amd64.gz && \
    gunzip argo-linux-amd64.gz && \
    chmod +x argo-linux-amd64 && \
    mv ./argo-linux-amd64 /usr/local/bin/argo && \
    # install helm
    curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && \
    chmod 700 get_helm.sh && \
    ./get_helm.sh && \
    rm get_helm.sh && \
    # install kubebuilder
    os=$(go env GOOS) && \
    arch=$(go env GOARCH) && \
    curl -L https://go.kubebuilder.io/dl/2.3.1/${os}/${arch} | tar -xz -C /tmp/ && \
    mv /tmp/kubebuilder_2.3.1_${os}_${arch} /usr/local/kubebuilder && \
    export PATH=$PATH:/usr/local/kubebuilder/bin && \
    git clone https://github.com/Azure/orkestra && \
    cd orkestra && \
    make setup-kubebuilder

# `export PATH` inside RUN does not survive the layer; persist it so go and
# kubebuilder are on PATH for containers started from this image.
ENV PATH="${PATH}:/usr/local/go/bin:/usr/local/kubebuilder/bin"
|
|
@ -0,0 +1,125 @@
|
|||
# Testing Environment Setup
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
* `Argo` - Argo workflow client (Follow the instructions to install the binary from [releases](https://github.com/argoproj/argo/releases))
|
||||
* `Brigade` - [brigade install guide](https://docs.brigade.sh/intro/install/)
|
||||
* `brig` - [brig guide](https://docs.brigade.sh/topics/brig/)
|
||||
* `kubectl` - [kubectl install guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
|
||||
* A Kubernetes Cluster
|
||||
|
||||
When testing I used a KIND cluster but Brigade should work for minikube as well. Brigade docs have a section about Minikube and AKS, [found here](https://docs.brigade.sh/intro/install/#notes-for-minikube).
|
||||
|
||||
Before you begin make sure docker and your cluster are running.
|
||||
|
||||
The Dockerfile defines the image the Brigade job runs on. At the moment this Docker image is not used; it should be uploaded to DockerHub so that our brigade.js can pull it. For now, brigade.js performs the setup and fetches all the dependencies itself. After installing Brigade, you should see the following Brigade pods running:
|
||||
|
||||
```
|
||||
helm install brigade-server brigade/brigade
|
||||
kubectl get pods -A
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
default brigade-server-brigade-api-7656489497-xczb7 1/1 Running 0 3m23s
|
||||
default brigade-server-brigade-ctrl-9d678c8bc-4h6nf 1/1 Running 0 3m23s
|
||||
default brigade-server-brigade-vacuum-1619128800-q24dh 0/1 Completed 0 34s
|
||||
default brigade-server-kashti-6ff4d6c99c-2dg87 1/1 Running 0 3m23s
|
||||
```
|
||||
|
||||
Using brig we will create a sample project. For our testing we just use all the defaults. The brigade.js path for us would be `testing/brigade.js`.
|
||||
|
||||
```
|
||||
brig project create
|
||||
? VCS or no-VCS project? no-VCS
|
||||
? Project Name mysampleproject
|
||||
? Add secrets? No
|
||||
? Secret for the Generic Gateway (alphanumeric characters only). Press Enter if you want it to be auto-generated
|
||||
Auto-generated Generic Gateway Secret: FPK8O
|
||||
? Default script ConfigMap name
|
||||
? Upload a default brigade.js script <PATH_TO_BRIGADE.js>
|
||||
? Default brigade.json config ConfigMap name
|
||||
? Upload a default brigade.json config
|
||||
? Configure advanced options No
|
||||
```
|
||||
|
||||
Confirm your sample project was created,
|
||||
|
||||
```
|
||||
brig project list
|
||||
NAME ID REPO
|
||||
mysampleproject brigade-a50ed8c1dbd7fa803b75f009f893b56bfd12347cadb1e404c12 github.com/brigadecore/empty-testbed
|
||||
```
|
||||
|
||||
To give our brigade jobs the ability to access our kubectl commands we have to apply the binding.yml file onto our cluster. This file gives the brigade jobs permissions for various kubectl commands.
|
||||
|
||||
```
|
||||
cd testing
|
||||
kubectl apply -f binding.yml
|
||||
```
|
||||
|
||||
We also want to run the argo server so we can view the workflow, and so our validation tests can check if the workflow pods were deployed successfully.
|
||||
|
||||
```
|
||||
argo server
|
||||
```
|
||||
|
||||
Now we can run our brigade.js file on our cluster to verify orkestra is working.
|
||||
|
||||
```
|
||||
cd testing
|
||||
brig run -f brigade.js mysampleproject
|
||||
Event created. Waiting for worker pod named "brigade-worker-01f47mb971tp4f3k6erx8fxhrr".
|
||||
Build: 01f47mb971tp4f3k6erx8fxhrr, Worker: brigade-worker-01f47mb971tp4f3k6erx8fxhrr
|
||||
prestart: no dependencies file found
|
||||
[brigade] brigade-worker version: 1.2.1
|
||||
[brigade:k8s] Creating PVC named brigade-worker-01f47mb971tp4f3k6erx8fxhrr
|
||||
Running on exec
|
||||
[brigade:k8s] Creating secret test-runner-01f47mb971tp4f3k6erx8fxhrr
|
||||
[brigade:k8s] Creating pod test-runner-01f47mb971tp4f3k6erx8fxhrr
|
||||
[brigade:k8s] Timeout set at 1500000 milliseconds
|
||||
[brigade:k8s] Pod not yet scheduled
|
||||
[brigade:k8s] default/test-runner-01f47mb971tp4f3k6erx8fxhrr phase Pending
|
||||
[brigade:k8s] default/test-runner-01f47mb971tp4f3k6erx8fxhrr phase Running
|
||||
done
|
||||
[brigade:k8s] default/test-runner-01f47mb971tp4f3k6erx8fxhrr phase Running
|
||||
```
|
||||
|
||||
Upon completion of the test runner we should see,
|
||||
```
|
||||
[brigade:k8s] default/test-runner-01f47mb971tp4f3k6erx8fxhrr phase Running
|
||||
done
|
||||
[brigade:k8s] default/test-runner-01f47mb971tp4f3k6erx8fxhrr phase Succeeded
|
||||
done
|
||||
[brigade:app] after: default event handler fired
|
||||
[brigade:app] beforeExit(2): destroying storage
|
||||
[brigade:k8s] Destroying PVC named brigade-worker-01f47mb971tp4f3k6erx8fxhrr
|
||||
```
|
||||
|
||||
To check the logs of the test runner and validations,
|
||||
|
||||
```
|
||||
brig build logs --last --jobs
|
||||
```
|
||||
|
||||
Any errors will be output to a default log file, `log.txt` in the testing folder.
|
||||
|
||||
If you need to install the brigadecore-utils at runtime add the --config flag to brig run with the brigade.json file
|
||||
|
||||
```
|
||||
brig run <PROJECT_NAME> --file brigade.js --config brigade.json
|
||||
```
|
||||
|
||||
(Unnecessary since we are not using KindJob anymore) The KindJob object in the Brigade API requires you to allow mount hosts in the project. When creating your project with
|
||||
|
||||
```
|
||||
brig project create
|
||||
```
|
||||
|
||||
Enter Y when asked for advanced options, this will allow you to set allow mount hosts to true.
|
||||
|
||||
|
||||
## Known Issues
|
||||
|
||||
There is a docker related bug tracked here: [issue 5593](https://github.com/docker/for-win/issues/5593), which causes there to be time drift when using Docker for Windows. This prevents debian images from properly installing packages since the system clock is wrong.
|
||||
|
||||
Quick fix: Restart computer or restart docker
|
||||
|
|
@ -0,0 +1,196 @@
|
|||
#!/bin/bash
# End-to-end validation suite for Orkestra. Checks that the orkestra
# deployment, the bookinfo Argo workflow, and the ApplicationGroup status are
# healthy, then injects failures and verifies the group reports Failed.
#
# Usage: validation.sh [-f]
#   -f  append all output to $LOG_FILE instead of printing to stdout

# Expected number of Running pods in the orkestra namespace.
ORKESTRA_RESOURCE_COUNT=6
AMBASSADOR_VERSION="6.6.0"
# Deliberately nonexistent chart version used to force install failures.
BAD_AMBASSADOR_VERSION="100.0.0"
LOG_FILE="OrkestraValidation.log"
OUTPUT_TO_LOG=0
# Global pass/fail counters, incremented by testSuiteMessage.
g_successCount=0
g_failureCount=0

while getopts "f" flag; do
    case "${flag}" in
        f) OUTPUT_TO_LOG=1;;
    esac
done
|
||||
|
||||
# Print a message to stdout, or append it to $LOG_FILE when -f was given.
#   $1 - message text
function outputMessage {
    if [ "$OUTPUT_TO_LOG" -eq 1 ]; then
        # Quote "$1" so messages containing spaces or glob characters are
        # written verbatim instead of being word-split and glob-expanded.
        echo "$1" >> "$LOG_FILE"
    else
        echo "$1"
    fi
}
|
||||
|
||||
# Record one test result or log line via outputMessage.
#   $1 - message kind: TEST_PASS, TEST_FAIL, or LOG (anything else is ignored)
#   $2 - message text
function testSuiteMessage {
    case "$1" in
        "TEST_PASS")
            outputMessage "SUCCESS: $2"
            ((g_successCount++))
            ;;
        "TEST_FAIL")
            outputMessage "FAIL: $2"
            ((g_failureCount++))
            ;;
        "LOG")
            outputMessage "LOG: $2"
            ;;
    esac
}
|
||||
|
||||
# Emit the final pass/fail tallies accumulated by testSuiteMessage.
function summary {
    outputMessage "Success Cases: ${g_successCount}"
    outputMessage "Failure Cases: ${g_failureCount}"
}
|
||||
|
||||
# Truncate the log file so each -f run starts from an empty log.
function resetLogFile {
    : > "$LOG_FILE"
}
|
||||
|
||||
# Verifies the orkestra control plane: the expected number of pods are
# Running and the helm release reports "deployed".
function validateOrkestraDeployment {
    local resources orkestraStatus
    resources=$(kubectl get pods --namespace orkestra 2>> "$LOG_FILE" | grep -i -c running)
    # Default to 0 when kubectl fails and the capture is empty, so the
    # numeric comparison below cannot raise a [ syntax error.
    if [ "${resources:-0}" -ne "$ORKESTRA_RESOURCE_COUNT" ]; then
        testSuiteMessage "TEST_FAIL" "No running orkestra resources. Currently $resources running resources. Expected $ORKESTRA_RESOURCE_COUNT"
    else
        testSuiteMessage "TEST_PASS" "orkestra resources are running"
    fi

    orkestraStatus=$(helm status orkestra -n orkestra 2>> "$LOG_FILE" | grep -c deployed)
    if [ "${orkestraStatus:-0}" -eq 1 ]; then
        testSuiteMessage "TEST_PASS" "orkestra deployed successfully"
    else
        testSuiteMessage "TEST_FAIL" "orkestra not deployed"
    fi
}
|
||||
|
||||
# Verifies that the ambassador release and every bookinfo helm release
# (details, productpage, ratings, reviews, bookinfo) report "deployed".
function validateBookInfoDeployment {
    local ambassadorStatus deployedStatus var
    ambassadorStatus=$(helm status ambassador -n ambassador 2>> "$LOG_FILE" | grep -c deployed)
    # "${...:-0}" guards against an empty capture when helm itself fails.
    if [ "${ambassadorStatus:-0}" -eq 1 ]; then
        testSuiteMessage "TEST_PASS" "ambassador deployed successfully"
    else
        testSuiteMessage "TEST_FAIL" "ambassador not deployed"
    fi

    bookinfoReleaseNames=("details" "productpage" "ratings" "reviews" "bookinfo")

    for var in "${bookinfoReleaseNames[@]}"
    do
        deployedStatus=$(helm status "$var" -n bookinfo 2>> "$LOG_FILE" | grep -c deployed)
        if [ "${deployedStatus:-0}" -eq 1 ]; then
            testSuiteMessage "TEST_PASS" "$var deployed successfully"
        else
            testSuiteMessage "TEST_FAIL" "$var not deployed"
        fi
    done
}
|
||||
|
||||
# Validates that the bookinfo Argo workflow exists and that every expected
# workflow node reached phase "Succeeded".
# Requires `argo server` to be listening on localhost:2746.
function validateArgoWorkflow {
    # Fetch the workflow JSON once and reuse it, instead of re-querying the
    # API server for the existence check and again for every node.
    local workflowJson node status
    workflowJson=$(curl -s --request GET --url http://localhost:2746/api/v1/workflows/orkestra/bookinfo)

    if [ "$(echo "$workflowJson" | grep -c "not found")" -ge 1 ]; then
        testSuiteMessage "TEST_FAIL" "No argo workflow found for bookinfo"
        return
    fi

    # Workflow nodes that must have succeeded for the run to pass.
    requiredNodes=(
        "bookinfo"
        "bookinfo.bookinfo.ratings"
        "bookinfo.ambassador"
        "bookinfo.bookinfo.details"
        "bookinfo.bookinfo.productpage"
        "bookinfo.ambassador.ambassador"
        "bookinfo.bookinfo.reviews"
        "bookinfo.bookinfo.bookinfo"
        "bookinfo.bookinfo"
    )

    for node in "${requiredNodes[@]}"
    do
        status=$(echo "$workflowJson" | jq --arg node "$node" -r '.status.nodes[] | select(.name==$node) | .phase')
        if [ "$status" == "Succeeded" ]; then
            testSuiteMessage "TEST_PASS" "argo node: $node has succeeded"
        else
            testSuiteMessage "TEST_FAIL" "$node status: $status, Expected Succeeded"
        fi
    done
}
|
||||
|
||||
# Validates the bookinfo ApplicationGroup status: the group-level condition,
# the per-application conditions (ambassador, bookinfo), and each bookinfo
# subchart condition.
function validateApplicationGroup {
    # Capture only the .status subtree of the ApplicationGroup.
    applicationGroupJson=$(kubectl get applicationgroup bookinfo -o json | jq '.status')
    # Debug echo of the raw status for the run log.
    echo $applicationGroupJson
    # Group is healthy when a condition has reason=Succeeded AND type=Ready.
    groupCondition=$(echo "$applicationGroupJson" | jq -r '.conditions[] | select(.reason=="Succeeded") | select(.type=="Ready")')
    if [ -n "$groupCondition" ]; then
        testSuiteMessage "TEST_PASS" "ApplicationGroup status correct"
    else
        testSuiteMessage "TEST_FAIL" "ApplicationGroup status expected: (Succeeded, Ready)"
    fi

    # NOTE(review): applicationGroupJson is already `.status`, so this selects
    # `.status.status` of the original object — verify against the
    # ApplicationGroup CRD schema that the applications array really lives
    # there and not directly under `.status`.
    applicationsJson=$(echo "$applicationGroupJson" | jq '.status')
    # Index 0 is assumed to be the ambassador application — TODO confirm
    # ordering matches examples/simple/bookinfo.yaml.
    ambassadorReason=$(echo "$applicationsJson" | jq -r '.[0].conditions[] | select(.reason=="InstallSucceeded")')
    if [ -n "$ambassadorReason" ]; then
        testSuiteMessage "TEST_PASS" "Ambassador status correct"
    else
        testSuiteMessage "TEST_FAIL" "Ambassador status expected: InstallSucceeded"
    fi

    # Index 1 is assumed to be the bookinfo application.
    bookInfoReason=$(echo "$applicationsJson" | jq -r '.[1].conditions[] | select(.reason=="InstallSucceeded")')
    if [ -n "$bookInfoReason" ]; then
        testSuiteMessage "TEST_PASS" "BookInfo status correct"
    else
        testSuiteMessage "TEST_FAIL" "BookInfo status expected: InstallSucceeded"
    fi

    # Each bookinfo subchart must also report InstallSucceeded.
    subcharts=("details" "productpage" "ratings" "reviews")
    for chart in "${subcharts[@]}"
    do
        applicationReason=$(echo "$applicationsJson" | jq -r --arg c "$chart" '.[1].subcharts[$c].conditions[] | select(.reason=="InstallSucceeded")')
        if [ -n "$applicationReason" ]; then
            testSuiteMessage "TEST_PASS" "$chart status correct"
        else
            testSuiteMessage "TEST_FAIL" "$chart status expected: InstallSucceeded"
        fi
    done

}
|
||||
|
||||
# Injects a failure into the already-running deployment:
#  1. appends --disable-remediation to the orkestra controller's container
#     args so the failure is not rolled back automatically, and
#  2. points the first application of the bookinfo ApplicationGroup at the
#     nonexistent chart version $BAD_AMBASSADOR_VERSION.
function applyFailureOnExistingDeployment {
    kubectl get deployments.apps orkestra -n orkestra -o json | jq '.spec.template.spec.containers[].args += ["--disable-remediation"]' | kubectl replace -f -
    kubectl get applicationgroup bookinfo -o json | jq --arg v "$BAD_AMBASSADOR_VERSION" '.spec.applications[0].spec.chart.version = $v' | kubectl replace -f -
}
|
||||
|
||||
# Re-deploys bookinfo from scratch with a bad ambassador chart version so the
# initial install itself fails.
function deployFailure {
    kubectl delete applicationgroup bookinfo
    # Swap the known-good version for the bad one on the fly.
    # NOTE(review): the dots in ${AMBASSADOR_VERSION} are regex wildcards in
    # sed — harmless for "6.6.0" today, but worth escaping if versions change.
    sed "s/${AMBASSADOR_VERSION}/${BAD_AMBASSADOR_VERSION}/g" ./examples/simple/bookinfo.yaml | kubectl apply -f -
    # Give the controller a moment to observe the new resource.
    sleep 5
}
|
||||
|
||||
# Checks that the bookinfo ApplicationGroup reports a condition with
# reason "Failed" after a failure has been injected.
function validateFailedApplicationGroup {
    local statusJson failedCondition
    statusJson=$(kubectl get applicationgroup bookinfo -o json | jq '.status')
    failedCondition=$(echo "$statusJson" | jq -r '.conditions[] | select(.reason=="Failed")')
    if [ -z "$failedCondition" ]; then
        testSuiteMessage "TEST_FAIL" "ApplicationGroup status expected: (Failed)"
    else
        testSuiteMessage "TEST_PASS" "ApplicationGroup status correct"
    fi
}
|
||||
|
||||
# Runs the failure-injection scenarios and validates that the
# ApplicationGroup ends up in a Failed state after each one.
function runFailureScenarios {
    echo Running Failure Scenarios
    applyFailureOnExistingDeployment
    # NOTE(review): no wait between injecting the failure and validating —
    # confirm the status transition is observable immediately, or a sleep
    # may be needed here as in deployFailure.
    validateFailedApplicationGroup
    deployFailure
    validateFailedApplicationGroup
    summary
    echo DONE
}
|
||||
|
||||
# Runs the happy-path validation suite against the current cluster.
function runValidation {
    # Start from a clean log only when logging to file was requested.
    if [ "$OUTPUT_TO_LOG" -eq 1 ]; then
        resetLogFile
    fi
    echo Running Validation
    validateOrkestraDeployment
    # validateBookInfoDeployment
    validateArgoWorkflow
    validateApplicationGroup
    summary
    echo DONE
}
|
||||
|
||||
# Entry point: happy-path validation first, then the failure scenarios.
runValidation
runFailureScenarios
|
Загрузка…
Ссылка в новой задаче