This doesn't include portal tests; those will follow.
Amber Brown 2023-01-31 10:10:05 +11:00, committed by GitHub
Parent 527c109950
Commit 127b0565f1
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
13 changed files: 194 additions and 85 deletions

.gitignore (vendored)

@@ -8,6 +8,7 @@ gomock_reflect_*
/*.key
/*.kubeconfig
/*.pem
/*.tar
/aro
/dev-config.yaml
/e2e.test


@@ -6,6 +6,14 @@ resources:
- pipeline: e2e
source: CI
trigger: true
containers:
- container: container
image: registry.access.redhat.com/ubi8/toolbox:8.7
options: --user=0 --cap-add=NET_ADMIN --device /dev/net/tun --name vpn
- container: selenium
image: docker.io/selenium/standalone-edge:latest
options: --shm-size=2g
# Azure DevOps Pipeline running e2e tests
variables:
@@ -16,40 +24,56 @@ jobs:
- job: E2E
timeoutInMinutes: 180
pool:
name: ARO-CI
demands: go-1.17
name: 1es-aro-ci-pool
#services:
# selenium: selenium
steps:
- template: ./templates/template-checkout.yml
- script: |
set -xe
sudo rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
sudo dnf install -y openvpn make
displayName: Setup (Container)
target: container
- template: ./templates/template-az-cli-login.yml
parameters:
azureDevOpsJSONSPN: $(aro-v4-e2e-devops-spn)
- template: ./templates/template-push-images-to-acr.yml
parameters:
rpImageACR: $(RP_IMAGE_ACR)
buildCommand: publish-image-aro
- script: |
make extract-aro-docker
displayName: Extract ARO binaries from build
- script: |
az account set -s $AZURE_SUBSCRIPTION_ID
SECRET_SA_ACCOUNT_NAME=e2earosecrets make secrets
. secrets/env
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
deploy_e2e_db
displayName: Setup (Azure)
- script: |
export CI=true
. secrets/env
. ./hack/e2e/run-rp-and-e2e.sh
# HACK: we don't currently trap an exit signal to kill the VPN and other processes
# properly, which can leave a stale openvpn process running and cause connection
# issues. This should fix that, but it should be rethought.
sudo pkill openvpn
run_vpn
deploy_e2e_db
run_portal
validate_portal_running
# run_portal
# validate_portal_running
run_rp
validate_rp_running
register_sub
make test-e2e
make test-e2e -o e2e.test
displayName: Execute Tests
target: container
- script: |
export CI=true
@@ -64,21 +88,28 @@ jobs:
tar cf must-gather.tar.gz must-gather.local.*
displayName: Collect must-gather
condition: failed()
target: container
- publish: must-gather.tar.gz
artifact: must-gather
displayName: Append must-gather to Pipeline
condition: failed()
target: container
- script: |
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
delete_e2e_cluster
clean_e2e_db
kill_rp
kill_vpn
displayName: Cleanup
condition: always()
target: container
- script: |
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
clean_e2e_db
displayName: Cleanup (Azure)
- template: ./templates/template-az-cli-logout.yml
- task: PublishTestResults@2


@@ -1,15 +1,14 @@
parameters:
rpImageACR: ''
buildCommand: ''
steps:
- script: |
set -e
trap 'set +e; for c in $(docker ps -aq); do docker rm -f $c; done; docker image prune -af ; rm -rf ~/.docker/config.json; rm -rf /run/user/$(id -u $USERNAME)/containers/auth.json' EXIT
#trap 'set +e; for c in $(docker ps -aq); do docker rm -f $c; done; docker image prune -af ; rm -rf ~/.docker/config.json; rm -rf /run/user/$(id -u $USERNAME)/containers/auth.json' EXIT
export RP_IMAGE_ACR=${{ parameters.rpImageACR }}
az acr login --name "$RP_IMAGE_ACR"
# Azure checks out a specific commit, so remove the master reference when publishing the image
export BRANCH=$(Build.SourceBranchName)
make ${{ parameters.buildCommand }}
make publish-image-e2e
displayName: ⚙️ Build and push images to ACR


@@ -1,3 +1,4 @@
variables:
GOPATH: $(Agent.BuildDirectory)/go
OpenShiftVersion: 4.10.20
ARO_CHECKOUT_PATH: $(Agent.BuildDirectory)/go/src/github.com/Azure/ARO-RP

Dockerfile.aro-e2e (new file)

@@ -0,0 +1,17 @@
# Uses a multi-stage container build to build the RP & E2E components.
#
ARG REGISTRY
FROM ${REGISTRY}/ubi8/go-toolset:1.17.7 AS builder
ENV GOOS=linux \
GOPATH=/go/
WORKDIR ${GOPATH}/src/github.com/Azure/ARO-RP
USER root
COPY . ${GOPATH}/src/github.com/Azure/ARO-RP/
RUN make aro RELEASE=${IS_OFFICIAL_RELEASE} -o generate && make e2e.test e2etools
FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all
COPY --from=builder /go/src/github.com/Azure/ARO-RP/aro /go/src/github.com/Azure/ARO-RP/e2e.test /go/src/github.com/Azure/ARO-RP/db /go/src/github.com/Azure/ARO-RP/cluster /go/src/github.com/Azure/ARO-RP/portalauth /usr/local/bin/
ENTRYPOINT ["aro"]
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp 8445/tcp
USER 1000
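
A minimal usage sketch for this image (assuming REGISTRY and ARO_IMAGE are set the same way the Makefile targets further below set them):

docker build --platform=linux/amd64 -f Dockerfile.aro-e2e -t "$ARO_IMAGE" --build-arg REGISTRY="$REGISTRY" .
docker push "$ARO_IMAGE"
hack/ci-utils/extractaro.sh "$ARO_IMAGE"   # copies aro, e2e.test, db, cluster and portalauth out of the image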


@@ -1,18 +1,13 @@
# Uses a multi-stage container build to build the RP.
#
# TODO:
# Currently the docker version on our RHEL7 VMSS uses a version which
# does not support multi-stage builds. This is a temporary stop-gap
# until we get podman working without issue
ARG REGISTRY
FROM ${REGISTRY}/ubi8/go-toolset:1.17.7 AS builder
ENV GOOS=linux \
GOPATH=/go/
WORKDIR ${GOPATH}/src/github.com/Azure/ARO-RP
USER root
RUN yum update -y
COPY . ${GOPATH}/src/github.com/Azure/ARO-RP/
RUN make aro RELEASE=${IS_OFFICIAL_RELEASE} && make e2e.test
RUN make aro RELEASE=${IS_OFFICIAL_RELEASE} -o generate && make e2e.test
FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all


@@ -124,6 +124,15 @@ publish-image-fluentbit: image-fluentbit
publish-image-proxy: image-proxy
docker push ${RP_IMAGE_ACR}.azurecr.io/proxy:latest
image-e2e:
docker build --platform=linux/amd64 --network=host --no-cache -f Dockerfile.aro-e2e -t $(ARO_IMAGE) --build-arg REGISTRY=$(REGISTRY) .
publish-image-e2e: image-e2e
docker push $(ARO_IMAGE)
extract-aro-docker:
hack/ci-utils/extractaro.sh ${ARO_IMAGE}
proxy:
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/proxy
@@ -160,6 +169,11 @@ tunnel:
e2e.test:
go test ./test/e2e/... -tags e2e,codec.safe -c -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" -o e2e.test
e2etools:
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/cluster
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/db
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/portalauth
test-e2e: e2e.test
./e2e.test $(E2E_FLAGS)
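
In the pipeline above this target is invoked as "make test-e2e -o e2e.test": make's -o (old-file) flag treats e2e.test as up to date, presumably so that the binary extracted from the container image is run rather than rebuilt. A minimal example:

make test-e2e -o e2e.test   # do not remake e2e.test; just run ./e2e.test $(E2E_FLAGS)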

hack/ci-utils/extractaro.sh (new executable file)

@@ -0,0 +1,8 @@
#!/bin/bash
set -xe

# Create a stopped container from the given image, export its filesystem to a tar,
# and extract the ARO binaries from usr/local/bin/ into the working directory.
DOCKERID=$(docker create "$1")
docker export "$DOCKERID" > aro.tar
tar -xvf aro.tar --strip-components=3 usr/local/bin/
docker rm "$DOCKERID"


@@ -4,8 +4,6 @@
if [[ $CI ]]; then
set -o pipefail
az account set -s $AZURE_SUBSCRIPTION_ID
SECRET_SA_ACCOUNT_NAME=e2earosecrets make secrets
. secrets/env
echo "##vso[task.setvariable variable=RP_MODE]$RP_MODE"
@@ -35,8 +33,7 @@ validate_rp_running() {
sleep 2
# after 40 secs return exit 1 to not block ci
ELAPSED=$((ELAPSED + 1))
if [ $ELAPSED -eq 20 ]
then
if [ $ELAPSED -eq 20 ]; then
exit 1
fi
;;
@@ -72,8 +69,7 @@ validate_portal_running() {
sleep 2
# after 40 secs return exit 1 to not block ci
ELAPSED=$((ELAPSED + 1))
if [ $ELAPSED -eq 20 ]
then
if [ $ELAPSED -eq 20 ]; then
exit 1
fi
;;
@@ -138,19 +134,13 @@ clean_e2e_db() {
delete_e2e_cluster() {
echo "########## 🧹 Deleting Cluster $CLUSTER ##########"
if [[ $CI ]]; then
./cluster delete
else
go run ./hack/cluster delete
fi
}
run_vpn() {
sudo openvpn --config secrets/$VPN --daemon --writepid vpnpid
sleep 10
}
kill_vpn() {
while read pid; do sudo kill $pid; done < vpnpid
}
# TODO: CLUSTER is also recalculated in multiple places
# in the billing pipelines :-(
@@ -183,9 +173,27 @@ echo
echo "PROXY_HOSTNAME=$PROXY_HOSTNAME"
echo "######################################"
[[ $LOCATION ]] || ( echo ">> LOCATION is not set please validate your ./secrets/env"; return 128 )
[[ $RESOURCEGROUP ]] || ( echo ">> RESOURCEGROUP is not set; please validate your ./secrets/env"; return 128 )
[[ $PROXY_HOSTNAME ]] || ( echo ">> PROXY_HOSTNAME is not set; please validate your ./secrets/env"; return 128 )
[[ $DATABASE_ACCOUNT_NAME ]] || ( echo ">> DATABASE_ACCOUNT_NAME is not set; please validate your ./secrets/env"; return 128 )
[[ $DATABASE_NAME ]] || ( echo ">> DATABASE_NAME is not set; please validate your ./secrets/env"; return 128 )
[[ $AZURE_SUBSCRIPTION_ID ]] || ( echo ">> AZURE_SUBSCRIPTION_ID is not set; please validate your ./secrets/env"; return 128 )
[[ $LOCATION ]] || (
echo ">> LOCATION is not set; please validate your ./secrets/env"
return 128
)
[[ $RESOURCEGROUP ]] || (
echo ">> RESOURCEGROUP is not set; please validate your ./secrets/env"
return 128
)
[[ $PROXY_HOSTNAME ]] || (
echo ">> PROXY_HOSTNAME is not set; please validate your ./secrets/env"
return 128
)
[[ $DATABASE_ACCOUNT_NAME ]] || (
echo ">> DATABASE_ACCOUNT_NAME is not set; please validate your ./secrets/env"
return 128
)
[[ $DATABASE_NAME ]] || (
echo ">> DATABASE_NAME is not set; please validate your ./secrets/env"
return 128
)
[[ $AZURE_SUBSCRIPTION_ID ]] || (
echo ">> AZURE_SUBSCRIPTION_ID is not set; please validate your ./secrets/env"
return 128
)


@@ -5,4 +5,8 @@ if [[ "$#" -ne 1 ]]; then
exit 1
fi
if [[ $CI ]]; then
./db "$1" | jq -r .openShiftCluster.properties.adminKubeconfig | base64 -d | sed -e 's|https://api-int\.|https://api\.|'
else
go run ./hack/db "$1" | jq -r .openShiftCluster.properties.adminKubeconfig | base64 -d | sed -e 's|https://api-int\.|https://api\.|'
fi


@@ -118,12 +118,17 @@ func servicePrincipalSecretForInstall(oc *api.OpenShiftCluster, sub *api.Subscri
if isDevelopment {
// In development mode, load in the proxy certificates so that clusters
// can be accessed from a local (not in Azure) Hive
basepath := os.Getenv("ARO_CHECKOUT_PATH")
if basepath == "" {
// This assumes we are running from an ARO-RP checkout in development
var err error
_, curmod, _, _ := runtime.Caller(0)
basepath, err := filepath.Abs(filepath.Join(filepath.Dir(curmod), "../.."))
basepath, err = filepath.Abs(filepath.Join(filepath.Dir(curmod), "../.."))
if err != nil {
return nil, err
}
}
proxyCert, err := os.ReadFile(path.Join(basepath, "secrets/proxy.crt"))
if err != nil {


@@ -119,12 +119,16 @@ func NewDialer(isLocalDevelopmentMode bool) (Dialer, error) {
d := &dev{}
basepath := os.Getenv("ARO_CHECKOUT_PATH")
if basepath == "" {
// This assumes we are running from an ARO-RP checkout in development
var err error
_, curmod, _, _ := runtime.Caller(0)
basepath, err := filepath.Abs(filepath.Join(filepath.Dir(curmod), "../.."))
basepath, err = filepath.Abs(filepath.Join(filepath.Dir(curmod), "../.."))
if err != nil {
return nil, err
}
}
b, err := os.ReadFile(path.Join(basepath, "secrets/proxy.crt"))
if err != nil {
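
Both hunks above apply the same lookup pattern: ARO_CHECKOUT_PATH, when set, overrides the repository root; otherwise the root is derived from the calling source file's location via runtime.Caller. A standalone sketch of that pattern (not part of this commit; repoRoot is a hypothetical helper name):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// repoRoot returns the ARO-RP checkout root: the ARO_CHECKOUT_PATH environment
// variable when set, otherwise two directories above this source file's directory.
func repoRoot() (string, error) {
	if basepath := os.Getenv("ARO_CHECKOUT_PATH"); basepath != "" {
		return basepath, nil
	}
	_, curmod, _, _ := runtime.Caller(0)
	return filepath.Abs(filepath.Join(filepath.Dir(curmod), "../.."))
}

func main() {
	basepath, err := repoRoot()
	if err != nil {
		panic(err)
	}
	fmt.Println(basepath)
}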


@@ -179,6 +179,10 @@ func adminPortalSessionSetup() (string, *selenium.WebDriver) {
hubPort = 4444
hostPort = 8444
)
hubAddress := "localhost"
if os.Getenv("AGENT_NAME") != "" {
hubAddress = "selenium"
}
os.Setenv("SE_SESSION_REQUEST_TIMEOUT", "9000")
@@ -188,13 +192,13 @@ func adminPortalSessionSetup() (string, *selenium.WebDriver) {
}
wd := selenium.WebDriver(nil)
_, err := url.ParseRequestURI(fmt.Sprintf("https://localhost:%d", hubPort))
_, err := url.ParseRequestURI(fmt.Sprintf("https://%s:%d", hubAddress, hubPort))
if err != nil {
panic(err)
}
for i := 0; i < 10; i++ {
wd, err = selenium.NewRemote(caps, fmt.Sprintf("http://localhost:%d/wd/hub", hubPort))
wd, err = selenium.NewRemote(caps, fmt.Sprintf("http://%s:%d/wd/hub", hubAddress, hubPort))
if wd != nil {
err = nil
break
@@ -218,7 +222,19 @@ func adminPortalSessionSetup() (string, *selenium.WebDriver) {
log.Infof("Could not get to %s. With error : %s", host, err.Error())
}
cmd := exec.Command("go", "run", "./hack/portalauth", "-username", "test", "-groups", "$AZURE_PORTAL_ELEVATED_GROUP_IDS", "2>", "/dev/null")
var portalAuthCmd string
var portalAuthArgs = make([]string, 0)
if os.Getenv("CI") != "" {
// In CI we have a prebuilt portalauth binary
portalAuthCmd = "./portalauth"
} else {
portalAuthCmd = "go"
portalAuthArgs = []string{"run", "./hack/portalauth"}
}
portalAuthArgs = append(portalAuthArgs, "-username", "test", "-groups", "$AZURE_PORTAL_ELEVATED_GROUP_IDS")
cmd := exec.Command(portalAuthCmd, portalAuthArgs...)
output, err := cmd.Output()
if err != nil {
log.Fatalf("Error occurred creating session cookie\n Output: %s\n Error: %s\n", output, err)
@@ -443,6 +459,10 @@ func setup(ctx context.Context) error {
return err
}
if os.Getenv("AGENT_NAME") != "" {
// Skip in pipelines for now
dockerSucceeded = false
} else {
cmd := exec.CommandContext(ctx, "which", "docker")
_, err = cmd.CombinedOutput()
if err == nil {
@@ -452,6 +472,7 @@ func setup(ctx context.Context) error {
if dockerSucceeded {
setupSelenium(ctx)
}
}
return nil
}
@@ -487,7 +508,8 @@ var _ = BeforeSuite(func() {
var _ = AfterSuite(func() {
log.Info("AfterSuite")
if dockerSucceeded {
// Azure Pipelines will tear down the image if needed
if dockerSucceeded && os.Getenv("AGENT_NAME") == "" {
if err := tearDownSelenium(context.Background()); err != nil {
log.Printf(err.Error())
}