Migrate VPN and rp service to Docker Compose (#3882)

* VPN, RP, and Portal are now managed by Docker Compose
This commit is contained in:
Shubhadapaithankar 2024-10-10 12:18:26 -07:00 committed by GitHub
Parent 16834d8ddd
Commit 96637dbc85
No key found matching this signature
GPG key ID: B5690EEEBB952194
7 changed files: 242 additions and 144 deletions
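In practice, the new local workflow looks roughly like this (a sketch assembled from the Makefile, docker-compose.yml, and env changes below; the file name and ordering are illustrative):

    source env                # REGISTRY, VERSION, TAG, LOCAL_ARO_RP_IMAGE, LOCAL_VPN_IMAGE
    docker compose build rp   # what `make ci-rp` now delegates to
    docker compose up rp      # what `make run-rp` runs; starts the vpn sidecar first
    docker compose up portal  # what `make run-portal` runs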


@@ -11,22 +11,20 @@ trigger:
include:
- v2*
# PR triggers are overridden in the ADO UI
resources:
containers:
- container: golang
image: mcr.microsoft.com/onebranch/cbl-mariner/build:2.0
options: --user=0
- container: python
image: registry.access.redhat.com/ubi8/python-39:latest
options: --user=0
- container: ubi8
image: registry.access.redhat.com/ubi8/toolbox:8.8
options: --user=0 --privileged -v /dev/shm:/dev/shm --device /dev/net/tun --name vpn
variables:
- template: vars.yml
- name: REGISTRY
value: registry.access.redhat.com
- name: LOCAL_ARO_RP_IMAGE
value: "arosvcdev.azurecr.io/aro"
- name: LOCAL_ARO_AZEXT_IMAGE
value: "arosvcdev.azurecr.io/azext-aro"
- name: LOCAL_VPN_IMAGE
value: "arosvcdev.azurecr.io/vpn"
- name: TAG
value: $(Build.BuildId)
- name: VERSION
value: $(Build.BuildId)
jobs:
- job: Build_Test_And_Push_Az_ARO_Extension
@@ -38,7 +36,7 @@ jobs:
# Build and test the Az ARO Extension
- script: |
set -xe
DOCKER_BUILD_CI_ARGS="--load" make ci-azext-aro VERSION=$(Build.BuildId)
DOCKER_BUILD_CI_ARGS="--load" make ci-azext-aro VERSION=$(VERSION)
displayName: 🛠 Build & Test Az ARO Extension
# Push the image to ACR
@@ -57,7 +55,7 @@ jobs:
# Build and test RP and Portal
- script: |
set -xe
DOCKER_BUILD_CI_ARGS="--load" make ci-rp VERSION=$(Build.BuildId)
DOCKER_BUILD_CI_ARGS="--load" make ci-rp VERSION=$(VERSION)
displayName: 🛠 Build & Test RP and Portal
# Publish test results
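Since VERSION is defined once as $(Build.BuildId) in the variables block, ADO now expands the same tag in every job; for example, with an illustrative build ID:

    # $(VERSION) resolves to the pipeline's Build.BuildId, e.g. 20241010.1
    DOCKER_BUILD_CI_ARGS="--load" make ci-azext-aro VERSION=20241010.1
    DOCKER_BUILD_CI_ARGS="--load" make ci-rp VERSION=20241010.1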


@@ -11,7 +11,6 @@ parameters:
default: false
steps:
# Authenticate to ACR and push the image
- task: AzureCLI@2
displayName: 'Authenticate to Azure and Push Docker Image'
inputs:
@@ -20,6 +19,24 @@ steps:
scriptLocation: 'inlineScript'
inlineScript: |
set -xe
# Install Docker dependencies
echo "Installing Docker and Docker Compose Plugin..."
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo tee /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
sudo systemctl start docker
sudo systemctl enable docker
# Authenticate to Azure and ACR
echo "Authenticating to Azure and ACR..."
ACR_FQDN="${{ parameters.acrFQDN }}"
REGISTRY_NAME=$(echo $ACR_FQDN | cut -d'.' -f1)
@@ -31,38 +48,45 @@ steps:
echo "Listing Docker images..."
docker images
# Ensure the image is available locally before tagging the build ID
IMAGE_NAME="${{ parameters.repository }}:$(Build.BuildId)"
# Define both the full repository image name and the local name
IMAGE_NAME="${ACR_FQDN}/${{ parameters.repository }}:$(VERSION)"
LOCAL_IMAGE="${{ parameters.repository }}:$(VERSION)"
# Check if the image exists locally with the full repository tag
echo "Checking for image $IMAGE_NAME..."
if [[ "$(docker images -q $IMAGE_NAME 2> /dev/null)" == "" ]]; then
echo "Error: Image $IMAGE_NAME not found. Exiting."
exit 1
# If the full repository tagged image does not exist, check for the local image
echo "Full repository image not found. Checking for local image $LOCAL_IMAGE..."
if [[ "$(docker images -q $LOCAL_IMAGE 2> /dev/null)" == "" ]]; then
echo "Error: Neither $IMAGE_NAME nor $LOCAL_IMAGE found. Exiting."
exit 1
else
# Retag the local image with the full repository path
echo "Local image $LOCAL_IMAGE found. Retagging with full repository path..."
docker tag $LOCAL_IMAGE $IMAGE_NAME
fi
else
echo "Image $IMAGE_NAME found. Proceeding to push..."
fi
# Ensure the image is available locally before tagging 'latest'
IMAGE_LATEST="${{ parameters.repository }}:latest"
IMAGE_LATEST="${ACR_FQDN}/${{ parameters.repository }}:latest"
echo "Checking for image $IMAGE_LATEST..."
if [[ "$(docker images -q $IMAGE_LATEST 2> /dev/null)" == "" ]]; then
echo "Warning: Image $IMAGE_LATEST not found. Skipping latest tag."
echo "Warning: Image $IMAGE_LATEST not found. Skipping 'latest' tag."
SKIP_LATEST=true
else
echo "Image $IMAGE_LATEST found. Proceeding with 'latest' tag."
SKIP_LATEST=false
fi
# Tag the image with the ACR repository for the build ID
echo "Tagging image with build ID..."
docker tag $IMAGE_NAME ${ACR_FQDN}/${{ parameters.repository }}:$(Build.BuildId)
# If the latest image exists, tag it as well
if [ "$SKIP_LATEST" == "false" ]; then
echo "Tagging image with 'latest'..."
docker tag $IMAGE_LATEST ${ACR_FQDN}/${{ parameters.repository }}:latest
fi
# Push the Docker image to ACR with build ID
# Push the Docker image to ACR with the build ID
echo "Pushing image with build ID to ACR..."
docker push ${ACR_FQDN}/${{ parameters.repository }}:$(Build.BuildId)
docker push $IMAGE_NAME
# Optionally push the image as 'latest'
if [ "${{ parameters.pushLatest }}" == "true" ] && [ "$SKIP_LATEST" == "false" ]; then
echo "Pushing 'latest' tag to ACR..."
docker push ${ACR_FQDN}/${{ parameters.repository }}:latest
echo "Tagging image with 'latest' and pushing..."
docker tag $IMAGE_NAME $IMAGE_LATEST
docker push $IMAGE_LATEST
fi
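Condensed, the retag-and-push logic above amounts to the following (illustrative values for the template parameters):

    ACR_FQDN=arosvcdev.azurecr.io          # parameters.acrFQDN
    IMAGE_NAME="$ACR_FQDN/aro:$VERSION"    # fully-qualified name pushed to ACR
    LOCAL_IMAGE="aro:$VERSION"             # name produced by a local build
    # retag the local image if the fully-qualified one is missing, then push
    docker images -q "$IMAGE_NAME" | grep -q . || docker tag "$LOCAL_IMAGE" "$IMAGE_NAME"
    docker push "$IMAGE_NAME"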

Dockerfile.vpn (new file, 8 additions)

@@ -0,0 +1,8 @@
# Use a Microsoft-approved image
FROM mcr.microsoft.com/azure-cli:2.61.0 AS base
# Install OpenVPN
USER root
RUN apk add --no-cache openvpn || tdnf install -y openvpn || dnf install -y openvpn
# Exec form so arguments (the .ovpn config path) are passed through to openvpn
ENTRYPOINT ["openvpn"]
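Outside of compose, the image can be exercised directly; a sketch mirroring the options the vpn service in docker-compose.yml below uses (the config path assumes the repo's secrets layout):

    docker build -f Dockerfile.vpn -t vpn:latest .
    docker run --rm --privileged --network host --device /dev/net/tun \
        -v "$PWD/secrets:/secrets:z" vpn:latest /secrets/vpn-eastus.ovpn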


@@ -0,0 +1,2 @@
# ignore everything
*

Makefile (119 changes)

@@ -399,29 +399,23 @@ ci-clean:
.PHONY: ci-rp
ci-rp: fix-macos-vendor
docker build . $(DOCKER_BUILD_CI_ARGS) \
docker build . ${DOCKER_BUILD_CI_ARGS} \
-f Dockerfile.ci-rp \
--ulimit=nofile=4096:4096 \
--build-arg REGISTRY=$(REGISTRY) \
--build-arg ARO_VERSION=$(VERSION) \
--no-cache=$(NO_CACHE) \
--build-arg REGISTRY=${REGISTRY} \
--build-arg ARO_VERSION=${VERSION} \
--no-cache=${NO_CACHE} \
--target=builder \
-t $(LOCAL_ARO_RP_BUILD_IMAGE):$(VERSION)
-t ${LOCAL_ARO_RP_BUILD_IMAGE}:${VERSION}
docker build . $(DOCKER_BUILD_CI_ARGS) \
-f Dockerfile.ci-rp \
--ulimit=nofile=4096:4096 \
--build-arg REGISTRY=$(REGISTRY) \
--build-arg ARO_VERSION=$(VERSION) \
-t $(LOCAL_ARO_RP_IMAGE):$(VERSION)
docker compose build rp
# Extract test coverage files from build to local filesystem
docker create --name extract_cover_out $(LOCAL_ARO_RP_BUILD_IMAGE):$(VERSION); \
docker create --name extract_cover_out ${LOCAL_ARO_RP_BUILD_IMAGE}:${VERSION}; \
docker cp extract_cover_out:/app/report.xml ./report.xml; \
docker cp extract_cover_out:/app/coverage.xml ./coverage.xml; \
docker rm extract_cover_out;
.PHONY: ci-tunnel
ci-tunnel: fix-macos-vendor
podman $(PODMAN_REMOTE_ARGS) \
@@ -441,102 +435,13 @@ ifeq ($(shell uname -s),Darwin)
mv ./vendor/github.com/Microsoft ./vendor/github.com/temp-microsoft && mv ./vendor/github.com/temp-microsoft ./vendor/github.com/microsoft || true
endif
.PHONY: podman-secrets
podman-secrets: aks.kubeconfig
podman $(PODMAN_REMOTE_ARGS) secret rm --ignore aks.kubeconfig
podman $(PODMAN_REMOTE_ARGS) secret create aks.kubeconfig ./aks.kubeconfig
podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy-client.key
podman $(PODMAN_REMOTE_ARGS) secret create proxy-client.key ./secrets/proxy-client.key
podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy-client.crt
podman $(PODMAN_REMOTE_ARGS) secret create proxy-client.crt ./secrets/proxy-client.crt
podman $(PODMAN_REMOTE_ARGS) secret rm --ignore proxy.crt
podman $(PODMAN_REMOTE_ARGS) secret create proxy.crt ./secrets/proxy.crt
.PHONY: run-portal
run-portal: ci-rp podman-secrets
podman $(PODMAN_REMOTE_ARGS) \
run \
--name aro-portal \
--rm \
-p 127.0.0.1:8444:8444 \
-p 127.0.0.1:2222:2222 \
--cap-drop net_raw \
-e RP_MODE \
-e AZURE_SUBSCRIPTION_ID \
-e AZURE_TENANT_ID \
-e LOCATION \
-e RESOURCEGROUP \
-e AZURE_PORTAL_CLIENT_ID \
-e AZURE_PORTAL_ELEVATED_GROUP_IDS \
-e AZURE_PORTAL_ACCESS_GROUP_IDS \
-e AZURE_RP_CLIENT_SECRET \
-e AZURE_RP_CLIENT_ID \
-e KEYVAULT_PREFIX \
-e DATABASE_ACCOUNT_NAME \
-e DATABASE_NAME \
-e NO_NPM=1 \
--secret proxy-client.key,target=/app/secrets/proxy-client.key \
--secret proxy-client.crt,target=/app/secrets/proxy-client.crt \
--secret proxy.crt,target=/app/secrets/proxy.crt \
$(LOCAL_ARO_RP_IMAGE):$(VERSION) portal
run-portal:
docker compose up portal
# run-rp executes the RP locally, as close to production as possible. That
# includes the use of Hive, meaning you need a VPN connection.
.PHONY: run-rp
run-rp: ci-rp podman-secrets
podman $(PODMAN_REMOTE_ARGS) \
run \
--name aro-rp \
--rm \
-p 127.0.0.1:8443:8443 \
-w /app \
-e ARO_IMAGE \
-e RP_MODE="development" \
-e PROXY_HOSTNAME \
-e DOMAIN_NAME \
-e AZURE_RP_CLIENT_ID \
-e AZURE_FP_CLIENT_ID \
-e AZURE_SUBSCRIPTION_ID \
-e AZURE_TENANT_ID \
-e AZURE_RP_CLIENT_SECRET \
-e LOCATION \
-e RESOURCEGROUP \
-e AZURE_ARM_CLIENT_ID \
-e AZURE_FP_SERVICE_PRINCIPAL_ID \
-e AZURE_DBTOKEN_CLIENT_ID \
-e AZURE_PORTAL_CLIENT_ID \
-e AZURE_PORTAL_ACCESS_GROUP_IDS \
-e AZURE_CLIENT_ID \
-e AZURE_SERVICE_PRINCIPAL_ID \
-e AZURE_CLIENT_SECRET \
-e AZURE_GATEWAY_CLIENT_ID \
-e AZURE_GATEWAY_SERVICE_PRINCIPAL_ID \
-e AZURE_GATEWAY_CLIENT_SECRET \
-e DATABASE_NAME \
-e PULL_SECRET \
-e SECRET_SA_ACCOUNT_NAME \
-e DATABASE_ACCOUNT_NAME \
-e KEYVAULT_PREFIX \
-e ADMIN_OBJECT_ID \
-e PARENT_DOMAIN_NAME \
-e PARENT_DOMAIN_RESOURCEGROUP \
-e AZURE_ENVIRONMENT \
-e STORAGE_ACCOUNT_DOMAIN \
-e OIDC_STORAGE_ACCOUNT_NAME \
-e KUBECONFIG="/app/secrets/aks.kubeconfig" \
-e HIVE_KUBE_CONFIG_PATH="/app/secrets/aks.kubeconfig" \
-e ARO_CHECKOUT_PATH="/app" \
-e ARO_INSTALL_VIA_HIVE="true" \
-e ARO_ADOPT_BY_HIVE="true" \
-e MOCK_MSI_TENANT_ID \
-e MOCK_MSI_CLIENT_ID \
-e MOCK_MSI_OBJECT_ID \
-e MOCK_MSI_CERT \
--secret aks.kubeconfig,target=/app/secrets/aks.kubeconfig \
--secret proxy-client.key,target=/app/secrets/proxy-client.key \
--secret proxy-client.crt,target=/app/secrets/proxy-client.crt \
--secret proxy.crt,target=/app/secrets/proxy.crt \
$(LOCAL_ARO_RP_IMAGE):$(VERSION) rp
run-rp: aks.kubeconfig
docker compose rm -sf rp
docker compose up rp
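With these changes, the former podman invocations reduce to thin compose wrappers; assuming the env exports shown at the end of this diff are sourced:

    make run-portal   # docker compose up portal
    make run-rp       # recreates the rp container; compose starts the vpn sidecar first and waits on its healthcheck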

docker-compose.yml (new file, 154 additions)

@@ -0,0 +1,154 @@
services:
vpn:
image: ${LOCAL_VPN_IMAGE}:${VERSION}
build:
context: .
dockerfile: Dockerfile.vpn
container_name: vpn
privileged: true
network_mode: host
volumes:
- ${PWD}/secrets:/secrets:z
devices:
- /dev/net/tun # required to create the VPN tunnel device on the host
entrypoint: "openvpn"
command: ["/secrets/vpn-eastus.ovpn"]
healthcheck:
test: ["CMD", "pidof", "openvpn"]
start_period: 20s
interval: 20s
timeout: 3s
retries: 3
rp:
image: ${LOCAL_ARO_RP_IMAGE}:${VERSION}
build:
context: .
dockerfile: Dockerfile.ci-rp
args:
- REGISTRY=${REGISTRY}
- ARO_VERSION=${VERSION}
ulimits:
nofile:
soft: 4096
hard: 4096
container_name: aro-rp
depends_on:
vpn:
condition: service_healthy
command: ["rp"]
secrets:
- source: proxy-client-key
target: /app/secrets/proxy-client.key
- source: proxy-client-crt
target: /app/secrets/proxy-client.crt
- source: proxy-crt
target: /app/secrets/proxy.crt
- source: hive-kubeconfig
target: /app/secrets/aks.kubeconfig
environment:
# inherit from host
- ADMIN_OBJECT_ID
- ARO_IMAGE
- AZURE_ARM_CLIENT_ID
- AZURE_CLIENT_ID
- AZURE_CLIENT_SECRET
- AZURE_DBTOKEN_CLIENT_ID
- AZURE_ENVIRONMENT
- AZURE_FP_CLIENT_ID
- AZURE_FP_SERVICE_PRINCIPAL_ID
- AZURE_GATEWAY_CLIENT_ID
- AZURE_GATEWAY_CLIENT_SECRET
- AZURE_GATEWAY_SERVICE_PRINCIPAL_ID
- AZURE_PORTAL_ACCESS_GROUP_IDS
- AZURE_PORTAL_CLIENT_ID
- AZURE_RP_CLIENT_ID
- AZURE_RP_CLIENT_SECRET
- AZURE_SERVICE_PRINCIPAL_ID
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT_ID
- DATABASE_ACCOUNT_NAME
- DATABASE_NAME
- DOMAIN_NAME
- KEYVAULT_PREFIX
- LOCATION
- MOCK_MSI_CERT
- MOCK_MSI_CLIENT_ID
- MOCK_MSI_TENANT_ID
- OIDC_STORAGE_ACCOUNT_NAME
- PARENT_DOMAIN_NAME
- PARENT_DOMAIN_RESOURCEGROUP
- PROXY_HOSTNAME
- PULL_SECRET
- RESOURCEGROUP
- SECRET_SA_ACCOUNT_NAME
- STORAGE_ACCOUNT_DOMAIN
# override
- ARO_ADOPT_BY_HIVE=true
- ARO_CHECKOUT_PATH=/app
- ARO_INSTALL_VIA_HIVE=true
- HIVE_KUBE_CONFIG_PATH=/app/secrets/aks.kubeconfig
- KUBECONFIG=/app/secrets/aks.kubeconfig
- RP_MODE=development
expose:
- "8443"
ports:
- "127.0.0.1:8443:8443"
healthcheck:
test: ["CMD", "curl", "-k", "https://localhost:8443/healthz"]
interval: 30s
timeout: 30s
retries: 3
restart: on-failure:3
portal:
image: ${LOCAL_ARO_RP_IMAGE}:${VERSION}
container_name: aro-portal
depends_on:
rp:
condition: service_healthy
environment:
- RP_MODE
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT_ID
- LOCATION
- RESOURCEGROUP
- AZURE_PORTAL_CLIENT_ID
- AZURE_PORTAL_ELEVATED_GROUP_IDS
- AZURE_PORTAL_ACCESS_GROUP_IDS
- AZURE_RP_CLIENT_SECRET
- AZURE_RP_CLIENT_ID
- KEYVAULT_PREFIX
- DATABASE_ACCOUNT_NAME
- DATABASE_NAME
- NO_NPM=1
ports:
- "127.0.0.1:8444:8444"
- "127.0.0.1:2222:2222"
secrets:
- source: proxy-client-key
target: /app/secrets/proxy-client.key
- source: proxy-client-crt
target: /app/secrets/proxy-client.crt
- source: proxy-crt
target: /app/secrets/proxy.crt
cap_drop:
- NET_RAW
command: ["portal"]
restart: on-failure:3
healthcheck:
test: ["CMD", "curl", "-k", "https://localhost:8444/healthz"]
interval: 30s
timeout: 10s
retries: 3
secrets:
proxy-client-key:
file: ./secrets/proxy-client.key
proxy-client-crt:
file: ./secrets/proxy-client.crt
proxy-crt:
file: ./secrets/proxy.crt
hive-kubeconfig:
file: ./aks.kubeconfig
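A quick sanity check of the file uses standard compose commands (service names as defined above):

    docker compose config --services   # should list vpn, rp, and portal
    docker compose up -d vpn           # bring the tunnel up first; its healthcheck gates rp
    docker compose up rp               # starts rp once vpn reports healthy
    docker compose logs -f rp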


@@ -18,4 +18,11 @@ export MOCK_MSI_CERT="replace_with_value_output_by_hack/devtools/msi.sh"
export MOCK_MSI_TENANT_ID="replace_with_value_output_by_hack/devtools/msi.sh"
export PLATFORM_WORKLOAD_IDENTITY_ROLE_SETS="replace_with_value_output_by_hack/devtools/msi.sh"
# You will need these to run the run-rp, vpn, and ci-rp targets with Docker Compose
export REGISTRY=registry.access.redhat.com
export LOCAL_ARO_RP_IMAGE=aro
export VERSION=latest
export TAG=latest
export LOCAL_VPN_IMAGE=vpn
. secrets/env
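These exports feed both the Makefile and docker-compose.yml. A minimal sketch of the end-to-end flow, assuming this file is sourced as env from the repo root:

    source env    # REGISTRY, VERSION, TAG, LOCAL_ARO_RP_IMAGE, LOCAL_VPN_IMAGE
    make ci-rp    # builds ${LOCAL_ARO_RP_IMAGE}:${VERSION} via `docker compose build rp`
    make run-rp   # runs the RP through compose, behind the vpn sidecar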