merge with master plus some adaptations

Jeff Yuan 2023-02-28 14:29:32 +13:00
Parent 74e769ed82 902ae3f3b7
Commit 5fefad4b19
881 changed files with 57579 additions and 10093 deletions

View file

@ -3,7 +3,6 @@ gomock_reflect_*
/.vscode
/*.kubeconfig
/dev-config.yaml
/env*
/id_rsa
/pyenv*
/mdm_statsd.socket

2
.github/CODEOWNERS vendored
View file

@ -1 +1 @@
* @jewzaam @m1kola @bennerv @hawkowl @rogbas @petrkotas @darthhexx @jharrington22 @cblecker @facchettos @cadenmarchese
* @jewzaam @bennerv @hawkowl @rogbas @petrkotas @darthhexx @jharrington22 @cblecker @facchettos @cadenmarchese @ulrichschlueter @s-amann @SudoBrendan

4
.github/workflows/ci-go.yml vendored
View file

@ -31,9 +31,9 @@ jobs:
sudo apt-get install libgpgme-dev libgpgme11
- uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18
- uses: actions/checkout@v3
- run: |
go mod vendor
go mod tidy -compat=1.17
go mod tidy -compat=1.18
hack/ci-utils/isClean.sh

4
.github/workflows/golint.yml vendored
View file

@ -21,7 +21,7 @@ jobs:
sudo apt-get install libgpgme-dev libgpgme11
- uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
@ -53,6 +53,6 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18
- uses: actions/checkout@v3
- run: make validate-go-action

2
.gitignore vendored
View file

@ -8,6 +8,7 @@ gomock_reflect_*
/*.key
/*.kubeconfig
/*.pem
/*.tar
/aro
/dev-config.yaml
/e2e.test
@ -23,6 +24,7 @@ gomock_reflect_*
/mdm_statsd.socket
/uts.txt
/cover.out
/cover_coverpkg.out
/coverage.*
/report.xml
/e2e-report.xml

View file

@ -18,13 +18,24 @@ pr:
exclude:
- docs/*
resources:
containers:
- container: golang
image: registry.access.redhat.com/ubi8/go-toolset:1.18
options: --user=0
- container: python
image: registry.access.redhat.com/ubi8/python-39:latest
options: --user=0
variables:
- template: vars.yml
jobs:
- job: Python_Unit_Tests
pool:
name: ARO-CI
name: 1es-aro-ci-pool
variables:
HOME: $(Agent.BuildDirectory)
steps:
- template: ./templates/template-checkout.yml
- script: |
@ -32,35 +43,48 @@ jobs:
make test-python
[[ -z "$(git status -s)" ]]
displayName: 🧪Run Python Unit Tests
target: python
- job: Golang_Unit_Tests
pool:
name: ARO-CI
demands: go-1.17
name: 1es-aro-ci-pool
variables:
GOCACHE: /tmp/gocache
steps:
- template: ./templates/template-checkout.yml
- script: |
set -xe
go version
go env
displayName: Print Go version & env
target: golang
- script: |
set -xe
make generate
[[ -z "$(git status -s)" ]]
displayName: ⚙️ Run Golang code generate
target: golang
- script: |
set -xe
make build-all
[[ -z "$(git status -s)" ]]
displayName: 🕵️ Build Golang code
target: golang
- script: |
set -xe
make unit-test-go
displayName: 🧪 Run Golang unit tests
target: golang
- script: |
set -xe
make validate-fips
displayName: 🕵️ Validate FIPS
target: golang
- task: PublishTestResults@2
displayName: 📊 Publish tests results
@ -69,12 +93,11 @@ jobs:
condition: succeededOrFailed()
- script: |
# install our vendored versions to prevent some oddities with our golang version (use of internal packages)
go install github.com/axw/gocov/gocov
go install github.com/AlekSi/gocov-xml
$GOPATH/bin/gocov convert cover.out | $GOPATH/bin/gocov-xml > coverage.xml
set -xe
go run github.com/axw/gocov/gocov convert cover.out | go run github.com/AlekSi/gocov-xml > coverage.xml
displayName: ⚙️ Process Reports
condition: succeededOrFailed()
target: golang
- task: PublishCodeCoverageResults@1
displayName: 📈 Publish code coverage
@ -83,11 +106,13 @@ jobs:
summaryFileLocation: $(System.DefaultWorkingDirectory)/**/coverage.xml
failIfCoverageEmpty: false
condition: succeededOrFailed()
target: golang
- job: Lint_Admin_Portal
pool:
name: ARO-CI
name: 1es-aro-ci-pool
steps:
- template: ./templates/template-checkout.yml
- script: |
set -xe
make lint-admin-portal

View file

@ -6,13 +6,19 @@ parameters:
type: boolean
default: false
resources:
containers:
- container: golang
image: registry.access.redhat.com/ubi8/go-toolset:1.17
options: --user=0
variables:
- template: vars.yml
jobs:
- job: Clean_subscription
pool:
name: ARO-CI
name: 1es-aro-ci-pool
steps:
- template: ./templates/template-checkout.yml

View file

@ -6,67 +6,111 @@ resources:
- pipeline: e2e
source: CI
trigger: true
containers:
- container: container
image: registry.access.redhat.com/ubi8/toolbox:8.7
options: --user=0 --cap-add=NET_ADMIN --device /dev/net/tun --name vpn
- container: selenium
image: docker.io/selenium/standalone-edge:latest
options: --shm-size=2g
# Azure DevOps Pipeline running e2e tests
variables:
- template: vars.yml
# Run the test suite and collect must-gather
jobs:
- job: E2E
timeoutInMinutes: 180
pool:
name: ARO-CI
demands: go-1.17
name: 1es-aro-ci-pool
#services:
# selenium: selenium
steps:
- template: ./templates/template-checkout.yml
- script: |
set -xe
sudo rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
sudo dnf install -y openvpn make
displayName: Setup (Container)
target: container
- template: ./templates/template-az-cli-login.yml
parameters:
azureDevOpsJSONSPN: $(aro-v4-e2e-devops-spn)
- script: |
az account set -s $AZURE_SUBSCRIPTION_ID
export SECRET_SA_ACCOUNT_NAME=$(SECRET_SA_ACCOUNT_NAME)
make secrets
. secrets/env
echo "##vso[task.setvariable variable=RP_MODE]$RP_MODE"
displayName: 🔑 Downloading certificates and secrets from storage account
name: setEnv
- template: ./templates/template-push-images-to-acr.yml
parameters:
rpImageACR: $(RP_IMAGE_ACR)
buildCommand: publish-image-aro
- script: |
make extract-aro-docker
displayName: Extract ARO binaries from build
- script: |
set -e
set -o pipefail
. secrets/env
export HIVE_KUBE_CONFIG_PATH_1="secrets/e2e-aks-kubeconfig_1"
az account set -s $AZURE_SUBSCRIPTION_ID
SECRET_SA_ACCOUNT_NAME=e2earosecrets make secrets
. secrets/env
set -x
export PRIVATE_CLUSTER=true
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
deploy_e2e_db
displayName: Setup (Azure)
- script: |
export CI=true
. secrets/env
. ./hack/e2e/run-rp-and-e2e.sh
trap 'set +e; kill_rp; kill_portal; kill_vpn; clean_e2e_db' EXIT
run_vpn
deploy_e2e_db
run_portal
validate_portal_running
# run_portal
# validate_portal_running
run_rp
validate_rp_running
register_sub
export CI=true
make test-e2e
make test-e2e -o e2e.test
displayName: Execute Tests
target: container
- script: |
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
hack/get-admin-kubeconfig.sh /subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$CLUSTER/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER >admin.kubeconfig
export KUBECONFIG=admin.kubeconfig
wget https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/$(OpenShiftVersion)/openshift-client-linux-$(OpenShiftVersion).tar.gz
tar xf openshift-client-linux-$(OpenShiftVersion).tar.gz
./oc adm must-gather
tar cf must-gather.tar.gz must-gather.local.*
displayName: Collect must-gather
condition: failed()
target: container
- publish: must-gather.tar.gz
artifact: must-gather
displayName: Append must-gather to Pipeline
condition: failed()
target: container
- script: |
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
delete_e2e_cluster
kill_rp
kill_vpn
displayName: Cleanup
condition: always()
target: container
- script: |
export CI=true
. ./hack/e2e/run-rp-and-e2e.sh
clean_e2e_db
displayName: Cleanup (Azure)
- template: ./templates/template-az-cli-logout.yml
- task: PublishTestResults@2
displayName: 📊 Publish tests results
@ -85,5 +129,3 @@ jobs:
inputs:
pathToPublish: $(Build.ArtifactStagingDirectory)
artifactName: Screenshots
- template: ./templates/template-az-cli-logout.yml

View file

@ -1,40 +0,0 @@
trigger: none
pr: none
parameters:
- name: vsoDeployerBuildID
type: string
default: latest
variables:
- template: vars.yml
jobs:
- job: Mirror_images
timeoutInMinutes: 180
pool:
name: ARO-CI
steps:
- template: ./templates/template-checkout.yml
- task: DownloadBuildArtifacts@0
inputs:
buildType: specific
project: $(vso-project-id)
pipeline: $(vso-deployer-pipeline-id)
${{ if eq(parameters.vsoDeployerBuildID, 'latest') }}:
buildVersionToDownload: latestFromBranch
branchName: refs/heads/master
${{ if ne(parameters.vsoDeployerBuildID, 'latest') }}:
buildVersionToDownload: specific
buildId: ${{ parameters.vsoDeployerBuildID }}
downloadType: specific
downloadPath: $(System.ArtifactsDirectory)/deployer
displayName: Download Deployer
- template: ./templates/template-mirror-images.yml
parameters:
dstAuth: $(acr-push-auth)
srcAuthQuay: $(quay-pull-auth)
srcAuthRedhat: $(redhat-pull-auth)
dstACRName: $(dst-acr-name)
deployerDirectory: $(System.ArtifactsDirectory)/deployer/drop

View file

@ -14,7 +14,7 @@ pr: none
variables:
Cdp_Definition_Build_Count: $[counter('', 0)] # needed for onebranch.pipeline.version task https://aka.ms/obpipelines/versioning
ONEBRANCH_AME_ACR_LOGIN: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.17.7-13 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.18.4 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
Debian_Frontend: noninteractive
resources:
@ -55,6 +55,7 @@ extends:
type: linux
variables: # More settings at https://aka.ms/obpipelines/yaml/jobs
is_official_release: true
ob_outputDirectory: $(Build.SourcesDirectory)/out # this directory is uploaded to pipeline artifacts, reddog and cloudvault. More info at https://aka.ms/obpipelines/artifacts
steps:
@ -69,6 +70,7 @@ extends:
os: linux
variables:
is_official_release: true
ob_git_checkout: true
release_tag: $[stageDependencies.Build_ARO.Build_ARO.outputs['buildaro.releasetag']]

View file

@ -14,7 +14,7 @@ pr: none
variables:
Cdp_Definition_Build_Count: $[counter('', 0)] # needed for onebranch.pipeline.version task https://aka.ms/obpipelines/versioning
ONEBRANCH_AME_ACR_LOGIN: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.17.7-13 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.18.4 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
Debian_Frontend: noninteractive
resources:

View file

@ -9,6 +9,12 @@ steps:
export COMMIT=$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty)
if [ -z "$TAG" ];
then
if [ "$is_official_release" = "true" ]
then
git describe --exact-match
echo "Ensure there is an annotated tag (git tag -a) for git commit ${COMMIT}"
exit 1
fi
export VERSION=${COMMIT}
else
export VERSION=${TAG}

View file

@ -6,7 +6,7 @@ steps:
dockerFileRelPath: ./Dockerfile.aro-multistage
dockerFileContextPath: ./
registry: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
arguments: --build-arg REGISTRY=registry.access.redhat.com
arguments: --build-arg REGISTRY=registry.access.redhat.com --build-arg IS_OFFICIAL_RELEASE=$(is_official_release)
saveImageToPath: aro-rp.tar
buildkit: 1
enable_network: true

View file

@ -1,4 +1,6 @@
steps:
- checkout: self
path: go/src/github.com/Azure/ARO-RP
fetchDepth: 1
fetchTags: false
displayName: ⚙️ Check-out

View file

@ -24,3 +24,4 @@ steps:
go run ./hack/clean -dryRun=${{ parameters.dryRun }}
displayName: 🧹 Clean subscription
target: golang

View file

@ -1,19 +0,0 @@
parameters:
deployerDirectory: ''
dstAuth: ''
dstACRName: ''
srcAuthQuay: ''
srcAuthRedhat: ''
steps:
- script: |
set -eu
export DST_AUTH=${{ parameters.dstAuth }}
export DST_ACR_NAME=${{ parameters.dstACRName }}
export SRC_AUTH_QUAY=${{ parameters.srcAuthQuay }}
export SRC_AUTH_REDHAT=${{ parameters.srcAuthRedhat }}
chmod +x ${{ parameters.deployerDirectory }}/aro
${{ parameters.deployerDirectory }}/aro mirror
displayName: 🚀 Fetch and mirror images

View file

@ -1,15 +1,14 @@
parameters:
rpImageACR: ''
buildCommand: ''
steps:
- script: |
set -e
trap 'set +e; for c in $(docker ps -aq); do docker rm -f $c; done; docker image prune -af ; rm -rf ~/.docker/config.json; rm -rf /run/user/$(id -u $USERNAME)/containers/auth.json' EXIT
#trap 'set +e; for c in $(docker ps -aq); do docker rm -f $c; done; docker image prune -af ; rm -rf ~/.docker/config.json; rm -rf /run/user/$(id -u $USERNAME)/containers/auth.json' EXIT
export RP_IMAGE_ACR=${{ parameters.rpImageACR }}
az acr login --name "$RP_IMAGE_ACR"
# azure checkouts commit, so removing master reference when publishing image
export BRANCH=$(Build.SourceBranchName)
make ${{ parameters.buildCommand }}
make publish-image-e2e
displayName: ⚙️ Build and push images to ACR

View file

@ -1,2 +1,4 @@
variables:
GOPATH: $(Agent.BuildDirectory)/go
OpenShiftVersion: 4.10.20
ARO_CHECKOUT_PATH: $(Agent.BuildDirectory)/go/src/github.com/Azure/ARO-RP

View file

@ -2,3 +2,4 @@
12d47b965d8a83b06ae3a44c632b624c57b129b5fbcc8c3f8de4fd80e2273f97 swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/preview/2021-09-01-preview/redhatopenshift.json
239c63228da1db172f298cd81d0c3cc0d52ecca907915efe61be98c42b6d8f1d swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2022-04-01/redhatopenshift.json
1d167031baf0209fe8c46df9654585c64e8cc9a0c89555d7479c4ed6dc150251 swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2022-09-04/redhatopenshift.json
1acf7f9a0430240f7c13c5bea79b74d09abd82f8793ad8b3b68d5861dd307267 swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2023-04-01/redhatopenshift.json

17
Dockerfile.aro-e2e Normal file
View file

@ -0,0 +1,17 @@
# Uses a multi-stage container build to build the RP & E2E components.
#
ARG REGISTRY
FROM ${REGISTRY}/ubi8/go-toolset:1.18.4 AS builder
ENV GOOS=linux \
GOPATH=/go/
WORKDIR ${GOPATH}/src/github.com/Azure/ARO-RP
USER root
COPY . ${GOPATH}/src/github.com/Azure/ARO-RP/
RUN make aro RELEASE=${IS_OFFICIAL_RELEASE} -o generate && make e2e.test e2etools
FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all
COPY --from=builder /go/src/github.com/Azure/ARO-RP/aro /go/src/github.com/Azure/ARO-RP/e2e.test /go/src/github.com/Azure/ARO-RP/db /go/src/github.com/Azure/ARO-RP/cluster /go/src/github.com/Azure/ARO-RP/portalauth /usr/local/bin/
ENTRYPOINT ["aro"]
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp 8445/tcp
USER 1000

View file

@ -1,18 +1,13 @@
# Uses a multi-stage container build to build the RP.
#
# TODO:
# Currently the docker version on our RHEL7 VMSS uses a version which
# does not support multi-stage builds. This is a temporary stop-gap
# until we get podman working without issue
ARG REGISTRY
FROM ${REGISTRY}/ubi8/go-toolset:1.17.7 AS builder
FROM ${REGISTRY}/ubi8/go-toolset:1.18.4 AS builder
ENV GOOS=linux \
GOPATH=/go/
WORKDIR ${GOPATH}/src/github.com/Azure/ARO-RP
USER root
RUN yum update -y
COPY . ${GOPATH}/src/github.com/Azure/ARO-RP/
RUN make aro && make e2e.test
RUN make aro RELEASE=${IS_OFFICIAL_RELEASE} -o generate && make e2e.test
FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all

View file

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi8/go-toolset:1.17.7
FROM registry.access.redhat.com/ubi8/go-toolset:1.18.4
USER root
RUN mkdir -p /root/go/src/github.com/Azure/ARO-RP

View file

@ -33,10 +33,20 @@ endif
ARO_IMAGE ?= $(ARO_IMAGE_BASE):$(VERSION)
check-release:
# Check that VERSION is a valid tag when building an official release (when RELEASE=true).
ifeq ($(RELEASE), true)
ifeq ($(TAG), $(VERSION))
@echo Building release version $(VERSION)
else
$(error $(shell git describe --exact-match) Ensure there is an annotated tag (git tag -a) for git commit $(COMMIT))
endif
endif
build-all:
go build -tags aro,containers_image_openpgp ./...
aro: generate
aro: check-release generate
go build -tags aro,containers_image_openpgp,codec.safe -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro
runlocal-rp:
@ -56,7 +66,7 @@ clean:
find -type d -name 'gomock_reflect_[0-9]*' -exec rm -rf {} \+ 2>/dev/null
client: generate
hack/build-client.sh "${AUTOREST_IMAGE}" 2020-04-30 2021-09-01-preview 2022-04-01 2022-09-04
hack/build-client.sh "${AUTOREST_IMAGE}" 2020-04-30 2021-09-01-preview 2022-04-01 2022-09-04 2023-04-01
# TODO: hard coding dev-config.yaml is clunky; it is also probably convenient to
# override COMMIT.
@ -114,6 +124,15 @@ publish-image-fluentbit: image-fluentbit
publish-image-proxy: image-proxy
docker push ${RP_IMAGE_ACR}.azurecr.io/proxy:latest
image-e2e:
docker build --platform=linux/amd64 --network=host --no-cache -f Dockerfile.aro-e2e -t $(ARO_IMAGE) --build-arg REGISTRY=$(REGISTRY) .
publish-image-e2e: image-e2e
docker push $(ARO_IMAGE)
extract-aro-docker:
hack/ci-utils/extractaro.sh ${ARO_IMAGE}
proxy:
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/proxy
@ -150,6 +169,11 @@ tunnel:
e2e.test:
go test ./test/e2e/... -tags e2e,codec.safe -c -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" -o e2e.test
e2etools:
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/cluster
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/db
CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/portalauth
test-e2e: e2e.test
./e2e.test $(E2E_FLAGS)
@ -179,6 +203,9 @@ validate-fips:
unit-test-go:
go run ./vendor/gotest.tools/gotestsum/main.go --format pkgname --junitfile report.xml -- -tags=aro,containers_image_openpgp -coverprofile=cover.out ./...
unit-test-go-coverpkg:
go run ./vendor/gotest.tools/gotestsum/main.go --format pkgname --junitfile report.xml -- -tags=aro,containers_image_openpgp -coverpkg=./... -coverprofile=cover_coverpkg.out ./...
lint-go:
hack/lint-go.sh

View file

@ -95,8 +95,8 @@ func mirror(ctx context.Context, log *logrus.Entry) error {
for _, arg := range flag.Args()[1:] {
if strings.EqualFold(arg, "latest") {
releases = append(releases, pkgmirror.Node{
Version: version.InstallStream.Version.String(),
Payload: version.InstallStream.PullSpec,
Version: version.DefaultInstallStream.Version.String(),
Payload: version.DefaultInstallStream.PullSpec,
})
} else {
vers, err := version.ParseVersion(arg)
@ -152,18 +152,20 @@ func mirror(ctx context.Context, log *logrus.Entry) error {
}
for _, ref := range []string{
"registry.redhat.io/rhel7/support-tools:latest",
"registry.redhat.io/rhel8/support-tools:latest",
"registry.redhat.io/openshift4/ose-tools-rhel7:latest",
"registry.redhat.io/openshift4/ose-tools-rhel8:latest",
"registry.access.redhat.com/ubi7/ubi-minimal:latest",
"registry.access.redhat.com/ubi8/ubi-minimal:latest",
"registry.access.redhat.com/ubi8/nodejs-14:latest",
"registry.access.redhat.com/ubi7/go-toolset:1.16.12",
"registry.access.redhat.com/ubi8/go-toolset:1.17.7",
// https://catalog.redhat.com/software/containers/ubi8/go-toolset/5ce8713aac3db925c03774d1
"registry.access.redhat.com/ubi8/go-toolset:1.17.12",
"registry.access.redhat.com/ubi8/go-toolset:1.18.4",
"mcr.microsoft.com/azure-cli:latest",
// https://mcr.microsoft.com/en-us/product/cbl-mariner/base/core/tags
"mcr.microsoft.com/cbl-mariner/base/core:2.0-nonroot.20230107-amd64",
"quay.io/app-sre/managed-upgrade-operator:v0.1.856-eebbe07",
// https://quay.io/repository/app-sre/managed-upgrade-operator?tab=tags
"quay.io/app-sre/managed-upgrade-operator:v0.1.891-3d94c00",
// https://quay.io/repository/app-sre/hive?tab=tags
"quay.io/app-sre/hive:fec14dc",
} {
log.Printf("mirroring %s -> %s", ref, pkgmirror.Dest(dstAcr+acrDomainSuffix, ref))

View file

@ -8,13 +8,6 @@ import (
"flag"
"fmt"
configclient "github.com/openshift/client-go/config/clientset/versioned"
consoleclient "github.com/openshift/client-go/console/clientset/versioned"
imageregistryclient "github.com/openshift/client-go/imageregistry/clientset/versioned"
machineclient "github.com/openshift/client-go/machine/clientset/versioned"
operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
securityclient "github.com/openshift/client-go/security/clientset/versioned"
mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
@ -22,11 +15,13 @@ import (
"github.com/Azure/ARO-RP/pkg/env"
pkgoperator "github.com/Azure/ARO-RP/pkg/operator"
aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned"
"github.com/Azure/ARO-RP/pkg/operator/controllers/alertwebhook"
"github.com/Azure/ARO-RP/pkg/operator/controllers/autosizednodes"
"github.com/Azure/ARO-RP/pkg/operator/controllers/banner"
"github.com/Azure/ARO-RP/pkg/operator/controllers/checker"
"github.com/Azure/ARO-RP/pkg/operator/controllers/checkers/clusterdnschecker"
"github.com/Azure/ARO-RP/pkg/operator/controllers/checkers/ingresscertificatechecker"
"github.com/Azure/ARO-RP/pkg/operator/controllers/checkers/internetchecker"
"github.com/Azure/ARO-RP/pkg/operator/controllers/checkers/serviceprincipalchecker"
"github.com/Azure/ARO-RP/pkg/operator/controllers/clusteroperatoraro"
"github.com/Azure/ARO-RP/pkg/operator/controllers/dnsmasq"
"github.com/Azure/ARO-RP/pkg/operator/controllers/genevalogging"
@ -79,43 +74,12 @@ func operator(ctx context.Context, log *logrus.Entry) error {
return err
}
arocli, err := aroclient.NewForConfig(restConfig)
if err != nil {
return err
}
configcli, err := configclient.NewForConfig(restConfig)
if err != nil {
return err
}
consolecli, err := consoleclient.NewForConfig(restConfig)
if err != nil {
return err
}
client := mgr.GetClient()
kubernetescli, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return err
}
maocli, err := machineclient.NewForConfig(restConfig)
if err != nil {
return err
}
mcocli, err := mcoclient.NewForConfig(restConfig)
if err != nil {
return err
}
securitycli, err := securityclient.NewForConfig(restConfig)
if err != nil {
return err
}
imageregistrycli, err := imageregistryclient.NewForConfig(restConfig)
if err != nil {
return err
}
operatorcli, err := operatorclient.NewForConfig(restConfig)
if err != nil {
return err
}
// TODO (NE): dh is sometimes passed, sometimes created later. Can we standardize?
dh, err := dynamichelper.New(log, restConfig)
if err != nil {
return err
@ -124,124 +88,144 @@ func operator(ctx context.Context, log *logrus.Entry) error {
if role == pkgoperator.RoleMaster {
if err = (genevalogging.NewReconciler(
log.WithField("controller", genevalogging.ControllerName),
arocli, kubernetescli, securitycli,
restConfig)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", genevalogging.ControllerName, err)
}
if err = (clusteroperatoraro.NewReconciler(
log.WithField("controller", clusteroperatoraro.ControllerName),
arocli, configcli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", clusteroperatoraro.ControllerName, err)
}
if err = (pullsecret.NewReconciler(
log.WithField("controller", pullsecret.ControllerName),
arocli, kubernetescli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", pullsecret.ControllerName, err)
}
if err = (alertwebhook.NewReconciler(
log.WithField("controller", alertwebhook.ControllerName),
arocli, kubernetescli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", alertwebhook.ControllerName, err)
}
if err = (workaround.NewReconciler(
log.WithField("controller", workaround.ControllerName),
arocli, configcli, kubernetescli, mcocli, restConfig)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", workaround.ControllerName, err)
}
if err = (routefix.NewReconciler(
log.WithField("controller", routefix.ControllerName),
arocli, configcli, kubernetescli, securitycli, restConfig)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", routefix.ControllerName, err)
}
if err = (monitoring.NewReconciler(
log.WithField("controller", monitoring.ControllerName),
arocli, kubernetescli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", monitoring.ControllerName, err)
}
if err = (rbac.NewReconciler(
log.WithField("controller", rbac.ControllerName),
arocli, dh)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", rbac.ControllerName, err)
}
if err = (dnsmasq.NewClusterReconciler(
log.WithField("controller", dnsmasq.ClusterControllerName),
arocli, mcocli, dh)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", dnsmasq.ClusterControllerName, err)
}
if err = (dnsmasq.NewMachineConfigReconciler(
log.WithField("controller", dnsmasq.MachineConfigControllerName),
arocli, mcocli, dh)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", dnsmasq.MachineConfigControllerName, err)
}
if err = (dnsmasq.NewMachineConfigPoolReconciler(
log.WithField("controller", dnsmasq.MachineConfigPoolControllerName),
arocli, mcocli, dh)).SetupWithManager(mgr); err != nil {
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", dnsmasq.MachineConfigPoolControllerName, err)
}
if err = (node.NewReconciler(
log.WithField("controller", node.ControllerName),
arocli, kubernetescli)).SetupWithManager(mgr); err != nil {
client, kubernetescli)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", node.ControllerName, err)
}
if err = (subnets.NewReconciler(
log.WithField("controller", subnets.ControllerName),
arocli, kubernetescli, maocli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", subnets.ControllerName, err)
}
if err = (machine.NewReconciler(
log.WithField("controller", machine.ControllerName),
arocli, maocli, isLocalDevelopmentMode, role)).SetupWithManager(mgr); err != nil {
client, isLocalDevelopmentMode, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", machine.ControllerName, err)
}
if err = (banner.NewReconciler(
log.WithField("controller", banner.ControllerName),
arocli, consolecli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", banner.ControllerName, err)
}
if err = (machineset.NewReconciler(
log.WithField("controller", machineset.ControllerName),
arocli, maocli)).SetupWithManager(mgr); err != nil {
log.WithField("controller", machineset.ControllerName), client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", machineset.ControllerName, err)
}
if err = (imageconfig.NewReconciler(arocli, configcli)).SetupWithManager(mgr); err != nil {
if err = (imageconfig.NewReconciler(
log.WithField("controller", imageconfig.ControllerName),
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", imageconfig.ControllerName, err)
}
if err = (previewfeature.NewReconciler(
log.WithField("controller", previewfeature.ControllerName),
arocli, kubernetescli, maocli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", previewfeature.ControllerName, err)
}
if err = (storageaccounts.NewReconciler(
log.WithField("controller", storageaccounts.ControllerName),
arocli, maocli, kubernetescli, imageregistrycli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", storageaccounts.ControllerName, err)
}
if err = (muo.NewReconciler(arocli, kubernetescli, dh)).SetupWithManager(mgr); err != nil {
if err = (muo.NewReconciler(
log.WithField("controller", muo.ControllerName),
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", muo.ControllerName, err)
}
if err = (autosizednodes.NewReconciler(
log.WithField("controller", autosizednodes.ControllerName),
mgr)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", autosizednodes.ControllerName, err)
}
if err = (machinehealthcheck.NewReconciler(
arocli, dh)).SetupWithManager(mgr); err != nil {
log.WithField("controller", machinehealthcheck.ControllerName),
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", machinehealthcheck.ControllerName, err)
}
if err = (ingress.NewReconciler(
log.WithField("controller", ingress.ControllerName),
arocli, operatorcli)).SetupWithManager(mgr); err != nil {
client)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", ingress.ControllerName, err)
}
if err = guardrails.NewReconciler(arocli, kubernetescli, dh).SetupWithManager(mgr); err != nil {
if err = (serviceprincipalchecker.NewReconciler(
log.WithField("controller", serviceprincipalchecker.ControllerName),
client, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", serviceprincipalchecker.ControllerName, err)
}
if err = (clusterdnschecker.NewReconciler(
log.WithField("controller", clusterdnschecker.ControllerName),
client, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", clusterdnschecker.ControllerName, err)
}
if err = (ingresscertificatechecker.NewReconciler(
log.WithField("controller", ingresscertificatechecker.ControllerName),
client, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", ingresscertificatechecker.ControllerName, err)
}
if err = (guardrails.NewReconciler(
log.WithField("controller", guardrails.ControllerName),
client, dh)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", guardrails.ControllerName, err)
}
}
if err = (checker.NewReconciler(
log.WithField("controller", checker.ControllerName),
arocli, kubernetescli, maocli, operatorcli, configcli, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", checker.ControllerName, err)
if err = (internetchecker.NewReconciler(
log.WithField("controller", internetchecker.ControllerName),
client, role)).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller %s: %v", internetchecker.ControllerName, err)
}
// +kubebuilder:scaffold:builder

View file

@ -21,11 +21,13 @@ import (
_ "github.com/Azure/ARO-RP/pkg/api/v20210901preview"
_ "github.com/Azure/ARO-RP/pkg/api/v20220401"
_ "github.com/Azure/ARO-RP/pkg/api/v20220904"
_ "github.com/Azure/ARO-RP/pkg/api/v20230401"
"github.com/Azure/ARO-RP/pkg/backend"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/frontend"
"github.com/Azure/ARO-RP/pkg/frontend/adminactions"
"github.com/Azure/ARO-RP/pkg/hive"
"github.com/Azure/ARO-RP/pkg/metrics/statsd"
"github.com/Azure/ARO-RP/pkg/metrics/statsd/azure"
"github.com/Azure/ARO-RP/pkg/metrics/statsd/golang"
@ -146,8 +148,11 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
if err != nil {
return err
}
f, err := frontend.NewFrontend(ctx, audit, log.WithField("component", "frontend"), _env, dbAsyncOperations, dbClusterManagerConfiguration, dbOpenShiftClusters, dbSubscriptions, dbOpenShiftVersions, api.APIs, m, feAead, adminactions.NewKubeActions, adminactions.NewAzureActions, clusterdata.NewBestEffortEnricher)
hiveClusterManager, err := hive.NewFromEnv(ctx, log, _env)
if err != nil {
return err
}
f, err := frontend.NewFrontend(ctx, audit, log.WithField("component", "frontend"), _env, dbAsyncOperations, dbClusterManagerConfiguration, dbOpenShiftClusters, dbSubscriptions, dbOpenShiftVersions, api.APIs, m, feAead, hiveClusterManager, adminactions.NewKubeActions, adminactions.NewAzureActions, clusterdata.NewBestEffortEnricher)
if err != nil {
return err
}

View file

@ -28,19 +28,20 @@ func getLatestOCPVersions(ctx context.Context, log *logrus.Entry) ([]api.OpenShi
acrDomainSuffix := "." + env.Environment().ContainerRegistryDNSSuffix
dstRepo := dstAcr + acrDomainSuffix
var (
OpenshiftVersions = []api.OpenShiftVersion{
{
Properties: api.OpenShiftVersionProperties{
Version: version.InstallStream.Version.String(),
OpenShiftPullspec: version.InstallStream.PullSpec,
InstallerPullspec: dstRepo + "/aro-installer:release-4.10",
Enabled: true,
},
ocpVersions := []api.OpenShiftVersion{}
for _, vers := range version.HiveInstallStreams {
ocpVersions = append(ocpVersions, api.OpenShiftVersion{
Properties: api.OpenShiftVersionProperties{
Version: vers.Version.String(),
OpenShiftPullspec: vers.PullSpec,
InstallerPullspec: fmt.Sprintf("%s/aro-installer:release-%s", dstRepo, vers.Version.MinorVersion()),
Enabled: true,
},
}
)
return OpenshiftVersions, nil
})
}
return ocpVersions, nil
}
func getVersionsDatabase(ctx context.Context, log *logrus.Entry) (database.OpenShiftVersions, error) {

View file

@ -0,0 +1,79 @@
## Adding new instance types
Full support for new instance types in ARO relies on OpenShift support, Azure billing support, and RP support. This document outlines how to introduce and test RP support for new instance types after upstream OpenShift compatibility has been confirmed and billing for each desired instance has been set up.
At the time of writing, new instance types need to be added in the following places:
- `pkg/api/openshiftcluster.go`
- `pkg/admin/api/openshiftcluster.go`
- `pkg/api/validate/vm.go`
- If adding support for a new master instance, ensure that it is tested accordingly as we distinguish between master and worker support.
There are also vmSize consts in the `openshiftcluster.go` files of older versioned APIs, but this was deprecated in `v20220401` and is no longer necessary.
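For illustration, the change in `pkg/api/openshiftcluster.go` is typically just a new `VMSize` constant, with a matching entry in `pkg/api/validate/vm.go`. A hedged sketch (the surrounding constant list and exact names are assumptions):
~~~
// pkg/api/openshiftcluster.go (sketch)
// VMSize represents a VM size.
type VMSize string

const (
	// ...existing sizes elided...
	VMSizeStandardL8sV2 VMSize = "Standard_L8s_v2" // hypothetical new size
)
~~~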
## Testing new instance types
First, confirm that the desired machines are available in the test region:
~~~
$ az vm list-skus --location westus --size Standard_L --all --output table
ResourceType Locations Name Zones Restrictions
--------------- ----------- ---------------- ------- --------------
virtualMachines westus Standard_L16s None
virtualMachines westus Standard_L16s_v2 None
virtualMachines westus Standard_L32s None
virtualMachines westus Standard_L32s_v2 None
virtualMachines westus Standard_L48s_v2 None
virtualMachines westus Standard_L4s None
virtualMachines westus Standard_L64s_v2 None
virtualMachines westus Standard_L80s_v2 None
virtualMachines westus Standard_L8s None
virtualMachines westus Standard_L8s_v2 None
~~~
The desired instance types should be free of any restrictions. The subscription should also have quota for the new instance types, which you may need to request.
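Remaining regional quota can be checked with `az vm list-usage`; a sketch (the quota family name varies by SKU):
~~~
$ az vm list-usage --location westus --output table | grep -i "LSv2 Family"
~~~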
### CLI Method
1) Comment out `FeatureRequireD2sV3Workers` from the range of features in `pkg/env/dev.go`. This will allow you to create development clusters with other VM sizes.
> __NOTE:__ Please be responsible with your usage of larger VM sizes, as they incur additional cost.
2) Follow the usual steps to [deploy a development RP](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md), but don't use the hack script to create a cluster.
3) Follow steps in https://docs.microsoft.com/en-us/azure/openshift/tutorial-create-cluster to create a cluster, specifying `-worker-vm-size` and/or `--master-vm-size` in the `az aro create` step to specify an alternate sku:
~~~
az aro create --resource-group $RESOURCEGROUP --name $CLUSTER --vnet aro-lseries --master-subnet master-subnet --worker-subnet worker-subnet --worker-vm-size "Standard_L8s_v2"
~~~
4) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability.
### Hack scripts method
1) Comment out `FeatureRequireD2sV3Workers` from the range of features in `pkg/env/dev.go`, and modify the worker and master profiles defined in `createCluster()` at `pkg/util/cluster/cluster.go` to contain your desired instance size. For example:
~~~
oc.Properties.WorkerProfiles[0].VMSize = api.VMSizeStandardL4s
~~~
2) Use the [hack script to create a cluster.](https://github.com/cadenmarchese/ARO-RP/blob/master/docs/deploy-development-rp.md#run-the-rp-and-create-a-cluster)
3) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability.
### Post-install method
> __NOTE:__ This is useful for testing functionality of a specific size in an existing cluster. If adding support for a new size that is expected to be available on install, use the CLI method above.
1) Follow the usual steps to [deploy a development RP](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md), using the hack script or the MSFT documentation to create a cluster.
2) Edit the `vmSize` in a worker MachineSet to the desired SKU.
3) Delete the corresponding machine object (`oc delete machine`). The updated MachineSet will provision a new one with the desired instance type (see the sketch after the example output below).
~~~
$ oc get machines
NAME PHASE TYPE REGION ZONE AGE
cluster-m9ttf-master-0 Running Standard_D8s_v3 eastus 1 40m
cluster-m9ttf-master-1 Running Standard_D8s_v3 eastus 2 40m
cluster-m9ttf-master-2 Running Standard_D8s_v3 eastus 3 40m
cluster-m9ttf-worker-eastus1-86696 Running Standard_L8s_v2 eastus 1 6m43s
cluster-m9ttf-worker-eastus2-tb5hn Running Standard_D2s_v3 eastus 2 34m
cluster-m9ttf-worker-eastus3-szf9d Running Standard_D2s_v3 eastus 3 34m
~~~
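A hedged sketch of steps 2-3, reusing the machine set naming from the example output (names and the exact field path are illustrative):
~~~
$ oc -n openshift-machine-api edit machineset cluster-m9ttf-worker-eastus1   # set providerSpec.value.vmSize to the desired SKU
$ oc -n openshift-machine-api delete machine cluster-m9ttf-worker-eastus1-<id>
~~~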

View file

@ -47,10 +47,26 @@ in depth in the face of an attack on the gateway component.
## Setup
* At rollout time, create an AAD application whose *Application ID URI*
(`identifierUris` in the application manifest) is
`https://dbtoken.aro.azure.com`. It is not necessary for the application to
have any permissions, credentials, etc.
* Create the application and set `requestedAccessTokenVersion`
```bash
AZURE_DBTOKEN_CLIENT_ID="$(az ad app create --display-name dbtoken \
--oauth2-allow-implicit-flow false \
--query appId \
-o tsv)"
OBJ_ID="$(az ad app show --id $AZURE_DBTOKEN_CLIENT_ID --query id)"
# NOTE: the graph API requires this to be done from a managed machine
az rest --method PATCH \
--uri https://graph.microsoft.com/v1.0/applications/$OBJ_ID/ \
--body '{"api":{"requestedAccessTokenVersion": 2}}'
```
* Add the `AZURE_DBTOKEN_CLIENT_ID` to the RP config for the respective environment.
* The dbtoken service is responsible for creating database users and permissions
- see the `ConfigurePermissions` function.
* see the ConfigurePermissions function.

View file

@ -42,6 +42,11 @@
follow [prepare a shared RP development
environment](prepare-a-shared-rp-development-environment.md).
1. If you have multiple subscriptions in your account, verify that "ARO SRE Team - InProgress (EA Subscription 2)" is your active subscription:
```bash
az account set --subscription "ARO SRE Team - InProgress (EA Subscription 2)"
```
1. Set SECRET_SA_ACCOUNT_NAME to the name of the storage account containing your
shared development environment secrets and save them in `secrets`:
@ -137,6 +142,21 @@
journalctl _COMM=aro -o json --since "15 min ago" -f | jq -r 'select (.COMPONENT != null and (.COMPONENT | contains("access"))|not) | .MESSAGE'
```
## Automatically run local RP
If you are already familiar with running the ARO RP locally, you can speed up the process by executing the [local_dev_env.sh](../hack/devtools/local_dev_env.sh) script.
## Connect ARO-RP with a Hive development cluster
The environment variable names defined in pkg/util/liveconfig/manager.go control how the ARO-RP communicates with Hive.
- If you want to use ARO-RP + Hive, set *HIVE_KUBE_CONFIG_PATH* to the path of the kubeconfig of the AKS Dev cluster. See this [info](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md#debugging-openshift-cluster) about creating that kubeconfig (step *Get an admin kubeconfig*).
- If you want to create clusters using the local ARO-RP + Hive instead of the standard cluster creation process (which doesn't use Hive), set *ARO_INSTALL_VIA_HIVE* to *true*.
- If you want to enable the Hive adoption feature (which is performed during adminUpdate()), set *ARO_ADOPT_BY_HIVE* to *true*.
After setting the above environment variables (using *export* directly in the terminal or including them in the *env* file), connect to the [VPN](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md#debugging-aks-cluster) (*Connect to the VPN* section).
Then proceed to [run](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md#run-the-rp-and-create-a-cluster) the ARO-RP as usual.
After that, when you [create](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md#run-the-rp-and-create-a-cluster) a cluster, you will be using Hive behind the scenes. You can inspect the created Hive objects by following [Debugging OpenShift Cluster](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md#debugging-openshift-cluster) and using the *oc* command.
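For example, a typical snippet for a Hive-enabled local RP might look like this (an illustrative sketch; the kubeconfig path is an assumption):
```bash
export HIVE_KUBE_CONFIG_PATH="$(pwd)/aks.kubeconfig"   # admin kubeconfig of the Hive AKS dev cluster
export ARO_INSTALL_VIA_HIVE=true                       # create new clusters via Hive
export ARO_ADOPT_BY_HIVE=true                          # adopt existing clusters during adminUpdate()
```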
## Make Admin-Action API call(s) to a running local-rp
```bash
@ -163,19 +183,25 @@
curl -X GET -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/serialconsole?vmName=$VMNAME" --header "Content-Type: application/json" -d "{}"
```
* Redeploy node of a dev cluster
* Redeploy a VM in a dev cluster
```bash
VMNAME="aro-cluster-qplnw-master-0"
curl -X POST -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/redeployvm?vmName=$VMNAME" --header "Content-Type: application/json" -d "{}"
```
* Stop node of a dev cluster
* Stop a VM in a dev cluster
```bash
VMNAME="aro-cluster-qplnw-master-0"
curl -X POST -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/stopvm?vmName=$VMNAME" --header "Content-Type: application/json" -d "{}"
```
* Start node of a dev cluster
* Stop and deallocate a VM in a dev cluster
```bash
VMNAME="aro-cluster-qplnw-master-0"
curl -X POST -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/stopvm?vmName=$VMNAME" --header "Content-Type: application/json" -d "{}"
```
* Start a VM in a dev cluster
```bash
VMNAME="aro-cluster-qplnw-master-0"
curl -X POST -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/startvm?vmName=$VMNAME" --header "Content-Type: application/json" -d "{}"

View file

@ -17,6 +17,7 @@
```
1. Create a full environment file, which overrides some default `./env` options when sourced
* if using a public key separate from `~/.ssh/id_rsa.pub`, source it with `export SSH_PUBLIC_KEY=~/.ssh/id_separate.pub`
```bash
cp env-int.example env-int
vi env-int
@ -28,11 +29,42 @@
make dev-config.yaml
```
1. Run `make deploy`. This will fail on the first attempt to run due to AKS not being installed, so after the first failure, please skip to the next step to deploy the VPN Gateway and then deploy AKS.
> __NOTE:__ If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy`.
1. Run `make deploy`
> __NOTE:__ This will fail on the first attempt to run due to certificate and container mirroring requirements.
> __NOTE:__ If the deployment fails with `A vault with the same name already exists in deleted state`, then you will need to recover the deleted keyvaults from a previous deploy using: `az keyvault recover --name <KEYVAULT_NAME>` for each keyvault, and re-run.
> __NOTE:__ If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the gateway predeploy deployment, and re-run.
1. Deploy a VPN Gateway
This is required in order to be able to connect to AKS from your local machine:
```bash
source ./hack/devtools/deploy-shared-env.sh
deploy_vpn_for_dedicated_rp
```
1. Deploy AKS by running these commands from the ARO-RP root directory:
```bash
source ./hack/devtools/deploy-shared-env.sh
deploy_aks_dev
```
> __NOTE:__ If the AKS deployment fails with missing RP VNETs, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy` and then re-run `deploy_aks_dev`.
1. Install Hive into AKS
1. Download the VPN config. Please note that this action will _**OVERWRITE**_ the `secrets/vpn-$LOCATION.ovpn` on your local machine. **DO NOT** run `make secrets-update` after doing this until you have run `make secrets` to restore the config, as you would otherwise overwrite the existing config.
```bash
vpn_configuration
```
1. Connect to the Dev VPN in a new terminal:
```bash
sudo openvpn secrets/vpn-$LOCATION.ovpn
```
1. Now that your machine is able to access the AKS cluster, you can deploy Hive:
```bash
make aks.kubeconfig
./hack/hive-generate-config.sh
KUBECONFIG=$(pwd)/aks.kubeconfig ./hack/hive-dev-install.sh
```
1. Mirror the OpenShift images to your new ACR
<!-- TODO (bv) allow mirroring through a pipeline would be faster and a nice to have -->
@ -52,10 +84,14 @@
```
1. Run the mirroring
> The `latest` argument will take the InstallStream from `pkg/util/version/const.go` and mirror that version
> The `latest` argument will take the DefaultInstallStream from `pkg/util/version/const.go` and mirror that version
```bash
go run -tags aro ./cmd/aro mirror latest
```
If you are going to test or work with multi-version installs, then you should mirror any additional versions as well; for example, for 4.11.21 it would be
```bash
go run -tags aro ./cmd/aro mirror 4.11.21
```
1. Push the ARO and Fluentbit images to your ACR

Просмотреть файл

@ -31,6 +31,12 @@ monitoring from inside the cluster as well as a complementary near-term goal.
locally (like k8s list/watch). At startup, the Cosmos DB change feed returns
the current state of all of the OpenShiftClusterDocuments; subsequently, as
OpenShiftClusterDocuments are updated, it returns the updated documents.
* At the time of writing, the change feed does not surface record deletions; it logs
only changes (see the sketch after this list). Deallocated clusters are removed from
the monitoring list only if the monitor saw them in the `DeletingProvisioningState`.
The monitor reads the change feed every 10 seconds, so we should avoid
cases where `OpenShiftClusterDocuments` stay in the `DeletingProvisioningState` for
less than 10 seconds.
* Each monitor aims to check each cluster it "owns" every 5 minutes; it walks
the local database map and distributes checking over lots of local goroutine
workers.
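A minimal sketch of the deletion-inference pattern described above, with stand-in types (hypothetical names; the real monitor uses `OpenShiftClusterDocument` from `pkg/api`):
```go
type clusterDoc struct {
	ID                string
	ProvisioningState string
}

// applyChangeFeed folds one page of the change feed into the set of monitored
// clusters. Deletions never appear in the feed, so removal is inferred from
// the Deleting provisioning state.
func applyChangeFeed(page []clusterDoc, monitored map[string]clusterDoc) {
	for _, doc := range page { // the feed is polled every ~10 seconds
		if doc.ProvisioningState == "Deleting" {
			delete(monitored, doc.ID)
		} else {
			monitored[doc.ID] = doc
		}
	}
}
```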

View file

View file

View file

@ -27,7 +27,7 @@ locations.
Set SECRET_SA_ACCOUNT_NAME to the name of the storage account:
```bash
SECRET_SA_ACCOUNT_NAME=rharosecretsdev
SECRET_SA_ACCOUNT_NAME=e2earosecrets
```
1. You will need an AAD object (this could be your AAD user, or an AAD group of
@ -45,7 +45,7 @@ locations.
PULL_SECRET=...
```
1. Install [Go 1.17](https://golang.org/dl) or later, if you haven't already.
1. Install [Go 1.18](https://golang.org/dl) or later, if you haven't already.
1. Install the [Azure
CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli), if you
@ -240,21 +240,7 @@ locations.
1. Create an AAD application which will fake up the dbtoken client.
1. Create the application and set `requestedAccessTokenVersion`
```bash
AZURE_DBTOKEN_CLIENT_ID="$(az ad app create --display-name dbtoken \
--oauth2-allow-implicit-flow false \
--query appId \
-o tsv)"
OBJ_ID="$(az ad app show --id $AZURE_DBTOKEN_CLIENT_ID --query id)"
> __NOTE:__: the graph API requires this to be done from a managed machine
az rest --method PATCH \
--uri https://graph.microsoft.com/v1.0/applications/$OBJ_ID/ \
--body '{"api":{"requestedAccessTokenVersion": 2}}'
```
See [dbtoken-service.md](./dbtoken-service.md#setup) for details on setup.
## Certificates
@ -338,14 +324,14 @@ import_certs_secrets
4. The OpenVPN configuration file needs to be manually updated. To achieve this, edit the `vpn-<region>.ovpn` file and add the `vpn-client` certificate and private key
5. Next, we need to update certificates owned by FP Service Principal. Current configuration in DEV and INT is listed below
5. Next, we need to update certificates owned by FP Service Principal. Current configuration in DEV and INT is listed below. You can get the `AAD APP ID` from the `secrets/env` file
Variable | Certificate Client | Subscription Type | AAD App Name | AAD App ID | Key Vault Name |
| --- | --- | --- | --- | --- | --- |
| AZURE_FP_CLIENT_ID | firstparty | DEV | aro-v4-fp-shared | 1516efbc-0d48-4ade-a68e-a2872137fd79 | v4-eastus-svc |
| AZURE_ARM_CLIENT_ID | arm | DEV | aro-v4-arm-shared | 7c49e1a5-60ea-4353-9875-12bbcbf963b7 | v4-eastus-svc |
| AZURE_PORTAL_CLIENT_ID | portal-client | DEV | aro-v4-portal-shared | f7912be2-16d3-4727-a6b7-4042ca854c98 | v4-eastus-svc |
| AZURE_FP_CLIENT_ID | firstparty | INT | aro-int-sp | 71cfb175-ea3a-444e-8c03-b119b2752ce4 | aro-int-eastus-svc |
Variable | Certificate Client | Subscription Type | AAD App Name | Key Vault Name |
| --- | --- | --- | --- | --- |
| AZURE_FP_CLIENT_ID | firstparty | DEV | aro-v4-fp-shared-dev | v4-eastus-dev-svc |
| AZURE_ARM_CLIENT_ID | arm | DEV | aro-v4-arm-shared-dev | v4-eastus-dev-svc |
| AZURE_PORTAL_CLIENT_ID | portal-client | DEV | aro-v4-portal-shared-dev | v4-eastus-dev-svc |
| AZURE_FP_CLIENT_ID | firstparty | INT | aro-int-sp | aro-int-eastus-svc |
```bash
@ -366,9 +352,9 @@ az ad app credential reset \
--cert "$(base64 -w0 <secrets/portal-client.crt)" >/dev/null
```
5. The RP makes API calls to kubernetes cluster via a proxy VMSS agent. For the agent to get the updated certificates, this vm needs to be redeployed. Proxy VM is currently deployed by the `deploy_env_dev` function in `deploy-shared-env.sh`. It makes use of `env-development.json`
5. The RP makes API calls to kubernetes cluster via a proxy VMSS agent. For the agent to get the updated certificates, this vm needs to be deleted & redeployed. Proxy VM is currently deployed by the `deploy_env_dev` function in `deploy-shared-env.sh`. It makes use of `env-development.json`
6. Run `[rharosecretsdev|aroe2esecrets] make secrets-update` to upload it to your
6. Run `[rharosecretsdev|e2earosecrets] make secrets-update` to upload it to your
storage account so other people on your team can access it via `make secrets`
# Environment file
@ -472,6 +458,16 @@ each of the bash functions below.
when running the `deploy_env_dev_override` command, delete the `-pip` resource
and re-run.
1. Get the AKS kubeconfig and upload it to the storage account:
```bash
make aks.kubeconfig
mv aks.kubeconfig secrets/
make secrets-update
```
1. [Install Hive on the new AKS](https://github.com/Azure/ARO-RP/blob/master/docs/hive.md)
1. Load the keys/certificates into the key vault:
```bash

View file

@ -4,7 +4,7 @@ This document goes through the development dependencies one requires in order to
## Software Required
1. Install [Go 1.17](https://golang.org/dl) or later, if you haven't already.
1. Install [Go 1.18](https://golang.org/dl) or later, if you haven't already.
1. After downloading follow the [Install instructions](https://go.dev/doc/install), replacing the tar archive with your download.
1. Append `export PATH="${PATH}:/usr/local/go/bin"` to your shell's profile file.
@ -60,6 +60,9 @@ Install the `libgpgme-dev` package.
# Install gpgme
brew install gpgme
# Install diffutils to avoid errors during test runs
brew install diffutils
```
1. Modify your `~/.zshrc` (or `~/.bashrc` for Bash): this prepends `PATH` with GNU Utils paths;

Просмотреть файл

@ -8,6 +8,8 @@ To run RP unit tests:
make test-go
```
On macOS, the go-diff module causes an [issue](https://github.com/golangci/golangci-lint/issues/3087) that makes the test fail. Until a new release of the module with the [fix](https://github.com/sourcegraph/go-diff/pull/65) is available, an easy workaround to mitigate the issue is to install diffutils using `brew install diffutils`
To Run Go tests with coverage:
```bash

View file

The reason for calling the script instead of directly calling:
```bash
go get -u ./...
go mod tidy -compat=1.17
go mod tidy -compat=1.18
go mod vendor
```
@ -30,7 +30,7 @@ semantic versioning via tags. Therefore the proper version is parsed from the ve
branch and fixed using replace directive. Otherwise it will upgrade every time
the command is started.
When upgrading to a never version of OpenShift, this script have to be updated to
When upgrading to a newer version of OpenShift, this script has to be updated to
reflect the proper release.
@ -43,7 +43,7 @@ the PR, one can simply call
go get <module>@<release> OR
go get -u <module>@<release>
go mod tidy -compat=1.17
go mod tidy -compat=1.18
go mod vendor
```

24
go.mod
View file

@ -1,10 +1,12 @@
module github.com/Azure/ARO-RP
go 1.17
go 1.18
require (
github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737
github.com/Azure/azure-sdk-for-go v63.1.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.2
github.com/Azure/go-autorest/autorest v0.11.25
github.com/Azure/go-autorest/autorest/adal v0.9.18
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11
@ -60,10 +62,10 @@ require (
github.com/tebeka/selenium v0.9.9
github.com/ugorji/go/codec v1.2.7
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
golang.org/x/net v0.0.0-20220909164309-bea034e7d591
golang.org/x/net v0.7.0
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/text v0.3.7
golang.org/x/text v0.7.0
golang.org/x/tools v0.1.12
gotest.tools/gotestsum v1.6.4
k8s.io/api v0.24.7
@ -84,10 +86,12 @@ require (
cloud.google.com/go/compute v1.5.0 // indirect
github.com/AlecAivazis/survey/v2 v2.3.4 // indirect
github.com/Antonboom/errname v0.1.4 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 // indirect
github.com/BurntSushi/toml v1.1.0 // indirect
github.com/Djarvur/go-err113 v0.1.0 // indirect
github.com/IBM-Cloud/bluemix-go v0.0.0-20220407050707-b4cd0d4da813 // indirect
@ -171,6 +175,7 @@ require (
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
@ -202,6 +207,7 @@ require (
github.com/h2non/filetype v1.1.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.16.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
@ -222,6 +228,7 @@ require (
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kulti/thelper v0.4.0 // indirect
github.com/kunwardeep/paralleltest v1.0.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/kyoh86/exportloopref v0.1.8 // indirect
github.com/ldez/gomoddirectives v0.2.2 // indirect
github.com/ldez/tagliatelle v0.2.0 // indirect
@ -262,7 +269,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
github.com/opencontainers/runc v1.1.1 // indirect
github.com/opencontainers/runc v1.1.2 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/openshift/cloud-credential-operator v0.0.0-20220316185125-ed0612946f4b // indirect
github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 // indirect
@ -276,6 +283,7 @@ require (
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
@ -326,8 +334,8 @@ require (
go.opencensus.io v0.23.0 // indirect
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.74.0 // indirect
@ -511,7 +519,7 @@ replace (
k8s.io/mount-utils => k8s.io/mount-utils v0.23.0
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.0
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.1
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.2
sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.5.0
)
@ -534,7 +542,6 @@ replace (
github.com/deislabs/oras => github.com/oras-project/oras v0.12.0
github.com/etcd-io/bbolt => go.etcd.io/bbolt v1.3.6
github.com/go-check/check => gopkg.in/check.v1 v0.0.0-20201130134442-10cb98267c6c
github.com/go-logr/logr => github.com/go-logr/logr v0.4.0
github.com/golang/lint => golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
github.com/google/tcpproxy => inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2
github.com/googleapis/gnostic => github.com/google/gnostic v0.5.5
@ -571,7 +578,6 @@ replace (
github.com/willf/bitset => github.com/bits-and-blooms/bitset v1.2.1
google.golang.org/cloud => cloud.google.com/go v0.97.0
google.golang.org/grpc => google.golang.org/grpc v1.40.0
k8s.io/klog/v2 => k8s.io/klog/v2 v2.8.0
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65
k8s.io/kube-state-metrics => k8s.io/kube-state-metrics v1.9.7
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20211002133954-f839ab2b2b11

74
go.sum
View file

@ -94,6 +94,14 @@ github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v63.1.0+incompatible h1:yNC7qlSUWVF8p0TzxdmWW1FJ3DdIA+0Pge41IU/2+9U=
github.com/Azure/azure-sdk-for-go v63.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.2 h1:mM/yraAumqMMIYev6zX0oxHqX6hreUs5wXf76W47r38=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.2/go.mod h1:+nVKciyKD2J9TyVcEQ82Bo9b+3F92PiQfHrIE/zqLqM=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 h1:sLZ/Y+P/5RRtsXWylBjB5lkgixYfm0MQPiwrSX//JSo=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@ -145,6 +153,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c=
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
@ -578,8 +588,9 @@ github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
@ -736,12 +747,18 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
@ -945,6 +962,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@ -1204,7 +1223,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
@ -1409,6 +1429,7 @@ github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5
github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU=
github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
@ -1601,11 +1622,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@ -1722,8 +1745,8 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw=
github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -1838,6 +1861,8 @@ github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw
github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8=
github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -2380,6 +2405,7 @@ go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
go.uber.org/fx v1.12.0/go.mod h1:egT3Kyg1JFYQkvKLZ3EsykxkNrZxgXS+gKoKo7abERY=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@ -2399,6 +2425,7 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20191010144846-132d2879e1e9/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA=
@ -2436,6 +2463,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
@ -2557,6 +2585,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@ -2573,19 +2602,21 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -2766,6 +2797,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -2780,14 +2812,15 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -2796,8 +2829,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2806,7 +2840,6 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -3260,8 +3293,15 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME=
k8s.io/kube-controller-manager v0.23.0/go.mod h1:iHapRJJBe+fWu6hG3ye43YMFEeZcnIlRxDUS72bwJoE=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
@ -3285,9 +3325,9 @@ k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@ -3311,8 +3351,8 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
sigs.k8s.io/controller-runtime v0.9.1 h1:+LAqHAhkVW4lt/jLlrKmnGPA7OORMw/xEUH3Ey1h1Bs=
sigs.k8s.io/controller-runtime v0.9.1/go.mod h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4=
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
sigs.k8s.io/controller-tools v0.5.0 h1:3u2RCwOlp0cjCALAigpOcbAf50pE+kHSdueUosrC/AE=
sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=

View file

@ -21,11 +21,9 @@ import (
utillog "github.com/Azure/ARO-RP/pkg/util/log"
)
var (
fileName = flag.String("file", "-", "File to read. '-' for stdin.")
)
func run(ctx context.Context, log *logrus.Entry) error {
fileName := flag.String("file", "-", "File to read. '-' for stdin.")
flag.Parse()
var (

8
hack/ci-utils/extractaro.sh Executable file
View file

@ -0,0 +1,8 @@
#!/bin/bash
set -xe
DOCKERID=$(docker create $1)
docker export $DOCKERID > aro.tar
tar -xvf aro.tar --strip-components=3 usr/local/bin/
docker rm $DOCKERID
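
A usage sketch for the helper above; the image tag is an assumption:

```bash
# copy the compiled binaries out of a built RP image into the working directory
hack/ci-utils/extractaro.sh aro:latest
```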

View file

@ -19,10 +19,6 @@ import (
"github.com/Azure/ARO-RP/pkg/util/purge"
)
var (
dryRun = flag.Bool("dryRun", true, `Dry run`)
)
// denylist exists as belt and braces protection for important RGs, even though
// they may already have the persist=true tag set, especially if it is easy to
// accidentally redeploy the RG without the persist=true tag set.
@ -45,11 +41,13 @@ const (
)
func main() {
dryRun := flag.Bool("dryRun", true, `Dry run`)
flag.Parse()
ctx := context.Background()
log := utillog.GetLogger()
if err := run(ctx, log); err != nil {
if err := run(ctx, log, dryRun); err != nil {
log.Fatal(err)
}
}
@ -60,7 +58,7 @@ type settings struct {
deleteGroupPrefixes []string
}
func run(ctx context.Context, log *logrus.Entry) error {
func run(ctx context.Context, log *logrus.Entry, dryRun *bool) error {
for _, key := range []string{
"AZURE_CLIENT_ID",
"AZURE_CLIENT_SECRET",

View file

@ -76,11 +76,21 @@ deploy_aks_dev() {
-n aks-development \
--template-file pkg/deploy/assets/aks-development.json \
--parameters \
"adminObjectId=$ADMIN_OBJECT_ID" \
"dnsZone=$DOMAIN_NAME" \
"keyvaultPrefix=$KEYVAULT_PREFIX" \
"sshRSAPublicKey=$(<secrets/proxy_id_rsa.pub)" >/dev/null
}
deploy_vpn_for_dedicated_rp() {
echo "########## Deploying Dev VPN in RG $RESOURCEGROUP ##########"
az deployment group create \
-g "$RESOURCEGROUP" \
-n dev-vpn \
--template-file pkg/deploy/assets/vpn-development.json \
--parameters \
"vpnCACertificate=$(base64 -w0 <secrets/vpn-ca.crt)" >/dev/null
}
deploy_env_dev_override() {
echo "########## Deploying env-development in RG $RESOURCEGROUP ##########"
az deployment group create \

110
hack/devtools/local_dev_env.sh Executable file
View file

@ -0,0 +1,110 @@
#!/bin/bash
# Local development environment script.
# Execute this script from the root folder of the repo (ARO-RP).
# This script provides an automatic and easy way to prepare
# the environment and run the ARO RP locally.
# The steps here are the ones defined in docs/deploy-development-rp.md.
# We recommend using this script only after you understand the steps of the process.
build_development_az_aro_extension() {
echo "INFO: Building development az aro extension..."
make az
}
verify_aro_extension() {
echo "INFO: Verifying aro extension..."
grep -q 'dev_sources' ~/.azure/config || cat >>~/.azure/config <<EOF
[extension]
dev_sources = $PWD/python
EOF
}
set_storage_account() {
echo "INFO: Setting storage account..."
SECRET_SA_ACCOUNT_NAME=rharosecretsdev make secrets
}
ask_to_create_default_env_config() {
local answer
read -p "Do you want to create a default env file? (existing one will be overwritten, if any) (y / n) " answer
if [[ "$answer" == "y" || "$answer" == "Y" ]]; then
create_env_file
elif [[ "$answer" == "n" || "$answer" == "N" ]]; then
echo "INFO: Skipping creation of default env file..."
else
echo "INFO: Unknown option, skipping step..."
fi
}
create_env_file() {
echo "INFO: Creating default env config file..."
cat >env <<EOF
export LOCATION=eastus
export ARO_IMAGE=arointsvc.azurecr.io/aro:latest
export RP_MODE=development # to use a development RP running at https://localhost:8443/
source secrets/env
EOF
}
ask_to_create_Azure_deployment() {
local answer
read -p "Create Azure deployment in the current subscription ($AZURE_SUBSCRIPTION_ID)? (y / n / l (list existing deployments)) " answer
if [[ "$answer" == "y" || "$answer" == "Y" ]]; then
create_Azure_deployment
elif [[ "$answer" == "n" || "$answer" == "N" ]]; then
echo "INFO: Skipping creation of Azure deployment..."
elif [[ "$answer" == "l" ]]; then
list_Azure_deployment_names
ask_to_create_Azure_deployment
else
echo "INFO: Unknown option, skipping step..."
fi
}
list_Azure_deployment_names() {
echo "INFO: Existing deployment names in the current subscription ($AZURE_SUBSCRIPTION_ID):"
az deployment group list --resource-group $RESOURCEGROUP | jq '[ .[] | {deployment_name: ( .id ) | split("/deployments/")[1] } | .deployment_name ]'
}
create_Azure_deployment() {
echo "INFO: Creating Azure deployment..."
az deployment group create \
-g "$RESOURCEGROUP" \
-n "databases-development-$USER" \
--template-file pkg/deploy/assets/databases-development.json \
--parameters \
"databaseAccountName=$DATABASE_ACCOUNT_NAME" \
"databaseName=$DATABASE_NAME" \
>/dev/null
echo "INFO: Azure deployment created."
}
source_env() {
echo "INFO: Sourcing env file..."
source ./env
}
run_the_RP() {
echo "INFO: Running the ARO RP locally..."
make runlocal-rp
}
main() {
build_development_az_aro_extension
verify_aro_extension
set_storage_account
ask_to_create_default_env_config
source_env
ask_to_create_Azure_deployment
run_the_RP
}
main
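
A usage sketch for the script above, assuming you are logged in with the Azure CLI and can reach the secrets storage account:

```bash
# run from the root of the ARO-RP repository
./hack/devtools/local_dev_env.sh
```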

View file

@ -1,6 +1,22 @@
#!/bin/bash -e
######## Helper file to run E2e either locally or using Azure DevOps Pipelines ########
if [[ $CI ]]; then
set -o pipefail
. secrets/env
echo "##vso[task.setvariable variable=RP_MODE]$RP_MODE"
set -a
HIVEKUBECONFIGPATH="secrets/e2e-aks-kubeconfig"
HIVE_KUBE_CONFIG_PATH_1="secrets/aks.kubeconfig"
CLUSTER="v4-e2e-V$BUILD_BUILDID-$LOCATION"
DATABASE_NAME="v4-e2e-V$BUILD_BUILDID-$LOCATION"
PRIVATE_CLUSTER=true
E2E_DELETE_CLUSTER=false
set +a
fi
validate_rp_running() {
echo "########## Checking ARO RP Status ##########"
ELAPSED=0
@ -8,17 +24,16 @@ validate_rp_running() {
sleep 5
http_code=$(curl -k -s -o /dev/null -w '%{http_code}' https://localhost:8443/healthz/ready || true)
case $http_code in
"200")
"200")
echo "########## ✅ ARO RP Running ##########"
break
;;
*)
*)
echo "Attempt $ELAPSED - local RP is NOT up. Code : $http_code, waiting"
sleep 2
# after 40 secs return exit 1 to not block ci
ELAPSED=$((ELAPSED+1))
if [ $ELAPSED -eq 20 ]
then
ELAPSED=$((ELAPSED + 1))
if [ $ELAPSED -eq 20 ]; then
exit 1
fi
;;
@ -31,7 +46,7 @@ run_rp() {
./aro rp &
}
kill_rp(){
kill_rp() {
echo "########## Kill the RP running in background ##########"
rppid=$(lsof -t -i :8443)
kill $rppid
@ -45,17 +60,16 @@ validate_portal_running() {
sleep 5
http_code=$(curl -k -s -o /dev/null -w '%{http_code}' https://localhost:8444/api/info)
case $http_code in
"403")
"403")
echo "########## ✅ ARO Admin Portal Running ##########"
break
;;
*)
*)
echo "Attempt $ELAPSED - local Admin Portal is NOT up. Code : $http_code, waiting"
sleep 2
# after 40 secs return exit 1 to not block ci
ELAPSED=$((ELAPSED+1))
if [ $ELAPSED -eq 20 ]
then
ELAPSED=$((ELAPSED + 1))
if [ $ELAPSED -eq 20 ]; then
exit 1
fi
;;
@ -69,7 +83,7 @@ run_portal() {
./aro portal &
}
kill_portal(){
kill_portal() {
echo "########## Kill the Admin Portal running in background ##########"
rppid=$(lsof -t -i :8444)
kill $rppid
@ -85,17 +99,17 @@ run_vpn() {
kill_vpn() {
echo "########## Kill the OpenVPN running in background ##########"
while read pid; do sudo kill $pid; done < vpnpid
while read pid; do sudo kill $pid; done <vpnpid
}
deploy_e2e_db() {
echo "########## 📦 Creating new DB $DATABASE_NAME in $DATABASE_ACCOUNT_NAME ##########"
az deployment group create \
-g "$RESOURCEGROUP" \
-n "databases-development-$DATABASE_NAME" \
--template-file pkg/deploy/assets/databases-development.json \
--parameters \
-g "$RESOURCEGROUP" \
-n "databases-development-$DATABASE_NAME" \
--template-file pkg/deploy/assets/databases-development.json \
--parameters \
"databaseAccountName=$DATABASE_ACCOUNT_NAME" \
"databaseName=$DATABASE_NAME" \
>/dev/null
@ -105,12 +119,12 @@ deploy_e2e_db() {
register_sub() {
echo "########## 🔑 Registering subscription ##########"
curl -sko /dev/null -X PUT \
-H 'Content-Type: application/json' \
-d '{"state": "Registered", "properties": {"tenantId": "'"$AZURE_TENANT_ID"'"}}' \
"https://localhost:8443/subscriptions/$AZURE_SUBSCRIPTION_ID?api-version=2.0"
-H 'Content-Type: application/json' \
-d '{"state": "Registered", "properties": {"tenantId": "'"$AZURE_TENANT_ID"'"}}' \
"https://localhost:8443/subscriptions/$AZURE_SUBSCRIPTION_ID?api-version=2.0"
}
clean_e2e_db(){
clean_e2e_db() {
echo "########## 🧹 Deleting DB $DATABASE_NAME ##########"
az cosmosdb sql database delete --name $DATABASE_NAME \
--yes \
@ -118,33 +132,24 @@ clean_e2e_db(){
--resource-group $RESOURCEGROUP >/dev/null
}
run_vpn() {
sudo openvpn --config secrets/$VPN --daemon --writepid vpnpid
sleep 10
delete_e2e_cluster() {
echo "########## 🧹 Deleting Cluster $CLUSTER ##########"
if [[ $CI ]]; then
./cluster delete
else
go run ./hack/cluster delete
fi
}
kill_vpn() {
while read pid; do sudo kill $pid; done < vpnpid
}
# TODO: CLUSTER and is also recalculated in multiple places
# in the billing pipelines :-(
# if LOCAL_E2E is set, set the value with the local test names
# If it is not set, it defaults to the build ID
if [ -z "${LOCAL_E2E}" ] ; then
export CLUSTER="v4-e2e-V$BUILD_BUILDID-$LOCATION"
export DATABASE_NAME="v4-e2e-V$BUILD_BUILDID-$LOCATION"
fi
if [ -z "${CLUSTER}" ] ; then
if [[ -z $CLUSTER ]]; then
echo "CLUSTER is not set, aborting"
return 1
fi
if [ -z "${DATABASE_NAME}" ] ; then
if [[ -z $DATABASE_NAME ]]; then
echo "DATABASE_NAME is not set, aborting"
return 1
fi
@ -168,11 +173,27 @@ echo
echo "PROXY_HOSTNAME=$PROXY_HOSTNAME"
echo "######################################"
[ "$LOCATION" ] || ( echo ">> LOCATION is not set please validate your ./secrets/env"; return 128 )
[ "$RESOURCEGROUP" ] || ( echo ">> RESOURCEGROUP is not set; please validate your ./secrets/env"; return 128 )
[ "$PROXY_HOSTNAME" ] || ( echo ">> PROXY_HOSTNAME is not set; please validate your ./secrets/env"; return 128 )
[ "$DATABASE_ACCOUNT_NAME" ] || ( echo ">> DATABASE_ACCOUNT_NAME is not set; please validate your ./secrets/env"; return 128 )
[ "$DATABASE_NAME" ] || ( echo ">> DATABASE_NAME is not set; please validate your ./secrets/env"; return 128 )
[ "$AZURE_SUBSCRIPTION_ID" ] || ( echo ">> AZURE_SUBSCRIPTION_ID is not set; please validate your ./secrets/env"; return 128 )
az account set -s $AZURE_SUBSCRIPTION_ID >/dev/null
[[ $LOCATION ]] || (
> LOCATION">
echo ">> LOCATION is not set; please validate your ./secrets/env"
return 128
)
[[ $RESOURCEGROUP ]] || (
echo ">> RESOURCEGROUP is not set; please validate your ./secrets/env"
return 128
)
[[ $PROXY_HOSTNAME ]] || (
echo ">> PROXY_HOSTNAME is not set; please validate your ./secrets/env"
return 128
)
[[ $DATABASE_ACCOUNT_NAME ]] || (
echo ">> DATABASE_ACCOUNT_NAME is not set; please validate your ./secrets/env"
return 128
)
[[ $DATABASE_NAME ]] || (
echo ">> DATABASE_NAME is not set; please validate your ./secrets/env"
return 128
)
[[ $AZURE_SUBSCRIPTION_ID ]] || (
echo ">> AZURE_SUBSCRIPTION_ID is not set; please validate your ./secrets/env"
return 128
)

View file

@ -26,14 +26,12 @@ import (
"github.com/Azure/ARO-RP/pkg/util/version"
)
var (
certFile = flag.String("certFile", "secrets/proxy.crt", "file containing server certificate")
keyFile = flag.String("keyFile", "secrets/proxy.key", "file containing server key")
port = flag.Int("port", 6443, "Port to listen on")
host = flag.String("host", "localhost", "Host to listen on")
)
func run(ctx context.Context, l *logrus.Entry) error {
certFile := flag.String("certFile", "secrets/proxy.crt", "file containing server certificate")
keyFile := flag.String("keyFile", "secrets/proxy.key", "file containing server key")
port := flag.Int("port", 6443, "Port to listen on")
host := flag.String("host", "localhost", "Host to listen on")
l.Printf("starting, git commit %s", version.GitCommit)
flag.Parse()
@ -71,9 +69,8 @@ func run(ctx context.Context, l *logrus.Entry) error {
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
PreferServerCipherSuites: true,
SessionTicketsDisabled: true,
MinVersion: tls.VersionTLS12,
SessionTicketsDisabled: true,
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{
tls.CurveP256,
tls.X25519,

View file

@ -35,6 +35,10 @@ func run(ctx context.Context, log *logrus.Entry) error {
}
}
if _, found := os.LookupEnv("SSH_PUBLIC_KEY"); !found {
log.Warnf("environment variable SSH_PUBLIC_KEY unset, will use %s/.ssh/id_rsa.pub", os.Getenv("HOME"))
}
env, err := env.NewCore(ctx, log)
if err != nil {
return err

View file

@ -10,6 +10,7 @@ import (
configclient "github.com/openshift/client-go/config/clientset/versioned"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
@ -54,7 +55,11 @@ func writeVersion(ctx context.Context, restconfig *rest.Config) error {
return err
}
clusterVersion, err := version.GetClusterVersion(ctx, configcli)
cv, err := configcli.ConfigV1().ClusterVersions().Get(ctx, "version", metav1.GetOptions{})
if err != nil {
return err
}
clusterVersion, err := version.GetClusterVersion(cv)
if err != nil {
return err
}

View file

@ -15,19 +15,12 @@ import (
utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
)
var (
client = flag.Bool("client", false, "generate client certificate")
ca = flag.Bool("ca", false, "generate ca certificate")
keyFile = flag.String("keyFile", "", `file containing signing key in der format (default "" - self-signed)`)
certFile = flag.String("certFile", "", `file containing signing certificate in der format (default "" - self-signed)`)
)
func run(name string) error {
func run(name string, flags flagsType) error {
var signingKey *rsa.PrivateKey
var signingCert *x509.Certificate
if *keyFile != "" {
b, err := os.ReadFile(*keyFile)
if *flags.keyFile != "" {
b, err := os.ReadFile(*flags.keyFile)
if err != nil {
return err
}
@ -38,8 +31,8 @@ func run(name string) error {
}
}
if *certFile != "" {
b, err := os.ReadFile(*certFile)
if *flags.certFile != "" {
b, err := os.ReadFile(*flags.certFile)
if err != nil {
return err
}
@ -50,7 +43,7 @@ func run(name string) error {
}
}
key, cert, err := utiltls.GenerateKeyAndCertificate(name, signingKey, signingCert, *ca, *client)
key, cert, err := utiltls.GenerateKeyAndCertificate(name, signingKey, signingCert, *flags.ca, *flags.client)
if err != nil {
return err
}
@ -92,7 +85,21 @@ func usage() {
flag.PrintDefaults()
}
type flagsType struct {
client *bool
ca *bool
keyFile *string
certFile *string
}
func main() {
flags := flagsType{
client: flag.Bool("client", false, "generate client certificate"),
ca: flag.Bool("ca", false, "generate ca certificate"),
keyFile: flag.String("keyFile", "", `file containing signing key in der format (default "" - self-signed)`),
certFile: flag.String("certFile", "", `file containing signing certificate in der format (default "" - self-signed)`),
}
flag.Usage = usage
flag.Parse()
@ -101,7 +108,7 @@ func main() {
os.Exit(2)
}
if err := run(flag.Arg(0)); err != nil {
if err := run(flag.Arg(0), flags); err != nil {
panic(err)
}
}

View file

@ -5,4 +5,8 @@ if [[ "$#" -ne 1 ]]; then
exit 1
fi
go run ./hack/db "$1" | jq -r .openShiftCluster.properties.adminKubeconfig | base64 -d | sed -e 's|https://api-int\.|https://api\.|'
if [[ $CI ]]; then
./db "$1" | jq -r .openShiftCluster.properties.adminKubeconfig | base64 -d | sed -e 's|https://api-int\.|https://api\.|'
else
go run ./hack/db "$1" | jq -r .openShiftCluster.properties.adminKubeconfig | base64 -d | sed -e 's|https://api-int\.|https://api\.|'
fi
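
A usage sketch for the helper above; the script path and resource ID shape are assumptions:

```bash
# write a patched admin kubeconfig for the given cluster document
hack/get-admin-kubeconfig.sh "/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER" >admin.kubeconfig
```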

View file

@ -1,6 +0,0 @@
apiVersion: hive.openshift.io/v1
kind: ClusterImageSet
metadata:
name: openshift-v4.10.15
spec:
releaseImage: quay.io/openshift-release-dev/ocp-release@sha256:ddcb70ce04a01ce487c0f4ad769e9e36a10c8c832a34307c1b1eb8e03a5b7ddb

View file

@ -68,7 +68,6 @@ fi
$KUBECTL apply -f ./hack/hive-config/crds
$KUBECTL apply -f ./hack/hive-config/hive-deployment.yaml
$KUBECTL apply -f ./hack/hive-config/cluster-image-sets
echo "$PULL_SECRET" > /tmp/.tmp-secret
# Using dry-run allows updates to work seamlessly

View file

@ -5,7 +5,6 @@ package main
import (
"context"
"encoding/gob"
"flag"
"fmt"
"net/http"
@ -28,12 +27,10 @@ const (
SessionKeyGroups = "groups"
)
var (
username = flag.String("username", "testuser", "username of the portal user")
groups = flag.String("groups", "", "comma-separated list of groups the user is in")
)
func run(ctx context.Context, log *logrus.Entry) error {
username := flag.String("username", "testuser", "username of the portal user")
groups := flag.String("groups", "", "comma-separated list of groups the user is in")
flag.Parse()
_env, err := env.NewCore(ctx, log)
@ -71,7 +68,7 @@ func run(ctx context.Context, log *logrus.Entry) error {
session.Values[SessionKeyUsername] = username
session.Values[SessionKeyGroups] = strings.Split(*groups, ",")
session.Values[SessionKeyExpires] = time.Now().Add(time.Hour)
session.Values[SessionKeyExpires] = time.Now().Add(time.Hour).Unix()
encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
store.Codecs...)
@ -88,8 +85,6 @@ func run(ctx context.Context, log *logrus.Entry) error {
func main() {
log := utillog.GetLogger()
gob.Register(time.Time{})
if err := run(context.Background(), log); err != nil {
log.Fatal(err)
}

View file

@ -11,14 +11,12 @@ import (
"github.com/Azure/ARO-RP/pkg/util/version"
)
var (
certFile = flag.String("certFile", "secrets/proxy.crt", "file containing server certificate")
keyFile = flag.String("keyFile", "secrets/proxy.key", "file containing server key")
clientCertFile = flag.String("clientCertFile", "secrets/proxy-client.crt", "file containing client certificate")
subnet = flag.String("subnet", "10.0.0.0/8", "allowed subnet")
)
func main() {
certFile := flag.String("certFile", "secrets/proxy.crt", "file containing server certificate")
keyFile := flag.String("keyFile", "secrets/proxy.key", "file containing server key")
clientCertFile := flag.String("clientCertFile", "secrets/proxy-client.crt", "file containing client certificate")
subnet := flag.String("subnet", "10.0.0.0/8", "allowed subnet")
log := utillog.GetLogger()
log.Printf("starting, git commit %s", version.GitCommit)

View file

@ -35,19 +35,12 @@ import (
// TLS client certificate. For better or worse, this means we can avoid having
// to add more configurability to the client libraries.
var (
certFile = flag.String("certFile", "secrets/localhost.crt", "file containing server certificate")
keyFile = flag.String("keyFile", "secrets/localhost.key", "file containing server key")
clientCertFile = flag.String("clientCertFile", "secrets/dev-client.crt", "file containing client certificate")
clientKeyFile = flag.String("clientKeyFile", "secrets/dev-client.key", "file containing client key")
)
func run(ctx context.Context, log *logrus.Entry) error {
func run(ctx context.Context, log *logrus.Entry, flags flagsType) error {
if len(flag.Args()) != 1 {
return fmt.Errorf("usage: %s IP", os.Args[0])
}
certb, err := os.ReadFile(*certFile)
certb, err := os.ReadFile(*flags.certFile)
if err != nil {
return err
}
@ -60,7 +53,7 @@ func run(ctx context.Context, log *logrus.Entry) error {
pool := x509.NewCertPool()
pool.AddCert(cert)
keyb, err := os.ReadFile(*keyFile)
keyb, err := os.ReadFile(*flags.keyFile)
if err != nil {
return err
}
@ -70,12 +63,12 @@ func run(ctx context.Context, log *logrus.Entry) error {
return err
}
clientCertb, err := os.ReadFile(*clientCertFile)
clientCertb, err := os.ReadFile(*flags.clientCertFile)
if err != nil {
return err
}
clientKeyb, err := os.ReadFile(*clientKeyFile)
clientKeyb, err := os.ReadFile(*flags.clientKeyFile)
if err != nil {
return err
}
@ -151,14 +144,28 @@ func run(ctx context.Context, log *logrus.Entry) error {
}
}
type flagsType struct {
certFile *string
keyFile *string
clientCertFile *string
clientKeyFile *string
}
func main() {
log := utillog.GetLogger()
log.Printf("starting, git commit %s", version.GitCommit)
flags := flagsType{
certFile: flag.String("certFile", "secrets/localhost.crt", "file containing server certificate"),
keyFile: flag.String("keyFile", "secrets/localhost.key", "file containing server key"),
clientCertFile: flag.String("clientCertFile", "secrets/dev-client.crt", "file containing client certificate"),
clientKeyFile: flag.String("clientKeyFile", "secrets/dev-client.key", "file containing client key"),
}
flag.Parse()
if err := run(context.Background(), log); err != nil {
if err := run(context.Background(), log, flags); err != nil {
log.Fatal(err)
}
}

View file

@ -68,5 +68,5 @@ go mod edit -replace github.com/openshift/installer=$(go list -mod=mod -m github
go get -u ./...
go mod tidy -compat=1.17
go mod tidy -compat=1.18
go mod vendor

View file

@ -113,3 +113,5 @@ allowedImportNames:
- hivev1azure
github.com/gofrs/uuid:
- gofrsuuid
github.com/Azure/ARO-RP/pkg/operator/controllers/checkers/common:
- checkercommon

View file

@ -24,7 +24,6 @@ type OpenShiftCluster struct {
Location string `json:"location,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Properties OpenShiftClusterProperties `json:"properties,omitempty"`
SystemData SystemData `json:"systemData,omitempty"`
}
// OpenShiftClusterProperties represents an OpenShift cluster's properties.
@ -310,4 +309,9 @@ type SystemData struct {
type HiveProfile struct {
Namespace string `json:"namespace,omitempty"`
// CreatedByHive is used during PUCM to skip adoption and reconciliation
// of clusters that were created by Hive to avoid deleting existing
// ClusterDeployments.
CreatedByHive bool `json:"createdByHive,omitempty"`
}
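
A minimal sketch of how the new flag might be consulted during PUCM adoption; the helper name is hypothetical and not part of this commit:

```go
// shouldAdopt reports whether a cluster document still needs Hive adoption.
// Clusters created by Hive already have a ClusterDeployment, so adopting
// them again could delete the existing one.
func shouldAdopt(doc *api.OpenShiftClusterDocument) bool {
	return !doc.OpenShiftCluster.Properties.HiveProfile.CreatedByHive
}
```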

View file

@ -124,15 +124,8 @@ func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfac
}
out.Properties.HiveProfile = HiveProfile{
Namespace: oc.Properties.HiveProfile.Namespace,
}
out.SystemData = SystemData{
CreatedBy: oc.SystemData.CreatedBy,
CreatedAt: oc.SystemData.CreatedAt,
CreatedByType: CreatedByType(oc.SystemData.CreatedByType),
LastModifiedBy: oc.SystemData.LastModifiedBy,
LastModifiedAt: oc.SystemData.LastModifiedAt,
LastModifiedByType: CreatedByType(oc.SystemData.LastModifiedByType),
Namespace: oc.Properties.HiveProfile.Namespace,
CreatedByHive: oc.Properties.HiveProfile.CreatedByHive,
}
return out
@ -174,6 +167,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif
out.Properties.ArchitectureVersion = api.ArchitectureVersion(oc.Properties.ArchitectureVersion)
out.Properties.InfraID = oc.Properties.InfraID
out.Properties.HiveProfile.Namespace = oc.Properties.HiveProfile.Namespace
out.Properties.HiveProfile.CreatedByHive = oc.Properties.HiveProfile.CreatedByHive
out.Properties.ProvisioningState = api.ProvisioningState(oc.Properties.ProvisioningState)
out.Properties.LastProvisioningState = api.ProvisioningState(oc.Properties.LastProvisioningState)
out.Properties.FailedProvisioningState = api.ProvisioningState(oc.Properties.FailedProvisioningState)
@ -240,15 +234,6 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif
}
}
out.SystemData = api.SystemData{
CreatedBy: oc.SystemData.CreatedBy,
CreatedAt: oc.SystemData.CreatedAt,
CreatedByType: api.CreatedByType(oc.SystemData.CreatedByType),
LastModifiedBy: oc.SystemData.LastModifiedBy,
LastModifiedAt: oc.SystemData.LastModifiedAt,
LastModifiedByType: api.CreatedByType(oc.SystemData.CreatedByType),
}
// out.Properties.RegistryProfiles is not converted. The field is immutable and does not have to be converted.
// Other fields are converted and this breaks the pattern; however, converting this field creates an issue
// with filling the out.Properties.RegistryProfiles[i].Password as default is "" which erases the original value.

View file

@ -9,11 +9,6 @@ package api
// when moving between old and new versions
func SetDefaults(doc *OpenShiftClusterDocument) {
if doc.OpenShiftCluster != nil {
// SoftwareDefinedNetwork was introduced in 2021-09-01-preview
if doc.OpenShiftCluster.Properties.NetworkProfile.SoftwareDefinedNetwork == "" {
doc.OpenShiftCluster.Properties.NetworkProfile.SoftwareDefinedNetwork = SoftwareDefinedNetworkOpenShiftSDN
}
// EncryptionAtHost was introduced in 2021-09-01-preview.
// It can't be changed post cluster creation
if doc.OpenShiftCluster.Properties.MasterProfile.EncryptionAtHost == "" {
@ -85,7 +80,7 @@ func DefaultOperatorFlags() OperatorFlags {
"aro.routefix.enabled": flagTrue,
"aro.storageaccounts.enabled": flagTrue,
"aro.workaround.enabled": flagTrue,
"aro.autosizednodes.enable": flagFalse,
"aro.autosizednodes.enabled": flagFalse,
"rh.srep.muo.enabled": flagTrue,
"rh.srep.muo.managed": flagTrue,
}

View file

@ -47,26 +47,6 @@ func TestSetDefaults(t *testing.T) {
return validOpenShiftClusterDocument()
},
},
{
name: "default SDN",
want: func() *OpenShiftClusterDocument {
return validOpenShiftClusterDocument()
},
input: func(base *OpenShiftClusterDocument) {
base.OpenShiftCluster.Properties.NetworkProfile.SoftwareDefinedNetwork = ""
},
},
{
name: "preserve SDN",
want: func() *OpenShiftClusterDocument {
doc := validOpenShiftClusterDocument()
doc.OpenShiftCluster.Properties.NetworkProfile.SoftwareDefinedNetwork = SoftwareDefinedNetworkOVNKubernetes
return doc
},
input: func(base *OpenShiftClusterDocument) {
base.OpenShiftCluster.Properties.NetworkProfile.SoftwareDefinedNetwork = SoftwareDefinedNetworkOVNKubernetes
},
},
{
name: "default encryption at host",
want: func() *OpenShiftClusterDocument {

View file

@ -9,12 +9,6 @@ const (
// StorageAccount
FeatureFlagSaveAROTestConfig = "Microsoft.RedHatOpenShift/SaveAROTestConfig"
// FeatureFlagAdminKubeconfig is the feature in the subscription that is used
// to enable adminKubeconfig api. API itself returns privileged kubeconfig.
// We need a feature flag to make sure we don't open a security hole in existing
// clusters before customer had a chance to patch their API RBAC
FeatureFlagAdminKubeconfig = "Microsoft.RedHatOpenShift/AdminKubeconfig"
// FeatureFlagMTU3900 is the feature in the subscription that causes new
// OpenShift cluster nodes to use the largest available Maximum Transmission
// Unit (MTU) on Azure virtual networks, which as of late 2021 is 3900 bytes.

View file

@ -347,6 +347,94 @@ const (
VMSizeStandardNC8asT4V3 VMSize = "Standard_NC8as_T4_v3"
VMSizeStandardNC16asT4V3 VMSize = "Standard_NC16as_T4_v3"
VMSizeStandardNC64asT4V3 VMSize = "Standard_NC64as_T4_v3"
VMSizeStandardNC6sV3 VMSize = "Standard_NC6s_v3"
VMSizeStandardNC12sV3 VMSize = "Standard_NC12s_v3"
VMSizeStandardNC24sV3 VMSize = "Standard_NC24s_v3"
VMSizeStandardNC24rsV3 VMSize = "Standard_NC24rs_v3"
)
type VMSizeStruct struct {
CoreCount int
Family string
}
var (
VMSizeStandardD2sV3Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv3}
VMSizeStandardD4asV4Struct = VMSizeStruct{CoreCount: 4, Family: standardDASv4}
VMSizeStandardD8asV4Struct = VMSizeStruct{CoreCount: 8, Family: standardDASv4}
VMSizeStandardD16asV4Struct = VMSizeStruct{CoreCount: 16, Family: standardDASv4}
VMSizeStandardD32asV4Struct = VMSizeStruct{CoreCount: 32, Family: standardDASv4}
VMSizeStandardD4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv3}
VMSizeStandardD8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv3}
VMSizeStandardD16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv3}
VMSizeStandardD32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv3}
VMSizeStandardE4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardESv3}
VMSizeStandardE8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardESv3}
VMSizeStandardE16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardESv3}
VMSizeStandardE32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardESv3}
VMSizeStandardE64isV3Struct = VMSizeStruct{CoreCount: 64, Family: standardESv3}
VMSizeStandardE64iV3Struct = VMSizeStruct{CoreCount: 64, Family: standardESv3}
VMSizeStandardE80isV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEISv4}
VMSizeStandardE80idsV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEIDSv4}
VMSizeStandardE104iV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIv5}
VMSizeStandardE104isV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEISv5}
VMSizeStandardE104idV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIDv5}
VMSizeStandardE104idsV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIDSv5}
VMSizeStandardF4sV2Struct = VMSizeStruct{CoreCount: 4, Family: standardFSv2}
VMSizeStandardF8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardFSv2}
VMSizeStandardF16sV2Struct = VMSizeStruct{CoreCount: 16, Family: standardFSv2}
VMSizeStandardF32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardFSv2}
VMSizeStandardF72sV2Struct = VMSizeStruct{CoreCount: 72, Family: standardFSv2}
VMSizeStandardM128msStruct = VMSizeStruct{CoreCount: 128, Family: standardMS}
VMSizeStandardG5Struct = VMSizeStruct{CoreCount: 32, Family: standardGFamily}
VMSizeStandardGS5Struct = VMSizeStruct{CoreCount: 32, Family: standardGFamily}
VMSizeStandardL4sStruct = VMSizeStruct{CoreCount: 4, Family: standardLSv2}
VMSizeStandardL8sStruct = VMSizeStruct{CoreCount: 8, Family: standardLSv2}
VMSizeStandardL16sStruct = VMSizeStruct{CoreCount: 16, Family: standardLSv2}
VMSizeStandardL32sStruct = VMSizeStruct{CoreCount: 32, Family: standardLSv2}
VMSizeStandardL8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv2}
VMSizeStandardL16sV2Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv2}
VMSizeStandardL32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv2}
VMSizeStandardL48sV2Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv2}
VMSizeStandardL64sV2Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv2}
// GPU nodes
// the formatting of the ncasv3_t4 family is different; this can be seen
// through `az vm list-usage -l eastus`
VMSizeStandardNC4asT4V3Struct = VMSizeStruct{CoreCount: 4, Family: standardNCAS}
VMSizeStandardNC8asT4V3Struct = VMSizeStruct{CoreCount: 8, Family: standardNCAS}
VMSizeStandardNC16asT4V3Struct = VMSizeStruct{CoreCount: 16, Family: standardNCAS}
VMSizeStandardNC64asT4V3Struct = VMSizeStruct{CoreCount: 64, Family: standardNCAS}
VMSizeStandardNC6sV3Struct = VMSizeStruct{CoreCount: 6, Family: standardNCSv3}
VMSizeStandardNC12sV3Struct = VMSizeStruct{CoreCount: 12, Family: standardNCSv3}
VMSizeStandardNC24sV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3}
VMSizeStandardNC24rsV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3}
)
const (
standardDSv3 = "standardDSv3Family"
standardDASv4 = "standardDASv4Family"
standardESv3 = "standardESv3Family"
standardEISv4 = "standardEISv4Family"
standardEIDSv4 = "standardEIDSv4Family"
standardEIv5 = "standardEIv5Family"
standardEISv5 = "standardEISv5Family"
standardEIDSv5 = "standardEIDSv5Family"
standardEIDv5 = "standardEIDv5Family"
standardFSv2 = "standardFSv2Family"
standardMS = "standardMSFamily"
standardGFamily = "standardGFamily"
standardLSv2 = "standardLsv2Family"
standardNCAS = "Standard NCASv3_T4 Family"
standardNCSv3 = "Standard NCSv3 Family"
)
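// Illustrative sketch only: one way these structs might be consumed is a
// lookup table keyed by VMSize. The table and helper below use
// hypothetical names and are not part of this change; they assume the
// VMSize constants defined earlier in this file.
var exampleVMSizeTable = map[VMSize]VMSizeStruct{
VMSizeStandardNC8asT4V3: VMSizeStandardNC8asT4V3Struct,
VMSizeStandardNC6sV3: VMSizeStandardNC6sV3Struct,
}
// exampleCoresFor resolves the core count for a VM size, reporting false
// when the size is not in the table.
func exampleCoresFor(size VMSize) (int, bool) {
s, ok := exampleVMSizeTable[size]
if !ok {
return 0, false
}
return s.CoreCount, true
}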
// WorkerProfile represents a worker profile
@@ -432,4 +520,9 @@ type HiveProfile struct {
MissingFields
Namespace string `json:"namespace,omitempty"`
// CreatedByHive is used during PUCM to skip adoption and reconciliation
// of clusters that were created by Hive to avoid deleting existing
// ClusterDeployments.
CreatedByHive bool `json:"createdByHive,omitempty"`
}

View file

@@ -120,6 +120,16 @@ var OperationOpenShiftClusterListAdminCredentials = Operation{
Origin: "user,system",
}
var OperationOpenShiftClusterGetDetectors = Operation{
Name: "Microsoft.RedHatOpenShift/openShiftClusters/detectors/read",
Display: Display{
Provider: "Azure Red Hat OpenShift",
Resource: "openShiftClusters",
Operation: "Get OpenShift Cluster Detector",
},
Origin: "user,system",
}
var OperationListInstallVersions = Operation{
Name: "Microsoft.RedHatOpenShift/locations/listInstallVersions/read",
Display: Display{

View file

@@ -2,6 +2,7 @@ package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
const APIVersionKey = "api-version"
type OpenShiftClusterConverter interface {
ToExternal(*OpenShiftCluster) interface{}

View file

@@ -51,7 +51,7 @@ func validOpenShiftCluster() *OpenShiftCluster {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.InstallStream.Version.String(),
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
},
ConsoleProfile: ConsoleProfile{

View file

@@ -29,6 +29,7 @@ func init() {
api.OperationOpenShiftClusterWrite,
api.OperationOpenShiftClusterDelete,
api.OperationOpenShiftClusterListCredentials,
api.OperationOpenShiftClusterGetDetectors,
},
},
}

View file

@@ -51,7 +51,7 @@ func validOpenShiftCluster() *OpenShiftCluster {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.InstallStream.Version.String(),
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
},
ConsoleProfile: ConsoleProfile{

View file

@@ -29,6 +29,7 @@ func init() {
api.OperationOpenShiftClusterWrite,
api.OperationOpenShiftClusterDelete,
api.OperationOpenShiftClusterListCredentials,
api.OperationOpenShiftClusterGetDetectors,
},
},
}

View file

@@ -65,7 +65,7 @@ func validOpenShiftCluster() *OpenShiftCluster {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.InstallStream.Version.String(),
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
},
ConsoleProfile: ConsoleProfile{

View file

@@ -31,6 +31,7 @@ func init() {
api.OperationOpenShiftClusterDelete,
api.OperationOpenShiftClusterListCredentials,
api.OperationOpenShiftClusterListAdminCredentials,
api.OperationOpenShiftClusterGetDetectors,
},
},
}

View file

@@ -65,7 +65,7 @@ func validOpenShiftCluster() *OpenShiftCluster {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.InstallStream.Version.String(),
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
FipsValidatedModules: FipsValidatedModulesDisabled,
},

View file

@@ -31,6 +31,7 @@ func init() {
api.OperationOpenShiftClusterDelete,
api.OperationOpenShiftClusterListCredentials,
api.OperationOpenShiftClusterListAdminCredentials,
api.OperationOpenShiftClusterGetDetectors,
},
},
}

View file

@@ -71,7 +71,7 @@ func validOpenShiftCluster(name, location string) *OpenShiftCluster {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.InstallStream.Version.String(),
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
FipsValidatedModules: FipsValidatedModulesDisabled,
},

View file

@@ -42,6 +42,7 @@ func init() {
api.OperationSyncIdentityProvidersRead,
api.OperationSyncIdentityProvidersWrite,
api.OperationSyncIdentityProvidersDelete,
api.OperationOpenShiftClusterGetDetectors,
},
},
SyncSetConverter: syncSetConverter{},

View file

@@ -0,0 +1,154 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// SyncSetList represents a list of SyncSets
type SyncSetList struct {
// The list of syncsets.
SyncSets []*SyncSet `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}
// SyncSet represents a SyncSet for an Azure Red Hat OpenShift Cluster.
type SyncSet struct {
// This is a flag used by the typewalker during swagger generation to
// signal that this type should be marked as a proxy resource and
// not a tracked ARM resource.
proxyResource bool
// The resource ID.
ID string `json:"id,omitempty" mutable:"case"`
// The resource name.
Name string `json:"name,omitempty" mutable:"case"`
// The resource type.
Type string `json:"type,omitempty" mutable:"case"`
// SystemData metadata relating to this resource.
SystemData *SystemData `json:"systemData,omitempty"`
// The SyncSet properties
Properties SyncSetProperties `json:"properties,omitempty"`
}
// SyncSetProperties represents the properties of a SyncSet
type SyncSetProperties struct {
// Resources represents the SyncSets configuration.
Resources string `json:"resources,omitempty"`
}
// MachinePoolList represents a list of MachinePools
type MachinePoolList struct {
// The list of Machine Pools.
MachinePools []*MachinePool `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}
// MachinePool represents a MachinePool
type MachinePool struct {
// This is a flag used by the typewalker during swagger generation to
// signal that this type should be marked as a proxy resource and
// not a tracked ARM resource.
proxyResource bool
// The Resource ID.
ID string `json:"id,omitempty"`
// The resource name.
Name string `json:"name,omitempty"`
// The resource type.
Type string `json:"type,omitempty" mutable:"case"`
// SystemData metadata relating to this resource.
SystemData *SystemData `json:"systemData,omitempty"`
// The MachinePool Properties
Properties MachinePoolProperties `json:"properties,omitempty"`
}
// MachinePoolProperties represents the properties of a MachinePool
type MachinePoolProperties struct {
Resources string `json:"resources,omitempty"`
}
// SyncIdentityProviderList represents a list of SyncIdentityProviders
type SyncIdentityProviderList struct {
// The list of sync identity providers
SyncIdentityProviders []*SyncIdentityProvider `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}
// SyncIdentityProvider represents a SyncIdentityProvider
type SyncIdentityProvider struct {
// This is a flag used by the typewalker during swagger generation to
// signal that this type should be marked as a proxy resource and
// not a tracked ARM resource.
proxyResource bool
// The Resource ID.
ID string `json:"id,omitempty"`
// The resource name.
Name string `json:"name,omitempty"`
// The resource type.
Type string `json:"type,omitempty" mutable:"case"`
// SystemData metadata relating to this resource.
SystemData *SystemData `json:"systemData,omitempty"`
// The SyncIdentityProvider Properties
Properties SyncIdentityProviderProperties `json:"properties,omitempty"`
}
// SyncIdentityProviderProperties represents the properties of a SyncIdentityProvider
type SyncIdentityProviderProperties struct {
Resources string `json:"resources,omitempty"`
}
// SecretList represents a list of Secrets
type SecretList struct {
// The list of secrets.
Secrets []*Secret `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}
// Secret represents a secret.
type Secret struct {
// This is a flag used by the typewalker during swagger generation to
// signal that this type should be marked as a proxy resource and
// not a tracked ARM resource.
proxyResource bool
// The Resource ID.
ID string `json:"id,omitempty"`
// The resource name.
Name string `json:"name,omitempty"`
// The resource type.
Type string `json:"type,omitempty" mutable:"case"`
// SystemData metadata relating to this resource.
SystemData *SystemData `json:"systemData,omitempty"`
// The Secret Properties
Properties SecretProperties `json:"properties,omitempty"`
}
// SecretProperties represents the properties of a Secret
type SecretProperties struct {
// The Secrets Resources.
SecretResources string `json:"secretResources,omitempty"`
}

View file

@@ -0,0 +1,38 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"encoding/base64"
"encoding/json"
"fmt"
"strings"
)
type clusterManagerStaticValidator struct{}
func (c clusterManagerStaticValidator) Static(body string, vars map[string]string) error {
ocmResourceType := vars["ocmResourceType"]
var resource map[string]interface{}
if decodedBody, err := base64.StdEncoding.DecodeString(body); err == nil {
err = json.Unmarshal(decodedBody, &resource)
if err != nil {
return err
}
} else {
b := []byte(body)
err := json.Unmarshal(b, &resource)
if err != nil {
return err
}
}
payloadResourceKind := strings.ToLower(resource["kind"].(string))
if payloadResourceKind != ocmResourceType {
return fmt.Errorf("wanted Kind '%v', resource is Kind '%v'", ocmResourceType, payloadResourceKind)
}
return nil
}
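// Worked example (values from the tests below): a body of
// `{"kind":"SyncSet",...}`, raw or base64 encoded, passes when
// vars["ocmResourceType"] is "syncset"; the ToLower above makes the
// Kind comparison case-insensitive.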

View file

@@ -0,0 +1,90 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"fmt"
"testing"
)
var ocmResource = string(`
{
"apiVersion": "hive.openshift.io/v1",
"kind": "SyncSet",
"metadata": {
"name": "sample",
"namespace": "aro-f60ae8a2-bca1-4987-9056-f2f6a1837caa"
},
"spec": {
"clusterDeploymentRefs": [],
"resources": [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "myconfigmap"
}
}
]
}
}
`)
var ocmResourceEncoded = "eyAKICAiYXBpVmVyc2lvbiI6ICJoaXZlLm9wZW5zaGlmdC5pby92MSIsCiAgImtpbmQiOiAiU3luY1NldCIsCiAgIm1ldGFkYXRhIjogewogICAgIm5hbWUiOiAic2FtcGxlIiwKICAgICJuYW1lc3BhY2UiOiAiYXJvLWY2MGFlOGEyLWJjYTEtNDk4Ny05MDU2LWYyZjZhMTgzN2NhYSIKICB9LAogICJzcGVjIjogewogICAgImNsdXN0ZXJEZXBsb3ltZW50UmVmcyI6IFtdLAogICAgInJlc291cmNlcyI6IFsKICAgICAgewogICAgICAgICJhcGlWZXJzaW9uIjogInYxIiwKICAgICAgICAia2luZCI6ICJDb25maWdNYXAiLAogICAgICAgICJtZXRhZGF0YSI6IHsKICAgICAgICAgICJuYW1lIjogIm15Y29uZmlnbWFwIgogICAgICAgIH0KICAgICAgfQogICAgXQogIH0KfQo="
func TestStatic(t *testing.T) {
for _, tt := range []struct {
name string
ocmResource string
vars map[string]string
wantErr bool
err string
}{
{
name: "payload Kind matches",
ocmResource: ocmResource,
vars: map[string]string{
"ocmResourceType": "syncset",
},
wantErr: false,
},
{
name: "payload Kind matches and is a base64 encoded string",
ocmResource: ocmResourceEncoded,
vars: map[string]string{
"ocmResourceType": "syncset",
},
wantErr: false,
},
{
name: "payload Kind does not match",
ocmResource: ocmResource,
vars: map[string]string{
"ocmResourceType": "route",
},
wantErr: true,
err: "wanted Kind 'route', resource is Kind 'syncset'",
},
{
name: "payload Kind does not match and is a base64 encoded string",
ocmResource: ocmResourceEncoded,
vars: map[string]string{
"ocmResourceType": "route",
},
wantErr: true,
err: "wanted Kind 'route', resource is Kind 'syncset'",
},
} {
t.Run(tt.name, func(t *testing.T) {
c := &clusterManagerStaticValidator{}
err := c.Static(tt.ocmResource, tt.vars)
if tt.wantErr {
if err == nil {
t.Errorf("wanted error '%v', got nil", tt.err)
} else if fmt.Sprint(err) != tt.err {
t.Errorf("wanted '%v', got '%v'", tt.err, err)
}
} else if err != nil {
t.Errorf("wanted no error, got '%v'", err)
}
})
}
}

View file

@@ -0,0 +1,6 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
//go:generate go run ../../../hack/swagger github.com/Azure/ARO-RP/pkg/api/v20230401 ../../../swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2023-04-01
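// Hedged usage note: this output is assumed to be regenerated via
// `go generate ./...` or the repository's `make generate` target, which
// wraps directives like the one above.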

View file

@@ -0,0 +1,39 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
type machinePoolConverter struct{}
func (c machinePoolConverter) ToExternal(mp *api.MachinePool) interface{} {
out := new(MachinePool)
out.proxyResource = true
out.ID = mp.ID
out.Name = mp.Name
out.Type = mp.Type
out.Properties.Resources = mp.Properties.Resources
return out
}
func (c machinePoolConverter) ToInternal(_mp interface{}, out *api.MachinePool) {
ocm := _mp.(*api.MachinePool)
out.ID = ocm.ID
}
// ToExternalList returns a slice of external representations of the internal objects
func (c machinePoolConverter) ToExternalList(mp []*api.MachinePool) interface{} {
l := &MachinePoolList{
MachinePools: make([]*MachinePool, 0, len(mp)),
}
for _, machinepool := range mp {
c := c.ToExternal(machinepool)
l.MachinePools = append(l.MachinePools, c.(*MachinePool))
}
return l
}

View file

@@ -0,0 +1,38 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
func exampleMachinePool() *MachinePool {
doc := api.ExampleClusterManagerConfigurationDocumentMachinePool()
ext := (&machinePoolConverter{}).ToExternal(doc.MachinePool)
return ext.(*MachinePool)
}
func ExampleMachinePoolPutParameter() interface{} {
mp := exampleMachinePool()
mp.ID = ""
mp.Type = ""
mp.Name = ""
return mp
}
func ExampleMachinePoolPatchParameter() interface{} {
return ExampleMachinePoolPutParameter()
}
func ExampleMachinePoolResponse() interface{} {
return exampleMachinePool()
}
func ExampleMachinePoolListResponse() interface{} {
return &MachinePoolList{
MachinePools: []*MachinePool{
ExampleMachinePoolResponse().(*MachinePool),
},
}
}

View file

@@ -0,0 +1,248 @@
package v20230401
import "time"
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// OpenShiftClusterList represents a list of OpenShift clusters.
type OpenShiftClusterList struct {
// The list of OpenShift clusters.
OpenShiftClusters []*OpenShiftCluster `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}
// OpenShiftCluster represents an Azure Red Hat OpenShift cluster.
type OpenShiftCluster struct {
// The resource ID.
ID string `json:"id,omitempty" mutable:"case"`
// The resource name.
Name string `json:"name,omitempty" mutable:"case"`
// The resource type.
Type string `json:"type,omitempty" mutable:"case"`
// The resource location.
Location string `json:"location,omitempty"`
// SystemData - The system metadata relating to this resource
SystemData *SystemData `json:"systemData,omitempty"`
// The resource tags.
Tags Tags `json:"tags,omitempty" mutable:"true"`
// The cluster properties.
Properties OpenShiftClusterProperties `json:"properties,omitempty"`
}
// Tags represents an OpenShift cluster's tags.
type Tags map[string]string
// OpenShiftClusterProperties represents an OpenShift cluster's properties.
type OpenShiftClusterProperties struct {
// The cluster provisioning state.
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// The cluster profile.
ClusterProfile ClusterProfile `json:"clusterProfile,omitempty"`
// The console profile.
ConsoleProfile ConsoleProfile `json:"consoleProfile,omitempty"`
// The cluster service principal profile.
ServicePrincipalProfile ServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"`
// The cluster network profile.
NetworkProfile NetworkProfile `json:"networkProfile,omitempty"`
// The cluster master profile.
MasterProfile MasterProfile `json:"masterProfile,omitempty"`
// The cluster worker profiles.
WorkerProfiles []WorkerProfile `json:"workerProfiles,omitempty"`
// The cluster API server profile.
APIServerProfile APIServerProfile `json:"apiserverProfile,omitempty"`
// The cluster ingress profiles.
IngressProfiles []IngressProfile `json:"ingressProfiles,omitempty"`
}
// ProvisioningState represents a provisioning state.
type ProvisioningState string
// ProvisioningState constants.
const (
ProvisioningStateCreating ProvisioningState = "Creating"
ProvisioningStateUpdating ProvisioningState = "Updating"
ProvisioningStateAdminUpdating ProvisioningState = "AdminUpdating"
ProvisioningStateDeleting ProvisioningState = "Deleting"
ProvisioningStateSucceeded ProvisioningState = "Succeeded"
ProvisioningStateFailed ProvisioningState = "Failed"
)
// FipsValidatedModules determines if FIPS is used.
type FipsValidatedModules string
// FipsValidatedModules constants.
const (
FipsValidatedModulesEnabled FipsValidatedModules = "Enabled"
FipsValidatedModulesDisabled FipsValidatedModules = "Disabled"
)
// ClusterProfile represents a cluster profile.
type ClusterProfile struct {
// The pull secret for the cluster.
PullSecret string `json:"pullSecret,omitempty"`
// The domain for the cluster.
Domain string `json:"domain,omitempty"`
// The version of the cluster.
Version string `json:"version,omitempty"`
// The ID of the cluster resource group.
ResourceGroupID string `json:"resourceGroupId,omitempty"`
// If FIPS validated crypto modules are used
FipsValidatedModules FipsValidatedModules `json:"fipsValidatedModules,omitempty"`
}
// ConsoleProfile represents a console profile.
type ConsoleProfile struct {
// The URL to access the cluster console.
URL string `json:"url,omitempty"`
}
// ServicePrincipalProfile represents a service principal profile.
type ServicePrincipalProfile struct {
// The client ID used for the cluster.
ClientID string `json:"clientId,omitempty" mutable:"true"`
// The client secret used for the cluster.
ClientSecret string `json:"clientSecret,omitempty" mutable:"true"`
}
// NetworkProfile represents a network profile.
type NetworkProfile struct {
// The CIDR used for OpenShift/Kubernetes Pods.
PodCIDR string `json:"podCidr,omitempty"`
// The CIDR used for OpenShift/Kubernetes Services.
ServiceCIDR string `json:"serviceCidr,omitempty"`
}
// EncryptionAtHost represents encryption at host state
type EncryptionAtHost string
// EncryptionAtHost constants
const (
EncryptionAtHostEnabled EncryptionAtHost = "Enabled"
EncryptionAtHostDisabled EncryptionAtHost = "Disabled"
)
// MasterProfile represents a master profile.
type MasterProfile struct {
// The size of the master VMs.
VMSize VMSize `json:"vmSize,omitempty"`
// The Azure resource ID of the master subnet.
SubnetID string `json:"subnetId,omitempty"`
// Whether master virtual machines are encrypted at host.
EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"`
// The resource ID of an associated DiskEncryptionSet, if applicable.
DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"`
}
// VM size availability varies by region.
// If a node contains insufficient compute resources (memory, cpu, etc.), pods might fail to run correctly.
// For more details on restricted VM sizes, see: https://docs.microsoft.com/en-us/azure/openshift/support-policies-v4#supported-virtual-machine-sizes
type VMSize string
// WorkerProfile represents a worker profile.
type WorkerProfile struct {
// The worker profile name.
Name string `json:"name,omitempty"`
// The size of the worker VMs.
VMSize VMSize `json:"vmSize,omitempty"`
// The disk size of the worker VMs.
DiskSizeGB int `json:"diskSizeGB,omitempty"`
// The Azure resource ID of the worker subnet.
SubnetID string `json:"subnetId,omitempty"`
// The number of worker VMs.
Count int `json:"count,omitempty"`
// Whether worker virtual machines are encrypted at host.
EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"`
// The resource ID of an associated DiskEncryptionSet, if applicable.
DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"`
}
// APIServerProfile represents an API server profile.
type APIServerProfile struct {
// API server visibility.
Visibility Visibility `json:"visibility,omitempty"`
// The URL to access the cluster API server.
URL string `json:"url,omitempty"`
// The IP of the cluster API server.
IP string `json:"ip,omitempty"`
}
// Visibility represents visibility.
type Visibility string
// Visibility constants
const (
VisibilityPublic Visibility = "Public"
VisibilityPrivate Visibility = "Private"
)
// IngressProfile represents an ingress profile.
type IngressProfile struct {
// The ingress profile name.
Name string `json:"name,omitempty"`
// Ingress visibility.
Visibility Visibility `json:"visibility,omitempty"`
// The IP of the ingress.
IP string `json:"ip,omitempty"`
}
// CreatedByType defines the type of identity that executed the request
type CreatedByType string
const (
CreatedByTypeApplication CreatedByType = "Application"
CreatedByTypeKey CreatedByType = "Key"
CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity"
CreatedByTypeUser CreatedByType = "User"
)
// SystemData metadata pertaining to creation and last modification of the resource.
type SystemData struct {
// The identity that created the resource.
CreatedBy string `json:"createdBy,omitempty"`
// The type of identity that created the resource. Possible values include: 'User', 'Application', 'ManagedIdentity', 'Key'
CreatedByType CreatedByType `json:"createdByType,omitempty"`
// The timestamp of resource creation (UTC).
CreatedAt *time.Time `json:"createdAt,omitempty"`
// The identity that last modified the resource.
LastModifiedBy string `json:"lastModifiedBy,omitempty"`
// The type of identity that last modified the resource. Possible values include: 'User', 'Application', 'ManagedIdentity', 'Key'
LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"`
// The timestamp of resource last modification (UTC).
LastModifiedAt *time.Time `json:"lastModifiedAt,omitempty"`
}

View file

@@ -0,0 +1,183 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
type openShiftClusterConverter struct{}
// ToExternal returns a new external representation of the internal object,
// reading from the subset of the internal object's fields that appear in the
// external representation. ToExternal does not modify its argument; there is
// no pointer aliasing between the passed and returned objects
func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interface{} {
out := &OpenShiftCluster{
ID: oc.ID,
Name: oc.Name,
Type: oc.Type,
Location: oc.Location,
Properties: OpenShiftClusterProperties{
ProvisioningState: ProvisioningState(oc.Properties.ProvisioningState),
ClusterProfile: ClusterProfile{
PullSecret: string(oc.Properties.ClusterProfile.PullSecret),
Domain: oc.Properties.ClusterProfile.Domain,
Version: oc.Properties.ClusterProfile.Version,
ResourceGroupID: oc.Properties.ClusterProfile.ResourceGroupID,
FipsValidatedModules: FipsValidatedModules(oc.Properties.ClusterProfile.FipsValidatedModules),
},
ConsoleProfile: ConsoleProfile{
URL: oc.Properties.ConsoleProfile.URL,
},
ServicePrincipalProfile: ServicePrincipalProfile{
ClientID: oc.Properties.ServicePrincipalProfile.ClientID,
ClientSecret: string(oc.Properties.ServicePrincipalProfile.ClientSecret),
},
NetworkProfile: NetworkProfile{
PodCIDR: oc.Properties.NetworkProfile.PodCIDR,
ServiceCIDR: oc.Properties.NetworkProfile.ServiceCIDR,
},
MasterProfile: MasterProfile{
VMSize: VMSize(oc.Properties.MasterProfile.VMSize),
SubnetID: oc.Properties.MasterProfile.SubnetID,
EncryptionAtHost: EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost),
DiskEncryptionSetID: oc.Properties.MasterProfile.DiskEncryptionSetID,
},
APIServerProfile: APIServerProfile{
Visibility: Visibility(oc.Properties.APIServerProfile.Visibility),
URL: oc.Properties.APIServerProfile.URL,
IP: oc.Properties.APIServerProfile.IP,
},
},
}
if oc.Properties.WorkerProfiles != nil {
out.Properties.WorkerProfiles = make([]WorkerProfile, 0, len(oc.Properties.WorkerProfiles))
for _, p := range oc.Properties.WorkerProfiles {
out.Properties.WorkerProfiles = append(out.Properties.WorkerProfiles, WorkerProfile{
Name: p.Name,
VMSize: VMSize(p.VMSize),
DiskSizeGB: p.DiskSizeGB,
SubnetID: p.SubnetID,
Count: p.Count,
EncryptionAtHost: EncryptionAtHost(p.EncryptionAtHost),
DiskEncryptionSetID: p.DiskEncryptionSetID,
})
}
}
if oc.Properties.IngressProfiles != nil {
out.Properties.IngressProfiles = make([]IngressProfile, 0, len(oc.Properties.IngressProfiles))
for _, p := range oc.Properties.IngressProfiles {
out.Properties.IngressProfiles = append(out.Properties.IngressProfiles, IngressProfile{
Name: p.Name,
Visibility: Visibility(p.Visibility),
IP: p.IP,
})
}
}
if oc.Tags != nil {
out.Tags = make(map[string]string, len(oc.Tags))
for k, v := range oc.Tags {
out.Tags[k] = v
}
}
out.SystemData = &SystemData{
CreatedBy: oc.SystemData.CreatedBy,
CreatedAt: oc.SystemData.CreatedAt,
CreatedByType: CreatedByType(oc.SystemData.CreatedByType),
LastModifiedBy: oc.SystemData.LastModifiedBy,
LastModifiedAt: oc.SystemData.LastModifiedAt,
LastModifiedByType: CreatedByType(oc.SystemData.LastModifiedByType),
}
return out
}
// ToExternalList returns a slice of external representations of the internal
// objects
func (c openShiftClusterConverter) ToExternalList(ocs []*api.OpenShiftCluster, nextLink string) interface{} {
l := &OpenShiftClusterList{
OpenShiftClusters: make([]*OpenShiftCluster, 0, len(ocs)),
NextLink: nextLink,
}
for _, oc := range ocs {
l.OpenShiftClusters = append(l.OpenShiftClusters, c.ToExternal(oc).(*OpenShiftCluster))
}
return l
}
// ToInternal overwrites in place a pre-existing internal object, setting (only)
// all mapped fields from the external representation. ToInternal modifies its
// argument; there is no pointer aliasing between the passed and returned
// objects
func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShiftCluster) {
oc := _oc.(*OpenShiftCluster)
out.ID = oc.ID
out.Name = oc.Name
out.Type = oc.Type
out.Location = oc.Location
out.Tags = nil
if oc.Tags != nil {
out.Tags = make(map[string]string, len(oc.Tags))
for k, v := range oc.Tags {
out.Tags[k] = v
}
}
out.Properties.ProvisioningState = api.ProvisioningState(oc.Properties.ProvisioningState)
out.Properties.ClusterProfile.PullSecret = api.SecureString(oc.Properties.ClusterProfile.PullSecret)
out.Properties.ClusterProfile.Domain = oc.Properties.ClusterProfile.Domain
out.Properties.ClusterProfile.Version = oc.Properties.ClusterProfile.Version
out.Properties.ClusterProfile.ResourceGroupID = oc.Properties.ClusterProfile.ResourceGroupID
out.Properties.ConsoleProfile.URL = oc.Properties.ConsoleProfile.URL
out.Properties.ClusterProfile.FipsValidatedModules = api.FipsValidatedModules(oc.Properties.ClusterProfile.FipsValidatedModules)
out.Properties.ServicePrincipalProfile.ClientID = oc.Properties.ServicePrincipalProfile.ClientID
out.Properties.ServicePrincipalProfile.ClientSecret = api.SecureString(oc.Properties.ServicePrincipalProfile.ClientSecret)
out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR
out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR
out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize)
out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID
out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost)
out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID
out.Properties.WorkerProfiles = nil
if oc.Properties.WorkerProfiles != nil {
out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles))
for i := range oc.Properties.WorkerProfiles {
out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name
out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize)
out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB
out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID
out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count
out.Properties.WorkerProfiles[i].EncryptionAtHost = api.EncryptionAtHost(oc.Properties.WorkerProfiles[i].EncryptionAtHost)
out.Properties.WorkerProfiles[i].DiskEncryptionSetID = oc.Properties.WorkerProfiles[i].DiskEncryptionSetID
}
}
out.Properties.APIServerProfile.Visibility = api.Visibility(oc.Properties.APIServerProfile.Visibility)
out.Properties.APIServerProfile.URL = oc.Properties.APIServerProfile.URL
out.Properties.APIServerProfile.IP = oc.Properties.APIServerProfile.IP
out.Properties.IngressProfiles = nil
if oc.Properties.IngressProfiles != nil {
out.Properties.IngressProfiles = make([]api.IngressProfile, len(oc.Properties.IngressProfiles))
for i := range oc.Properties.IngressProfiles {
out.Properties.IngressProfiles[i].Name = oc.Properties.IngressProfiles[i].Name
out.Properties.IngressProfiles[i].Visibility = api.Visibility(oc.Properties.IngressProfiles[i].Visibility)
out.Properties.IngressProfiles[i].IP = oc.Properties.IngressProfiles[i].IP
}
}
out.SystemData = api.SystemData{
CreatedBy: oc.SystemData.CreatedBy,
CreatedAt: oc.SystemData.CreatedAt,
CreatedByType: api.CreatedByType(oc.SystemData.CreatedByType),
LastModifiedBy: oc.SystemData.LastModifiedBy,
LastModifiedAt: oc.SystemData.LastModifiedAt,
LastModifiedByType: api.CreatedByType(oc.SystemData.LastModifiedByType),
}
}
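// Illustrative sketch only (hypothetical helper, not part of this file):
// the converter contract above means a round trip allocates fresh
// objects, with no pointer aliasing between internal and external.
func exampleRoundTrip(in *api.OpenShiftCluster) *api.OpenShiftCluster {
c := openShiftClusterConverter{}
ext := c.ToExternal(in).(*OpenShiftCluster) // fresh external copy
out := &api.OpenShiftCluster{}
c.ToInternal(ext, out) // overwrites only the mapped fields on out
return out
}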

View file

@@ -0,0 +1,63 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
func exampleOpenShiftCluster() *OpenShiftCluster {
doc := api.ExampleOpenShiftClusterDocument()
return (&openShiftClusterConverter{}).ToExternal(doc.OpenShiftCluster).(*OpenShiftCluster)
}
// ExampleOpenShiftClusterPatchParameter returns an example OpenShiftCluster
// object that an end-user might send to create a cluster in a PATCH request
func ExampleOpenShiftClusterPatchParameter() interface{} {
oc := ExampleOpenShiftClusterPutParameter().(*OpenShiftCluster)
oc.Location = ""
oc.SystemData = nil
return oc
}
// ExampleOpenShiftClusterPutParameter returns an example OpenShiftCluster
// object that an end-user might send to create a cluster in a PUT request
func ExampleOpenShiftClusterPutParameter() interface{} {
oc := exampleOpenShiftCluster()
oc.ID = ""
oc.Name = ""
oc.Type = ""
oc.Properties.ProvisioningState = ""
oc.Properties.ClusterProfile.Version = ""
oc.Properties.ClusterProfile.FipsValidatedModules = FipsValidatedModulesEnabled
oc.Properties.ConsoleProfile.URL = ""
oc.Properties.APIServerProfile.URL = ""
oc.Properties.APIServerProfile.IP = ""
oc.Properties.IngressProfiles[0].IP = ""
oc.Properties.MasterProfile.EncryptionAtHost = EncryptionAtHostEnabled
oc.SystemData = nil
return oc
}
// ExampleOpenShiftClusterResponse returns an example OpenShiftCluster object
// that the RP might return to an end-user
func ExampleOpenShiftClusterResponse() interface{} {
oc := exampleOpenShiftCluster()
oc.Properties.ClusterProfile.PullSecret = ""
oc.Properties.ServicePrincipalProfile.ClientSecret = ""
return oc
}
// ExampleOpenShiftClusterListResponse returns an example OpenShiftClusterList
// object that the RP might return to an end-user
func ExampleOpenShiftClusterListResponse() interface{} {
return &OpenShiftClusterList{
OpenShiftClusters: []*OpenShiftCluster{
ExampleOpenShiftClusterResponse().(*OpenShiftCluster),
},
}
}

View file

@@ -0,0 +1,20 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"encoding/json"
)
// UnmarshalJSON unmarshals tags. We override this to ensure that PATCH
// behaviour overwrites an existing tags map rather than endlessly adding to it
func (t *Tags) UnmarshalJSON(b []byte) error {
var m map[string]string
err := json.Unmarshal(b, &m)
if err != nil {
return err
}
*t = m
return nil
}
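// Illustrative sketch only (hypothetical helper): with the override
// above, unmarshalling a PATCH body replaces the tags map wholesale
// instead of merging into it.
func exampleTagsPatch() Tags {
t := Tags{"keep": "old", "stale": "value"}
// After unmarshalling, "stale" is gone; t is exactly the payload.
_ = json.Unmarshal([]byte(`{"keep":"new"}`), &t)
return t // Tags{"keep": "new"}
}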

View file

@@ -0,0 +1,357 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"fmt"
"net"
"net/http"
"net/url"
"strings"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/validate"
"github.com/Azure/ARO-RP/pkg/util/immutable"
"github.com/Azure/ARO-RP/pkg/util/pullsecret"
"github.com/Azure/ARO-RP/pkg/util/subnet"
"github.com/Azure/ARO-RP/pkg/util/uuid"
)
type openShiftClusterStaticValidator struct {
location string
domain string
requireD2sV3Workers bool
resourceID string
r azure.Resource
}
// Static validates an OpenShift cluster
func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sV3Workers bool, resourceID string) error {
sv.location = location
sv.domain = domain
sv.requireD2sV3Workers = requireD2sV3Workers
sv.resourceID = resourceID
oc := _oc.(*OpenShiftCluster)
var current *OpenShiftCluster
if _current != nil {
current = (&openShiftClusterConverter{}).ToExternal(_current).(*OpenShiftCluster)
}
var err error
sv.r, err = azure.ParseResourceID(sv.resourceID)
if err != nil {
return err
}
err = sv.validate(oc, current == nil)
if err != nil {
return err
}
if current == nil {
return nil
}
return sv.validateDelta(oc, current)
}
func (sv openShiftClusterStaticValidator) validate(oc *OpenShiftCluster, isCreate bool) error {
if !strings.EqualFold(oc.ID, sv.resourceID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeMismatchingResourceID, "id", "The provided resource ID '%s' did not match the name in the Url '%s'.", oc.ID, sv.resourceID)
}
if !strings.EqualFold(oc.Name, sv.r.ResourceName) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeMismatchingResourceName, "name", "The provided resource name '%s' did not match the name in the Url '%s'.", oc.Name, sv.r.ResourceName)
}
if !strings.EqualFold(oc.Type, resourceProviderNamespace+"/"+resourceType) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeMismatchingResourceType, "type", "The provided resource type '%s' did not match the name in the Url '%s'.", oc.Type, resourceProviderNamespace+"/"+resourceType)
}
if !strings.EqualFold(oc.Location, sv.location) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "location", "The provided location '%s' is invalid.", oc.Location)
}
return sv.validateProperties("properties", &oc.Properties, isCreate)
}
func (sv openShiftClusterStaticValidator) validateProperties(path string, p *OpenShiftClusterProperties, isCreate bool) error {
switch p.ProvisioningState {
case ProvisioningStateCreating, ProvisioningStateUpdating,
ProvisioningStateAdminUpdating, ProvisioningStateDeleting,
ProvisioningStateSucceeded, ProvisioningStateFailed:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".provisioningState", "The provided provisioning state '%s' is invalid.", p.ProvisioningState)
}
if err := sv.validateClusterProfile(path+".clusterProfile", &p.ClusterProfile, isCreate); err != nil {
return err
}
if err := sv.validateConsoleProfile(path+".consoleProfile", &p.ConsoleProfile); err != nil {
return err
}
if err := sv.validateServicePrincipalProfile(path+".servicePrincipalProfile", &p.ServicePrincipalProfile); err != nil {
return err
}
if err := sv.validateNetworkProfile(path+".networkProfile", &p.NetworkProfile); err != nil {
return err
}
if err := sv.validateMasterProfile(path+".masterProfile", &p.MasterProfile); err != nil {
return err
}
if err := sv.validateAPIServerProfile(path+".apiserverProfile", &p.APIServerProfile); err != nil {
return err
}
if isCreate {
if len(p.WorkerProfiles) != 1 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".workerProfiles", "There should be exactly one worker profile.")
}
if err := sv.validateWorkerProfile(path+".workerProfiles['"+p.WorkerProfiles[0].Name+"']", &p.WorkerProfiles[0], &p.MasterProfile); err != nil {
return err
}
if len(p.IngressProfiles) != 1 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".ingressProfiles", "There should be exactly one ingress profile.")
}
if err := sv.validateIngressProfile(path+".ingressProfiles['"+p.IngressProfiles[0].Name+"']", &p.IngressProfiles[0]); err != nil {
return err
}
}
return nil
}
func (sv openShiftClusterStaticValidator) validateClusterProfile(path string, cp *ClusterProfile, isCreate bool) error {
if pullsecret.Validate(cp.PullSecret) != nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".pullSecret", "The provided pull secret is invalid.")
}
if isCreate {
if !validate.RxDomainName.MatchString(cp.Domain) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".domain", "The provided domain '%s' is invalid.", cp.Domain)
}
} else {
// We currently do not allow domains with a digit as a first character
// for new clusters, but we already have some existing clusters with
// domains like this and we need to allow customers to update them.
if !validate.RxDomainNameRFC1123.MatchString(cp.Domain) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".domain", "The provided domain '%s' is invalid.", cp.Domain)
}
}
// domain ends .aroapp.io, but doesn't end .<rp-location>.aroapp.io
if strings.HasSuffix(cp.Domain, "."+strings.SplitN(sv.domain, ".", 2)[1]) &&
!strings.HasSuffix(cp.Domain, "."+sv.domain) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".domain", "The provided domain '%s' is invalid.", cp.Domain)
}
// domain is of form multiple.names.<rp-location>.aroapp.io
if strings.HasSuffix(cp.Domain, "."+sv.domain) &&
strings.ContainsRune(strings.TrimSuffix(cp.Domain, "."+sv.domain), '.') {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".domain", "The provided domain '%s' is invalid.", cp.Domain)
}
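// Worked examples of the two checks above, assuming sv.domain is
// "location.aroapp.io" (as in this change's tests):
//   cluster.location.aroapp.io      -> valid managed domain
//   cluster.wronglocation.aroapp.io -> rejected: ends .aroapp.io but not .location.aroapp.io
//   foo.bar.location.aroapp.io      -> rejected: extra label before the managed suffix
//   my.custom.example.com           -> not a managed domain; only the domain regex applies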
if !validate.RxResourceGroupID.MatchString(cp.ResourceGroupID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".resourceGroupId", "The provided resource group '%s' is invalid.", cp.ResourceGroupID)
}
if strings.Split(cp.ResourceGroupID, "/")[2] != sv.r.SubscriptionID {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".resourceGroupId", "The provided resource group '%s' is invalid: must be in same subscription as cluster.", cp.ResourceGroupID)
}
if strings.EqualFold(cp.ResourceGroupID, fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", sv.r.SubscriptionID, sv.r.ResourceGroup)) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".resourceGroupId", "The provided resource group '%s' is invalid: must be different from resourceGroup of the OpenShift cluster object.", cp.ResourceGroupID)
}
switch cp.FipsValidatedModules {
case FipsValidatedModulesDisabled, FipsValidatedModulesEnabled:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".fipsValidatedModules", "The provided value '%s' is invalid.", cp.FipsValidatedModules)
}
return nil
}
func (sv openShiftClusterStaticValidator) validateConsoleProfile(path string, cp *ConsoleProfile) error {
if cp.URL != "" {
if _, err := url.Parse(cp.URL); err != nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".url", "The provided console URL '%s' is invalid.", cp.URL)
}
}
return nil
}
func (sv openShiftClusterStaticValidator) validateServicePrincipalProfile(path string, spp *ServicePrincipalProfile) error {
valid := uuid.IsValid(spp.ClientID)
if !valid {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".clientId", "The provided client ID '%s' is invalid.", spp.ClientID)
}
if spp.ClientSecret == "" {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".clientSecret", "The provided client secret is invalid.")
}
return nil
}
func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np *NetworkProfile) error {
_, pod, err := net.ParseCIDR(np.PodCIDR)
if err != nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".podCidr", "The provided pod CIDR '%s' is invalid: '%s'.", np.PodCIDR, err)
}
if pod.IP.To4() == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".podCidr", "The provided pod CIDR '%s' is invalid: must be IPv4.", np.PodCIDR)
}
{
ones, _ := pod.Mask.Size()
if ones > 18 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".podCidr", "The provided pod CIDR '%s' is invalid: must be /18 or larger.", np.PodCIDR)
}
}
_, service, err := net.ParseCIDR(np.ServiceCIDR)
if err != nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".serviceCidr", "The provided service CIDR '%s' is invalid: '%s'.", np.ServiceCIDR, err)
}
if service.IP.To4() == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".serviceCidr", "The provided service CIDR '%s' is invalid: must be IPv4.", np.ServiceCIDR)
}
{
ones, _ := service.Mask.Size()
if ones > 22 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".serviceCidr", "The provided service CIDR '%s' is invalid: must be /22 or larger.", np.ServiceCIDR)
}
}
return nil
}
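// Illustrative helper only (not used by the validator): mirrors the mask
// checks above, where "must be /18 or larger" means a prefix length of
// at most 18 bits, i.e. a network at least as large as a /18.
func exampleCIDRAtLeast(cidr string, maxOnes int) bool {
_, n, err := net.ParseCIDR(cidr)
if err != nil {
return false
}
ones, _ := n.Mask.Size()
return ones <= maxOnes // e.g. 10.128.0.0/14 passes for maxOnes=18
}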
func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile) error {
if !validate.VMSizeIsValid(api.VMSize(mp.VMSize), sv.requireD2sV3Workers, true) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", "The provided master VM size '%s' is invalid.", mp.VMSize)
}
if !validate.RxSubnetID.MatchString(mp.SubnetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided master VM subnet '%s' is invalid.", mp.SubnetID)
}
sr, err := azure.ParseResourceID(mp.SubnetID)
if err != nil {
return err
}
if sr.SubscriptionID != sv.r.SubscriptionID {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided master VM subnet '%s' is invalid: must be in same subscription as cluster.", mp.SubnetID)
}
switch mp.EncryptionAtHost {
case EncryptionAtHostDisabled, EncryptionAtHostEnabled:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".encryptionAtHost", "The provided value '%s' is invalid.", mp.EncryptionAtHost)
}
if mp.DiskEncryptionSetID != "" {
if !validate.RxDiskEncryptionSetID.MatchString(mp.DiskEncryptionSetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided master disk encryption set '%s' is invalid.", mp.DiskEncryptionSetID)
}
desr, err := azure.ParseResourceID(mp.DiskEncryptionSetID)
if err != nil {
return err
}
if desr.SubscriptionID != sv.r.SubscriptionID {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided master disk encryption set '%s' is invalid: must be in same subscription as cluster.", mp.DiskEncryptionSetID)
}
}
return nil
}
func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp *WorkerProfile, mp *MasterProfile) error {
if wp.Name != "worker" {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", "The provided worker name '%s' is invalid.", wp.Name)
}
if !validate.VMSizeIsValid(api.VMSize(wp.VMSize), sv.requireD2sV3Workers, false) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", "The provided worker VM size '%s' is invalid.", wp.VMSize)
}
if !validate.DiskSizeIsValid(wp.DiskSizeGB) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", "The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)
}
if !validate.RxSubnetID.MatchString(wp.SubnetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided worker VM subnet '%s' is invalid.", wp.SubnetID)
}
switch wp.EncryptionAtHost {
case EncryptionAtHostDisabled, EncryptionAtHostEnabled:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".encryptionAtHost", "The provided value '%s' is invalid.", wp.EncryptionAtHost)
}
workerVnetID, _, err := subnet.Split(wp.SubnetID)
if err != nil {
return err
}
masterVnetID, _, err := subnet.Split(mp.SubnetID)
if err != nil {
return err
}
if !strings.EqualFold(masterVnetID, workerVnetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided worker VM subnet '%s' is invalid: must be in the same vnet as master VM subnet '%s'.", wp.SubnetID, mp.SubnetID)
}
if strings.EqualFold(mp.SubnetID, wp.SubnetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided worker VM subnet '%s' is invalid: must be different to master VM subnet '%s'.", wp.SubnetID, mp.SubnetID)
}
if wp.Count < 2 || wp.Count > 50 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".count", "The provided worker count '%d' is invalid.", wp.Count)
}
if !strings.EqualFold(mp.DiskEncryptionSetID, wp.DiskEncryptionSetID) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided worker disk encryption set '%s' is invalid: must be the same as master disk encryption set '%s'.", wp.DiskEncryptionSetID, mp.DiskEncryptionSetID)
}
return nil
}
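// Worked example of the subnet checks above (values illustrative;
// subnet.Split is assumed to return the vnet ID portion of a subnet ID):
// master .../virtualNetworks/test-vnet/subnets/master and worker
// .../virtualNetworks/test-vnet/subnets/worker share a vnet and differ
// in subnet name, so they validate; a worker subnet in another vnet, or
// one equal to the master subnet, is rejected.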
func (sv openShiftClusterStaticValidator) validateAPIServerProfile(path string, ap *APIServerProfile) error {
switch ap.Visibility {
case VisibilityPublic, VisibilityPrivate:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".visibility", "The provided visibility '%s' is invalid.", ap.Visibility)
}
if ap.URL != "" {
if _, err := url.Parse(ap.URL); err != nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".url", "The provided URL '%s' is invalid.", ap.URL)
}
}
if ap.IP != "" {
ip := net.ParseIP(ap.IP)
if ip == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".ip", "The provided IP '%s' is invalid.", ap.IP)
}
if ip.To4() == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".ip", "The provided IP '%s' is invalid: must be IPv4.", ap.IP)
}
}
return nil
}
func (sv openShiftClusterStaticValidator) validateIngressProfile(path string, p *IngressProfile) error {
if p.Name != "default" {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", "The provided ingress name '%s' is invalid.", p.Name)
}
switch p.Visibility {
case VisibilityPublic, VisibilityPrivate:
default:
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".visibility", "The provided visibility '%s' is invalid.", p.Visibility)
}
if p.IP != "" {
ip := net.ParseIP(p.IP)
if ip == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".ip", "The provided IP '%s' is invalid.", p.IP)
}
if ip.To4() == nil {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".ip", "The provided IP '%s' is invalid: must be IPv4.", p.IP)
}
}
return nil
}
func (sv openShiftClusterStaticValidator) validateDelta(oc, current *OpenShiftCluster) error {
err := immutable.Validate("", oc, current)
if err != nil {
err := err.(*immutable.ValidationError)
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodePropertyChangeNotAllowed, err.Target, err.Message)
}
return nil
}

View file

@@ -0,0 +1,946 @@
package v20230401
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/uuid"
"github.com/Azure/ARO-RP/pkg/util/version"
"github.com/Azure/ARO-RP/test/validate"
)
type validateTest struct {
name string
clusterName *string
location *string
current func(oc *OpenShiftCluster)
modify func(oc *OpenShiftCluster)
requireD2sV3Workers bool
wantErr string
}
type testMode string
const (
testModeCreate testMode = "Create"
testModeUpdate testMode = "Update"
)
var (
subscriptionID = "00000000-0000-0000-0000-000000000000"
)
func getResourceID(clusterName string) string {
return fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/microsoft.redhatopenshift/openshiftclusters/%s", subscriptionID, clusterName)
}
func validOpenShiftCluster(name, location string) *OpenShiftCluster {
timestamp, err := time.Parse(time.RFC3339, "2021-01-23T12:34:54.0000000Z")
if err != nil {
panic(err)
}
oc := &OpenShiftCluster{
ID: getResourceID(name),
Name: name,
Type: "Microsoft.RedHatOpenShift/OpenShiftClusters",
Location: location,
Tags: Tags{
"key": "value",
},
SystemData: &SystemData{
CreatedBy: "00000000-0000-0000-0000-000000000000",
CreatedByType: CreatedByTypeApplication,
CreatedAt: &timestamp,
LastModifiedBy: "00000000-0000-0000-0000-000000000000",
LastModifiedByType: CreatedByTypeApplication,
LastModifiedAt: &timestamp,
},
Properties: OpenShiftClusterProperties{
ProvisioningState: ProvisioningStateSucceeded,
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: version.DefaultInstallStream.Version.String(),
ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID),
FipsValidatedModules: FipsValidatedModulesDisabled,
},
ConsoleProfile: ConsoleProfile{
URL: "https://console-openshift-console.apps.cluster.location.aroapp.io/",
},
ServicePrincipalProfile: ServicePrincipalProfile{
ClientSecret: "clientSecret",
ClientID: "11111111-1111-1111-1111-111111111111",
},
NetworkProfile: NetworkProfile{
PodCIDR: "10.128.0.0/14",
ServiceCIDR: "172.30.0.0/16",
},
MasterProfile: MasterProfile{
VMSize: "Standard_D8s_v3",
EncryptionAtHost: EncryptionAtHostDisabled,
SubnetID: fmt.Sprintf("/subscriptions/%s/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master", subscriptionID),
},
WorkerProfiles: []WorkerProfile{
{
Name: "worker",
VMSize: "Standard_D4s_v3",
EncryptionAtHost: EncryptionAtHostDisabled,
DiskSizeGB: 128,
SubnetID: fmt.Sprintf("/subscriptions/%s/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/worker", subscriptionID),
Count: 3,
},
},
APIServerProfile: APIServerProfile{
Visibility: VisibilityPublic,
URL: "https://api.cluster.location.aroapp.io:6443/",
IP: "1.2.3.4",
},
IngressProfiles: []IngressProfile{
{
Name: "default",
Visibility: VisibilityPublic,
IP: "1.2.3.4",
},
},
},
}
return oc
}
func runTests(t *testing.T, mode testMode, tests []*validateTest) {
t.Run(string(mode), func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// default values if not set
if tt.location == nil {
tt.location = to.StringPtr("location")
}
if tt.clusterName == nil {
tt.clusterName = to.StringPtr("resourceName")
}
v := &openShiftClusterStaticValidator{
location: *tt.location,
domain: "location.aroapp.io",
requireD2sV3Workers: tt.requireD2sV3Workers,
resourceID: getResourceID(*tt.clusterName),
r: azure.Resource{
SubscriptionID: subscriptionID,
ResourceGroup: "resourceGroup",
Provider: "Microsoft.RedHatOpenShift",
ResourceType: "openshiftClusters",
ResourceName: *tt.clusterName,
},
}
validOCForTest := func() *OpenShiftCluster {
oc := validOpenShiftCluster(*tt.clusterName, *tt.location)
if tt.current != nil {
tt.current(oc)
}
return oc
}
oc := validOCForTest()
if tt.modify != nil {
tt.modify(oc)
}
var current *api.OpenShiftCluster
if mode == testModeUpdate {
current = &api.OpenShiftCluster{}
(&openShiftClusterConverter{}).ToInternal(validOCForTest(), current)
}
err := v.Static(oc, current, v.location, v.domain, tt.requireD2sV3Workers, v.resourceID)
if err == nil {
if tt.wantErr != "" {
t.Errorf("wanted error '%v', got nil", tt.wantErr)
}
} else {
if err.Error() != tt.wantErr {
t.Errorf("wanted error '%v', got '%v'", tt.wantErr, err)
}
cloudErr := err.(*api.CloudError)
if cloudErr.StatusCode != http.StatusBadRequest {
t.Error(cloudErr.StatusCode)
}
if cloudErr.Target == "" {
t.Error("target is required")
}
validate.CloudError(t, err)
}
})
}
})
}
func TestOpenShiftClusterStaticValidate(t *testing.T) {
commonTests := []*validateTest{
{
name: "valid",
},
{
name: "id wrong",
modify: func(oc *OpenShiftCluster) {
oc.ID = "wrong"
},
wantErr: "400: MismatchingResourceID: id: The provided resource ID 'wrong' did not match the name in the Url '/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup/providers/microsoft.redhatopenshift/openshiftclusters/resourceName'.",
},
{
name: "name wrong",
modify: func(oc *OpenShiftCluster) {
oc.Name = "wrong"
},
wantErr: "400: MismatchingResourceName: name: The provided resource name 'wrong' did not match the name in the Url 'resourceName'.",
},
{
name: "type wrong",
modify: func(oc *OpenShiftCluster) {
oc.Type = "wrong"
},
wantErr: "400: MismatchingResourceType: type: The provided resource type 'wrong' did not match the name in the Url 'Microsoft.RedHatOpenShift/openShiftClusters'.",
},
{
name: "location invalid",
modify: func(oc *OpenShiftCluster) {
oc.Location = "invalid"
},
wantErr: "400: InvalidParameter: location: The provided location 'invalid' is invalid.",
},
}
runTests(t, testModeCreate, commonTests)
runTests(t, testModeUpdate, commonTests)
}

func TestOpenShiftClusterStaticValidateProperties(t *testing.T) {
commonTests := []*validateTest{
{
name: "valid",
},
{
name: "provisioningState invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ProvisioningState = "invalid"
},
wantErr: "400: InvalidParameter: properties.provisioningState: The provided provisioning state 'invalid' is invalid.",
},
}
createTests := []*validateTest{
{
name: "no workerProfiles invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles = nil
},
wantErr: "400: InvalidParameter: properties.workerProfiles: There should be exactly one worker profile.",
},
{
name: "multiple workerProfiles invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles = []WorkerProfile{{}, {}}
},
wantErr: "400: InvalidParameter: properties.workerProfiles: There should be exactly one worker profile.",
},
}
runTests(t, testModeCreate, createTests)
runTests(t, testModeCreate, commonTests)
runTests(t, testModeUpdate, commonTests)
}

func TestOpenShiftClusterStaticValidateClusterProfile(t *testing.T) {
commonTests := []*validateTest{
{
name: "valid",
},
{
name: "pull secret not a map",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.PullSecret = "1"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.pullSecret: The provided pull secret is invalid.",
},
{
name: "pull secret invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.PullSecret = "{"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.pullSecret: The provided pull secret is invalid.",
},
{
name: "empty domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = ""
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain '' is invalid.",
},
{
name: "upper case domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "BAD"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain 'BAD' is invalid.",
},
{
name: "domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "!"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain '!' is invalid.",
},
{
name: "wrong location managed domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "cluster.wronglocation.aroapp.io"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain 'cluster.wronglocation.aroapp.io' is invalid.",
},
{
name: "double part managed domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "foo.bar.location.aroapp.io"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain 'foo.bar.location.aroapp.io' is invalid.",
},
{
name: "resourceGroupId invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.ResourceGroupID = "invalid"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.resourceGroupId: The provided resource group 'invalid' is invalid.",
},
{
name: "cluster resource group subscriptionId not matching cluster subscriptionId",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.ResourceGroupID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourcegroups/test-cluster"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.resourceGroupId: The provided resource group '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourcegroups/test-cluster' is invalid: must be in same subscription as cluster.",
},
{
name: "cluster resourceGroup and external resourceGroup equal",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.ResourceGroupID = "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.resourceGroupId: The provided resource group '/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup' is invalid: must be different from resourceGroup of the OpenShift cluster object.",
},
{
name: "fips validated modules invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.FipsValidatedModules = "invalid"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.fipsValidatedModules: The provided value 'invalid' is invalid.",
},
{
name: "fips validated modules empty",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.FipsValidatedModules = ""
},
wantErr: "400: InvalidParameter: properties.clusterProfile.fipsValidatedModules: The provided value '' is invalid.",
},
}
createTests := []*validateTest{
{
name: "empty pull secret valid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.PullSecret = ""
},
},
{
name: "leading digit domain invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "4k7f9clk"
},
wantErr: "400: InvalidParameter: properties.clusterProfile.domain: The provided domain '4k7f9clk' is invalid.",
},
}
updateTests := []*validateTest{
{
name: "leading digit domain valid",
current: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.Domain = "4k7f9clk"
},
},
}
runTests(t, testModeCreate, createTests)
runTests(t, testModeCreate, commonTests)
runTests(t, testModeUpdate, updateTests)
runTests(t, testModeUpdate, commonTests)
}

func TestOpenShiftClusterStaticValidateConsoleProfile(t *testing.T) {
commonTests := []*validateTest{
{
name: "valid",
},
{
name: "console url invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ConsoleProfile.URL = "\x00"
},
wantErr: "400: InvalidParameter: properties.consoleProfile.url: The provided console URL '\x00' is invalid.",
},
}
createTests := []*validateTest{
{
name: "empty console url valid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ConsoleProfile.URL = ""
},
},
}
runTests(t, testModeCreate, createTests)
runTests(t, testModeCreate, commonTests)
runTests(t, testModeUpdate, commonTests)
}

func TestOpenShiftClusterStaticValidateServicePrincipalProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "clientID invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ServicePrincipalProfile.ClientID = "invalid"
},
wantErr: "400: InvalidParameter: properties.servicePrincipalProfile.clientId: The provided client ID 'invalid' is invalid.",
},
{
name: "empty clientSecret invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ServicePrincipalProfile.ClientSecret = ""
},
wantErr: "400: InvalidParameter: properties.servicePrincipalProfile.clientSecret: The provided client secret is invalid.",
},
}
runTests(t, testModeCreate, tests)
runTests(t, testModeUpdate, tests)
}

func TestOpenShiftClusterStaticValidateNetworkProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "podCidr invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.PodCIDR = "invalid"
},
wantErr: "400: InvalidParameter: properties.networkProfile.podCidr: The provided pod CIDR 'invalid' is invalid: 'invalid CIDR address: invalid'.",
},
{
name: "ipv6 podCidr invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.PodCIDR = "::0/0"
},
wantErr: "400: InvalidParameter: properties.networkProfile.podCidr: The provided pod CIDR '::0/0' is invalid: must be IPv4.",
},
{
name: "serviceCidr invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.ServiceCIDR = "invalid"
},
wantErr: "400: InvalidParameter: properties.networkProfile.serviceCidr: The provided service CIDR 'invalid' is invalid: 'invalid CIDR address: invalid'.",
},
{
name: "ipv6 serviceCidr invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.ServiceCIDR = "::0/0"
},
wantErr: "400: InvalidParameter: properties.networkProfile.serviceCidr: The provided service CIDR '::0/0' is invalid: must be IPv4.",
},
{
name: "podCidr too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.PodCIDR = "10.0.0.0/19"
},
wantErr: "400: InvalidParameter: properties.networkProfile.podCidr: The provided vnet CIDR '10.0.0.0/19' is invalid: must be /18 or larger.",
},
{
name: "serviceCidr too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.NetworkProfile.ServiceCIDR = "10.0.0.0/23"
},
wantErr: "400: InvalidParameter: properties.networkProfile.serviceCidr: The provided vnet CIDR '10.0.0.0/23' is invalid: must be /22 or larger.",
},
}
runTests(t, testModeCreate, tests)
runTests(t, testModeUpdate, tests)
}

func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "vmSize unsupported",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3"
},
wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.",
},
{
name: "subnetId invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.SubnetID = "invalid"
},
wantErr: "400: InvalidParameter: properties.masterProfile.subnetId: The provided master VM subnet 'invalid' is invalid.",
},
{
name: "subnet subscriptionId not matching cluster subscriptionId",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.SubnetID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourcegroups/test-vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master"
},
wantErr: "400: InvalidParameter: properties.masterProfile.subnetId: The provided master VM subnet '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourcegroups/test-vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master' is invalid: must be in same subscription as cluster.",
},
{
name: "disk encryption set is invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.DiskEncryptionSetID = "invalid"
oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = "invalid"
},
wantErr: "400: InvalidParameter: properties.masterProfile.diskEncryptionSetId: The provided master disk encryption set 'invalid' is invalid.",
},
{
name: "disk encryption set not matching cluster subscriptionId",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.DiskEncryptionSetID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1"
},
wantErr: "400: InvalidParameter: properties.masterProfile.diskEncryptionSetId: The provided master disk encryption set '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1' is invalid: must be in same subscription as cluster.",
},
{
name: "encryption at host invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.EncryptionAtHost = "Banana"
},
wantErr: "400: InvalidParameter: properties.masterProfile.encryptionAtHost: The provided value 'Banana' is invalid.",
},
{
name: "encryption at host empty",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.EncryptionAtHost = ""
},
wantErr: "400: InvalidParameter: properties.masterProfile.encryptionAtHost: The provided value '' is invalid.",
},
}
createTests := []*validateTest{
{
name: "disk encryption set is valid",
modify: func(oc *OpenShiftCluster) {
desID := fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set", subscriptionID)
oc.Properties.MasterProfile.DiskEncryptionSetID = desID
oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID
},
},
}
runTests(t, testModeCreate, createTests)
runTests(t, testModeCreate, tests)
runTests(t, testModeUpdate, tests)
}

func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "name invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Name = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['invalid'].name: The provided worker name 'invalid' is invalid.",
},
{
name: "vmSize invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.",
},
{
name: "vmSize too small (prod)",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.",
},
{
name: "vmSize too big (dev)",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3"
},
requireD2sV3Workers: true,
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D4s_v3' is invalid.",
},
{
name: "disk too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].DiskSizeGB = 127
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].diskSizeGB: The provided worker disk size '127' is invalid.",
},
{
name: "subnetId invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = "invalid"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet 'invalid' is invalid.",
},
{
name: "master and worker subnets not in same vnet",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = fmt.Sprintf("/subscriptions/%s/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/different-vnet/subnets/worker", subscriptionID)
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/different-vnet/subnets/worker' is invalid: must be in the same vnet as master VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master'.",
},
{
name: "master and worker subnets not different",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = oc.Properties.MasterProfile.SubnetID
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master' is invalid: must be different to master VM subnet '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master'.",
},
{
name: "count too small",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Count = 1
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].count: The provided worker count '1' is invalid.",
},
{
name: "count too big",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].Count = 51
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].count: The provided worker count '51' is invalid.",
},
{
name: "disk encryption set not matching master disk encryption set",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.DiskEncryptionSetID = fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set", subscriptionID)
oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].subnetId: The provided worker disk encryption set '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1' is invalid: must be the same as master disk encryption set '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set'.",
},
{
name: "encryption at host invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].EncryptionAtHost = "Banana"
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].encryptionAtHost: The provided value 'Banana' is invalid.",
},
{
name: "encryption at host empty",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].EncryptionAtHost = ""
},
wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].encryptionAtHost: The provided value '' is invalid.",
},
}
// We do not perform this validation on update
runTests(t, testModeCreate, tests)
}

func TestOpenShiftClusterStaticValidateAPIServerProfile(t *testing.T) {
commonTests := []*validateTest{
{
name: "valid",
},
{
name: "visibility invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.Visibility = "invalid"
},
wantErr: "400: InvalidParameter: properties.apiserverProfile.visibility: The provided visibility 'invalid' is invalid.",
},
{
name: "url invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.URL = "\x00"
},
wantErr: "400: InvalidParameter: properties.apiserverProfile.url: The provided URL '\x00' is invalid.",
},
{
name: "ip invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.IP = "invalid"
},
wantErr: "400: InvalidParameter: properties.apiserverProfile.ip: The provided IP 'invalid' is invalid.",
},
{
name: "ipv6 ip invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.IP = "::"
},
wantErr: "400: InvalidParameter: properties.apiserverProfile.ip: The provided IP '::' is invalid: must be IPv4.",
},
}
createTests := []*validateTest{
{
name: "empty url valid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.URL = ""
},
},
{
name: "empty ip valid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.IP = ""
},
},
}
runTests(t, testModeCreate, createTests)
runTests(t, testModeCreate, commonTests)
runTests(t, testModeUpdate, commonTests)
}

func TestOpenShiftClusterStaticValidateIngressProfile(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "name invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].Name = "invalid"
},
wantErr: "400: InvalidParameter: properties.ingressProfiles['invalid'].name: The provided ingress name 'invalid' is invalid.",
},
{
name: "visibility invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].Visibility = "invalid"
},
wantErr: "400: InvalidParameter: properties.ingressProfiles['default'].visibility: The provided visibility 'invalid' is invalid.",
},
{
name: "ip invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].IP = "invalid"
},
wantErr: "400: InvalidParameter: properties.ingressProfiles['default'].ip: The provided IP 'invalid' is invalid.",
},
{
name: "ipv6 ip invalid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].IP = "::"
},
wantErr: "400: InvalidParameter: properties.ingressProfiles['default'].ip: The provided IP '::' is invalid: must be IPv4.",
},
{
name: "empty ip valid",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].IP = ""
},
},
}
// We don't validate these fields on update: they are immutable, and changes
// are instead rejected by delta validation driven by the "mutable" struct
// tag (see the sketch after this function).
runTests(t, testModeCreate, tests)
}
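
As context for the "mutable" note above: fields in these external API structs carry a mutable struct tag (the OpenShiftVersion type later in this diff shows mutable:"case"), and delta validation compares old and new values, rejecting any change the tag does not permit. Below is a minimal sketch of that pattern using a simplified reflection walker; the struct, names and error text are illustrative, not the RP's actual validator.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// profile is a stand-in API struct: IP is immutable, Name may change case only.
type profile struct {
	IP   string `json:"ip"`
	Name string `json:"name" mutable:"case"`
}

// validateDelta rejects changes to fields whose mutable tag does not allow them.
func validateDelta(before, after profile) error {
	t := reflect.TypeOf(before)
	bv, av := reflect.ValueOf(before), reflect.ValueOf(after)

	for i := 0; i < t.NumField(); i++ {
		b, a := bv.Field(i).String(), av.Field(i).String()
		if b == a {
			continue
		}
		if t.Field(i).Tag.Get("mutable") == "case" && strings.EqualFold(b, a) {
			continue // a case-only change is permitted
		}
		name := t.Field(i).Tag.Get("json")
		return fmt.Errorf("400: PropertyChangeNotAllowed: %s: Changing property '%s' is not allowed.", name, name)
	}

	return nil
}

func main() {
	fmt.Println(validateDelta(profile{"1.2.3.4", "default"}, profile{"1.2.3.4", "DEFAULT"})) // <nil>
	fmt.Println(validateDelta(profile{"1.2.3.4", "default"}, profile{"2.3.4.5", "default"})) // 400: PropertyChangeNotAllowed: ip: ...
}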

func TestOpenShiftClusterStaticValidateDelta(t *testing.T) {
tests := []*validateTest{
{
name: "valid",
},
{
name: "valid id case change",
modify: func(oc *OpenShiftCluster) { oc.ID = strings.ToUpper(oc.ID) },
},
{
name: "valid name case change",
modify: func(oc *OpenShiftCluster) { oc.Name = strings.ToUpper(oc.Name) },
},
{
name: "valid type case change",
modify: func(oc *OpenShiftCluster) { oc.Type = strings.ToUpper(oc.Type) },
},
{
name: "location change",
modify: func(oc *OpenShiftCluster) { oc.Location = strings.ToUpper(oc.Location) },
wantErr: "400: PropertyChangeNotAllowed: location: Changing property 'location' is not allowed.",
},
{
name: "valid tags change",
modify: func(oc *OpenShiftCluster) { oc.Tags = Tags{"new": "value"} },
},
{
name: "provisioningState change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ProvisioningState = ProvisioningStateFailed },
wantErr: "400: PropertyChangeNotAllowed: properties.provisioningState: Changing property 'properties.provisioningState' is not allowed.",
},
{
name: "console url change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ConsoleProfile.URL = "invalid" },
wantErr: "400: PropertyChangeNotAllowed: properties.consoleProfile.url: Changing property 'properties.consoleProfile.url' is not allowed.",
},
{
name: "pull secret change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ClusterProfile.PullSecret = `{"auths":{}}` },
wantErr: "400: PropertyChangeNotAllowed: properties.clusterProfile.pullSecret: Changing property 'properties.clusterProfile.pullSecret' is not allowed.",
},
{
name: "domain change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ClusterProfile.Domain = "invalid" },
wantErr: "400: PropertyChangeNotAllowed: properties.clusterProfile.domain: Changing property 'properties.clusterProfile.domain' is not allowed.",
},
{
name: "version change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ClusterProfile.Version = "4.3.999" },
wantErr: "400: PropertyChangeNotAllowed: properties.clusterProfile.version: Changing property 'properties.clusterProfile.version' is not allowed.",
},
{
name: "resource group change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ClusterProfile.ResourceGroupID = oc.Properties.ClusterProfile.ResourceGroupID[:strings.LastIndexByte(oc.Properties.ClusterProfile.ResourceGroupID, '/')] + "/changed"
},
wantErr: "400: PropertyChangeNotAllowed: properties.clusterProfile.resourceGroupId: Changing property 'properties.clusterProfile.resourceGroupId' is not allowed.",
},
{
name: "apiServer private change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.APIServerProfile.Visibility = VisibilityPrivate
},
wantErr: "400: PropertyChangeNotAllowed: properties.apiserverProfile.visibility: Changing property 'properties.apiserverProfile.visibility' is not allowed.",
},
{
name: "apiServer url change",
modify: func(oc *OpenShiftCluster) { oc.Properties.APIServerProfile.URL = "invalid" },
wantErr: "400: PropertyChangeNotAllowed: properties.apiserverProfile.url: Changing property 'properties.apiserverProfile.url' is not allowed.",
},
{
name: "apiServer ip change",
modify: func(oc *OpenShiftCluster) { oc.Properties.APIServerProfile.IP = "2.3.4.5" },
wantErr: "400: PropertyChangeNotAllowed: properties.apiserverProfile.ip: Changing property 'properties.apiserverProfile.ip' is not allowed.",
},
{
name: "ingress private change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.IngressProfiles[0].Visibility = VisibilityPrivate
},
wantErr: "400: PropertyChangeNotAllowed: properties.ingressProfiles['default'].visibility: Changing property 'properties.ingressProfiles['default'].visibility' is not allowed.",
},
{
name: "ingress ip change",
modify: func(oc *OpenShiftCluster) { oc.Properties.IngressProfiles[0].IP = "2.3.4.5" },
wantErr: "400: PropertyChangeNotAllowed: properties.ingressProfiles['default'].ip: Changing property 'properties.ingressProfiles['default'].ip' is not allowed.",
},
{
name: "clientId change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.ServicePrincipalProfile.ClientID = uuid.DefaultGenerator.Generate()
},
},
{
name: "clientSecret change",
modify: func(oc *OpenShiftCluster) { oc.Properties.ServicePrincipalProfile.ClientSecret = "invalid" },
},
{
name: "podCidr change",
modify: func(oc *OpenShiftCluster) { oc.Properties.NetworkProfile.PodCIDR = "0.0.0.0/0" },
wantErr: "400: PropertyChangeNotAllowed: properties.networkProfile.podCidr: Changing property 'properties.networkProfile.podCidr' is not allowed.",
},
{
name: "serviceCidr change",
modify: func(oc *OpenShiftCluster) { oc.Properties.NetworkProfile.ServiceCIDR = "0.0.0.0/0" },
wantErr: "400: PropertyChangeNotAllowed: properties.networkProfile.serviceCidr: Changing property 'properties.networkProfile.serviceCidr' is not allowed.",
},
{
name: "master subnetId change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID[:strings.LastIndexByte(oc.Properties.MasterProfile.SubnetID, '/')] + "/changed"
},
wantErr: "400: PropertyChangeNotAllowed: properties.masterProfile.subnetId: Changing property 'properties.masterProfile.subnetId' is not allowed.",
},
{
name: "worker name change",
modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].Name = "new-name" },
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['new-name'].name: Changing property 'properties.workerProfiles['new-name'].name' is not allowed.",
},
{
name: "worker vmSize change",
modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D8s_v3" },
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['worker'].vmSize: Changing property 'properties.workerProfiles['worker'].vmSize' is not allowed.",
},
{
name: "worker diskSizeGB change",
modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].DiskSizeGB++ },
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['worker'].diskSizeGB: Changing property 'properties.workerProfiles['worker'].diskSizeGB' is not allowed.",
},
{
name: "worker subnetId change",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles[0].SubnetID = oc.Properties.WorkerProfiles[0].SubnetID[:strings.LastIndexByte(oc.Properties.WorkerProfiles[0].SubnetID, '/')] + "/changed"
},
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['worker'].subnetId: Changing property 'properties.workerProfiles['worker'].subnetId' is not allowed.",
},
{
name: "workerProfiles count change",
modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].Count++ },
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['worker'].count: Changing property 'properties.workerProfiles['worker'].count' is not allowed.",
},
{
name: "number of workerProfiles changes",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles = []WorkerProfile{{}, {}}
},
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles: Changing property 'properties.workerProfiles' is not allowed.",
},
{
name: "workerProfiles set to nil",
modify: func(oc *OpenShiftCluster) {
oc.Properties.WorkerProfiles = nil
},
wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles: Changing property 'properties.workerProfiles' is not allowed.",
},
{
name: "systemData set to empty",
modify: func(oc *OpenShiftCluster) {
oc.SystemData = &SystemData{}
},
wantErr: "400: PropertyChangeNotAllowed: systemData.createdBy: Changing property 'systemData.createdBy' is not allowed.",
},
{
name:   "systemData lastModifiedBy changed",
modify: func(oc *OpenShiftCluster) {
oc.SystemData.LastModifiedBy = "Bob"
},
wantErr: "400: PropertyChangeNotAllowed: systemData.lastModifiedBy: Changing property 'systemData.lastModifiedBy' is not allowed.",
},
}
runTests(t, testModeUpdate, tests)
}

View file

@@ -0,0 +1,10 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

// OpenShiftClusterAdminKubeconfig represents an OpenShift cluster's admin kubeconfig.
type OpenShiftClusterAdminKubeconfig struct {
	// The base64-encoded kubeconfig file.
	Kubeconfig []byte `json:"kubeconfig,omitempty"`
}
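
Since Kubeconfig is a []byte, Go's encoding/json marshals it as a base64 string on the wire, which is what the field's doc comment refers to. A minimal, self-contained illustration (the "{}" payload and the stand-in type name are just for demonstration):

package main

import (
	"encoding/json"
	"fmt"
)

type adminKubeconfig struct {
	Kubeconfig []byte `json:"kubeconfig,omitempty"`
}

func main() {
	b, _ := json.Marshal(adminKubeconfig{Kubeconfig: []byte("{}")})
	fmt.Println(string(b)) // prints {"kubeconfig":"e30="}; "e30=" is base64 for "{}"
}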

View file

@@ -0,0 +1,21 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

type openShiftClusterAdminKubeconfigConverter struct{}

// ToExternal returns a new external representation
// of the internal object, reading from the subset of the internal object's
// fields that appear in the external representation. ToExternal does not
// modify its argument; there is no pointer aliasing between the passed and
// returned objects.
func (openShiftClusterAdminKubeconfigConverter) ToExternal(oc *api.OpenShiftCluster) interface{} {
	return &OpenShiftClusterAdminKubeconfig{
		Kubeconfig: oc.Properties.UserAdminKubeconfig,
	}
}

View file

@@ -0,0 +1,12 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

// ExampleOpenShiftClusterAdminKubeconfigResponse returns an example
// OpenShiftClusterAdminKubeconfig object that the RP might return to an end-user.
func ExampleOpenShiftClusterAdminKubeconfigResponse() interface{} {
	return &OpenShiftClusterAdminKubeconfig{
		Kubeconfig: []byte("{}"),
	}
}

View file

@@ -0,0 +1,13 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

// OpenShiftClusterCredentials represents an OpenShift cluster's credentials.
type OpenShiftClusterCredentials struct {
	// The username for the kubeadmin user.
	KubeadminUsername string `json:"kubeadminUsername,omitempty"`

	// The password for the kubeadmin user.
	KubeadminPassword string `json:"kubeadminPassword,omitempty"`
}

View file

@@ -0,0 +1,24 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

type openShiftClusterCredentialsConverter struct{}

// ToExternal returns a new external representation
// of the internal object, reading from the subset of the internal object's
// fields that appear in the external representation. ToExternal does not
// modify its argument; there is no pointer aliasing between the passed and
// returned objects.
func (openShiftClusterCredentialsConverter) ToExternal(oc *api.OpenShiftCluster) interface{} {
	out := &OpenShiftClusterCredentials{
		KubeadminUsername: "kubeadmin",
		KubeadminPassword: string(oc.Properties.KubeadminPassword),
	}

	return out
}

View file

@@ -0,0 +1,13 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

// ExampleOpenShiftClusterCredentialsResponse returns an example
// OpenShiftClusterCredentials object that the RP might return to an end-user.
func ExampleOpenShiftClusterCredentialsResponse() interface{} {
	return &OpenShiftClusterCredentials{
		KubeadminUsername: "kubeadmin",
		KubeadminPassword: "password",
	}
}

View file

@@ -0,0 +1,36 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

// OpenShiftVersionList represents a list of available versions.
type OpenShiftVersionList struct {
	// The list of available versions.
	OpenShiftVersions []*OpenShiftVersion `json:"value"`

	// The link to the next page of results.
	NextLink string `json:"nextLink,omitempty"`
}

// OpenShiftVersion represents an OpenShift version that can be installed.
type OpenShiftVersion struct {
	proxyResource bool

	// The ID for the resource.
	ID string `json:"id,omitempty" mutable:"case"`

	// Name of the resource.
	Name string `json:"name,omitempty" mutable:"case"`

	// The resource type.
	Type string `json:"type,omitempty" mutable:"case"`

	// The properties for the OpenShiftVersion resource.
	Properties OpenShiftVersionProperties `json:"properties,omitempty"`
}

// OpenShiftVersionProperties represents the properties of an OpenShiftVersion.
type OpenShiftVersionProperties struct {
	// Version represents the version to create the cluster at.
	Version string `json:"version,omitempty"`
}
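
For reference, the wire shape these types produce: a list response carries the versions under "value", and empty optional fields drop out via omitempty. A small stand-alone sketch using trimmed copies of the structs (the version string is illustrative):

package main

import (
	"encoding/json"
	"os"
)

type versionProperties struct {
	Version string `json:"version,omitempty"`
}

type version struct {
	ID         string            `json:"id,omitempty"`
	Name       string            `json:"name,omitempty"`
	Type       string            `json:"type,omitempty"`
	Properties versionProperties `json:"properties,omitempty"`
}

type versionList struct {
	Value    []*version `json:"value"`
	NextLink string     `json:"nextLink,omitempty"`
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	// Prints {"value":[{"properties":{"version":"4.10.20"}}]} (indented).
	_ = enc.Encode(versionList{Value: []*version{{Properties: versionProperties{Version: "4.10.20"}}}})
}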

View file

@@ -0,0 +1,50 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

type openShiftVersionConverter struct{}

// ToExternal returns a new external representation
// of the internal object, reading from the subset of the internal object's
// fields that appear in the external representation. ToExternal does not
// modify its argument; there is no pointer aliasing between the passed and
// returned objects.
func (openShiftVersionConverter) ToExternal(v *api.OpenShiftVersion) interface{} {
	out := &OpenShiftVersion{
		ID:            v.ID,
		proxyResource: true,
		Properties: OpenShiftVersionProperties{
			Version: v.Properties.Version,
		},
	}

	return out
}

// ToExternalList returns a slice of external representations of the internal
// objects.
func (c openShiftVersionConverter) ToExternalList(vers []*api.OpenShiftVersion) interface{} {
	l := &OpenShiftVersionList{
		OpenShiftVersions: make([]*OpenShiftVersion, 0, len(vers)),
	}

	for _, ver := range vers {
		l.OpenShiftVersions = append(l.OpenShiftVersions, c.ToExternal(ver).(*OpenShiftVersion))
	}

	return l
}

// ToInternal overwrites in place a pre-existing internal object, setting (only)
// all mapped fields from the external representation. ToInternal modifies its
// argument; there is no pointer aliasing between the passed and returned
// objects.
func (c openShiftVersionConverter) ToInternal(_new interface{}, out *api.OpenShiftVersion) {
	in := _new.(*OpenShiftVersion) // "in" avoids shadowing the builtin "new"
	out.Properties.Version = in.Properties.Version
}
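
How the converter pair is meant to be driven, as a hedged sketch (roundTripVersion is an illustrative helper, not RP code): ToExternal builds the wire struct from the internal document, while ToInternal copies back only Properties.Version, so a client-supplied ID, Name or Type can never overwrite internal state.

package v20230401

import (
	"fmt"

	"github.com/Azure/ARO-RP/pkg/api"
)

// roundTripVersion shows that only Properties.Version survives the trip
// back into the internal object.
func roundTripVersion() {
	internal := &api.OpenShiftVersion{ID: "/original/id"}
	internal.Properties.Version = "4.10.20" // illustrative version string

	c := openShiftVersionConverter{}
	ext := c.ToExternal(internal).(*OpenShiftVersion)

	ext.ID = "/client/supplied/id" // ignored on the way back in
	ext.Properties.Version = "4.11.0"

	c.ToInternal(ext, internal)
	fmt.Println(internal.ID, internal.Properties.Version) // /original/id 4.11.0
}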

View file

@@ -0,0 +1,24 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import "github.com/Azure/ARO-RP/pkg/api"

func exampleOpenShiftVersion() *OpenShiftVersion {
	doc := api.ExampleOpenShiftVersionDocument()
	ext := (&openShiftVersionConverter{}).ToExternal(doc.OpenShiftVersion)
	return ext.(*OpenShiftVersion)
}

func ExampleOpenShiftVersionResponse() interface{} {
	return exampleOpenShiftVersion()
}

func ExampleOpenShiftVersionListResponse() interface{} {
	return &OpenShiftVersionList{
		OpenShiftVersions: []*OpenShiftVersion{
			ExampleOpenShiftVersionResponse().(*OpenShiftVersion),
		},
	}
}

View file

@@ -0,0 +1,54 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

// APIVersion contains a version string as it will be used by clients
const APIVersion = "2023-04-01"

const (
	resourceProviderNamespace = "Microsoft.RedHatOpenShift"
	resourceType              = "openShiftClusters"
)

func init() {
	api.APIs[APIVersion] = &api.Version{
		OpenShiftClusterConverter:                openShiftClusterConverter{},
		OpenShiftClusterStaticValidator:          openShiftClusterStaticValidator{},
		OpenShiftClusterCredentialsConverter:     openShiftClusterCredentialsConverter{},
		OpenShiftClusterAdminKubeconfigConverter: openShiftClusterAdminKubeconfigConverter{},
		OpenShiftVersionConverter:                openShiftVersionConverter{},
		OperationList: api.OperationList{
			Operations: []api.Operation{
				api.OperationResultsRead,
				api.OperationStatusRead,
				api.OperationRead,
				api.OperationOpenShiftClusterRead,
				api.OperationOpenShiftClusterWrite,
				api.OperationOpenShiftClusterDelete,
				api.OperationOpenShiftClusterListCredentials,
				api.OperationOpenShiftClusterListAdminCredentials,
				api.OperationListInstallVersions,
				api.OperationSyncSetsRead,
				api.OperationSyncSetsWrite,
				api.OperationSyncSetsDelete,
				api.OperationMachinePoolsRead,
				api.OperationMachinePoolsWrite,
				api.OperationMachinePoolsDelete,
				api.OperationSyncIdentityProvidersRead,
				api.OperationSyncIdentityProvidersWrite,
				api.OperationSyncIdentityProvidersDelete,
				api.OperationOpenShiftClusterGetDetectors,
			},
		},
		SyncSetConverter:              syncSetConverter{},
		MachinePoolConverter:          machinePoolConverter{},
		SyncIdentityProviderConverter: syncIdentityProviderConverter{},
		SecretConverter:               secretConverter{},
		ClusterManagerStaticValidator: clusterManagerStaticValidator{},
	}
}
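
The init above is the whole registration mechanism: importing the package populates api.APIs, and request handling then reduces to a map lookup keyed by the api-version query parameter. A hedged sketch of the consuming side follows; the handler shape and names are illustrative, not the RP's actual frontend code.

package v20230401

import (
	"net/http"

	"github.com/Azure/ARO-RP/pkg/api"
)

// serveCluster sketches an api-version lookup: unknown versions fail fast,
// known ones supply the converter used to shape the response.
func serveCluster(w http.ResponseWriter, r *http.Request, doc *api.OpenShiftCluster) {
	ver, ok := api.APIs[r.URL.Query().Get("api-version")]
	if !ok {
		http.Error(w, "unsupported api-version", http.StatusBadRequest)
		return
	}

	ext := ver.OpenShiftClusterConverter.ToExternal(doc)
	_ = ext // a real handler would JSON-encode ext to w
}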

View file

@@ -0,0 +1,38 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

type secretConverter struct{}

func (c secretConverter) ToExternal(s *api.Secret) interface{} {
	out := new(Secret)
	out.proxyResource = true

	out.ID = s.ID
	out.Name = s.Name
	out.Type = s.Type

	return out
}

func (c secretConverter) ToInternal(_s interface{}, out *api.Secret) {
	// ToInternal receives the external representation (cf. the other
	// converters in this package), so assert the external type.
	s := _s.(*Secret)
	out.ID = s.ID
}

// ToExternalList returns a slice of external representations of the internal objects.
func (c secretConverter) ToExternalList(s []*api.Secret) interface{} {
	l := &SecretList{
		Secrets: make([]*Secret, 0, len(s)),
	}

	for _, secret := range s {
		l.Secrets = append(l.Secrets, c.ToExternal(secret).(*Secret))
	}

	return l
}

View file

@@ -0,0 +1,38 @@
package v20230401

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"github.com/Azure/ARO-RP/pkg/api"
)

func exampleSecret() *Secret {
	doc := api.ExampleClusterManagerConfigurationDocumentSecret()
	ext := (&secretConverter{}).ToExternal(doc.Secret)
	return ext.(*Secret)
}

func ExampleSecretPutParameter() interface{} {
	s := exampleSecret()
	s.ID = ""
	s.Type = ""
	s.Name = ""

	return s
}

func ExampleSecretPatchParameter() interface{} {
	return ExampleSecretPutParameter()
}

func ExampleSecretResponse() interface{} {
	return exampleSecret()
}

func ExampleSecretListResponse() interface{} {
	return &SecretList{
		Secrets: []*Secret{
			ExampleSecretResponse().(*Secret),
		},
	}
}

Some files were not shown because too many files changed in this diff.