Mirror of https://github.com/Azure/ARO-RP.git
Removed all references to dbtoken.
This commit is contained in:
Parent: 8d0407cfbb
Commit: 7a568dbad1
@ -150,7 +150,7 @@ linters-settings:
alias: $1
- pkg: "^github\\.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/([0-9]+)-([0-9]+)-([0-9]+)-?(preview)?/redhatopenshift$"
alias: mgmtredhatopenshift$1$2$3$4
- pkg: "^github\\.com/Azure/ARO-RP/pkg/(dbtoken|deploy|gateway|mirror|monitor|operator|portal)$"
- pkg: "^github\\.com/Azure/ARO-RP/pkg/(deploy|gateway|mirror|monitor|operator|portal)$"
alias: pkg$1
- pkg: "^github\\.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/([0-9]+)-([0-9]+)-([0-9]+)-?(preview)?/redhatopenshift$"
alias: redhatopenshift$1$2$3$4
@ -15,5 +15,5 @@ FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all
COPY --from=builder /app/aro /app/e2e.test /usr/local/bin/
ENTRYPOINT ["aro"]
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp 8445/tcp
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp
USER 1000
@ -7,7 +7,6 @@ const (
envDatabaseName = "DATABASE_NAME"
envDatabaseAccountName = "DATABASE_ACCOUNT_NAME"
envKeyVaultPrefix = "KEYVAULT_PREFIX"
envDBTokenUrl = "DBTOKEN_URL"
envOpenShiftVersions = "OPENSHIFT_VERSIONS"
envInstallerImageDigests = "INSTALLER_IMAGE_DIGESTS"
)
@ -26,10 +26,6 @@ func gateway(ctx context.Context, log *logrus.Entry) error {
return err
}

if err = env.ValidateVars("AZURE_DBTOKEN_CLIENT_ID"); err != nil {
return err
}

m := statsd.New(ctx, log.WithField("component", "gateway"), _env, os.Getenv("MDM_ACCOUNT"), os.Getenv("MDM_NAMESPACE"), os.Getenv("MDM_STATSD_SOCKET"))

g, err := golang.NewMetrics(log.WithField("component", "gateway"), m)
@ -1,73 +0,0 @@
# DB token service

## Introduction

Cosmos DB access control is described
[here](https://docs.microsoft.com/en-us/azure/cosmos-db/secure-access-to-data).
In brief, there are three options:

1. use r/w or r/o primary keys, which grant access to the whole database account
2. implement a service which transforms (1) into scoped resource tokens
3. a Microsoft Entra ID RBAC-based model.

Currently, the RP, monitoring and portal services share the same security
boundary (the RP VM) and use option 1. The dbtoken service, which also runs on
the RP VM, is our implementation of option 2. Option 3 is now in GA and there are
plans [here](https://issues.redhat.com/browse/ARO-5512) to implement it and
replace options 1 and 2.

The purpose of the dbtoken service at its implementation time is to enable the
gateway component (which handles end-user traffic) to access the service Cosmos
DB without recourse to using root credentials. This provides a level of defence
in depth in the face of an attack on the gateway component.


## Workflow

* An AAD application is manually created at rollout, registering the
  https://dbtoken.aro.azure.com resource.

* The dbtoken service receives POST requests from any client wishing to receive
  a scoped resource token at its /token?permission=<permission> endpoint (a
  request sketch follows this list).

* The dbtoken service validates that the POST request includes a valid
  AAD-signed bearer JWT for the https://dbtoken.aro.azure.com resource. The
  subject UUID is retrieved from the JWT.

* In the case of the gateway service, the JWT subject UUID is the UUID of the
  service principal corresponding to the gateway VMSS MSI.

* Using its primary key Cosmos DB credential, the dbtoken service requests a
  scoped resource token for the given user UUID and <permission> from Cosmos DB
  and proxies it to the caller.

* Clients may use the dbtoken.Refresher interface to handle regularly refreshing
  the resource token and injecting it into the database client used by the rest
  of the client codebase.
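As a rough illustration of the request shape, a client holding a suitable AAD token could call the endpoint along the following lines. This is a sketch only: the host, port and permission value are placeholders rather than values taken from the repository.

```bash
# Sketch only: <dbtoken-host>, the port and <permission> are assumptions for
# illustration; the bearer token must be an AAD JWT issued for the dbtoken resource.
ACCESS_TOKEN="$(az account get-access-token \
    --resource https://dbtoken.aro.azure.com \
    --query accessToken -o tsv)"

curl -X POST \
    -H "Authorization: Bearer $ACCESS_TOKEN" \
    "https://<dbtoken-host>:8445/token?permission=<permission>"
```

The response carries the scoped resource token, which a client such as the gateway then injects into its Cosmos DB client via the Refresher described above.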
## Setup

* Create the application and set `requestedAccessTokenVersion`

  ```bash
  AZURE_DBTOKEN_CLIENT_ID="$(az ad app create --display-name dbtoken \
      --oauth2-allow-implicit-flow false \
      --query appId \
      -o tsv)"

  OBJ_ID="$(az ad app show --id $AZURE_DBTOKEN_CLIENT_ID --query id)"

  # NOTE: the graph API requires this to be done from a managed machine
  az rest --method PATCH \
      --uri https://graph.microsoft.com/v1.0/applications/$OBJ_ID/ \
      --body '{"api":{"requestedAccessTokenVersion": 2}}'
  ```

* Add the `AZURE_DBTOKEN_CLIENT_ID` to the RP config for the respective environment.

* The dbtoken service is responsible for creating database users and permissions

* see the ConfigurePermissions function.
@ -7,39 +7,39 @@
|
|||
ARO-RP repository so that `git status` reports a clean working tree.
|
||||
Otherwise the aro container image will have a `-dirty` suffix, which can be
problematic:
|
||||
- if the working tree becomes dirty during the process (eg. because you
|
||||
create a temporary helper script to run some of the setup), you could end
|
||||
up with different image tag pushed in the azure image registry compared
|
||||
to the tag expected by aro deployer
|
||||
- with a dirty tag, it's not clear what's actually in the image
|
||||
- if the working tree becomes dirty during the process (eg. because you
|
||||
create a temporary helper script to run some of the setup), you could end
|
||||
up with different image tag pushed in the azure image registry compared
|
||||
to the tag expected by aro deployer
|
||||
- with a dirty tag, it's not clear what's actually in the image
|
||||
|
||||
## Deploying an int-like Development RP
|
||||
|
||||
1. Fetch the most up-to-date secrets specifying `SECRET_SA_ACCOUNT_NAME` to the
|
||||
name of the storage account containing your shared development environment
|
||||
secrets, eg.:
|
||||
1. Fetch the most up-to-date secrets specifying `SECRET_SA_ACCOUNT_NAME` to the
|
||||
name of the storage account containing your shared development environment
|
||||
secrets, eg.:
|
||||
|
||||
```bash
|
||||
SECRET_SA_ACCOUNT_NAME=rharosecretsdev make secrets
|
||||
```
|
||||
```bash
|
||||
SECRET_SA_ACCOUNT_NAME=rharosecretsdev make secrets
|
||||
```
|
||||
|
||||
1. Copy and tweak your environment file:
|
||||
1. Copy and tweak your environment file:
|
||||
|
||||
```bash
|
||||
cp env.example env
|
||||
vi env
|
||||
```
|
||||
```bash
|
||||
cp env.example env
|
||||
vi env
|
||||
```
|
||||
|
||||
You don't need to change anything in the env file, unless you plan on using
|
||||
hive to install the cluster. In that case add the following hive environment
|
||||
variables into your env file:
|
||||
You don't need to change anything in the env file, unless you plan on using
|
||||
hive to install the cluster. In that case add the following hive environment
|
||||
variables into your env file:
|
||||
|
||||
```bash
|
||||
export ARO_INSTALL_VIA_HIVE=true
|
||||
export ARO_ADOPT_BY_HIVE=true
|
||||
```
|
||||
```bash
|
||||
export ARO_INSTALL_VIA_HIVE=true
|
||||
export ARO_ADOPT_BY_HIVE=true
|
||||
```
|
||||
|
||||
1. Create a full environment file, which overrides some defaults from `./env` options when sourced
|
||||
1. Create a full environment file, which overrides some defaults from `./env` options when sourced
|
||||
|
||||
```bash
|
||||
cp env-int.example env-int
|
||||
|
@ -48,67 +48,77 @@
|
|||
|
||||
What to change in `env-int` file:
|
||||
|
||||
* if using a public key separate from `~/.ssh/id_rsa.pub` (for ssh access to RP and Gateway vmss instances), source it with `export SSH_PUBLIC_KEY=~/.ssh/id_separate.pub`
|
||||
* don't try to change `$USER` prefix used there
|
||||
* set tag of `FLUENTBIT_IMAGE` value to match the default from `pkg/util/version/const.go`,
|
||||
- if using a public key separate from `~/.ssh/id_rsa.pub` (for ssh access to RP and Gateway vmss instances), source it with `export SSH_PUBLIC_KEY=~/.ssh/id_separate.pub`
|
||||
- don't try to change `$USER` prefix used there
|
||||
- set tag of `FLUENTBIT_IMAGE` value to match the default from `pkg/util/version/const.go`,
|
||||
eg. `FLUENTBIT_IMAGE=${USER}aro.azurecr.io/fluentbit:1.9.10-cm20230426`
|
||||
* if you actually care about fluentbit image version, you need to change the default both in the env-int file and for ARO Deployer, which is out of scope of this guide
|
||||
- if you actually care about fluentbit image version, you need to change the default both in the env-int file and for ARO Deployer, which is out of scope of this guide
|
||||
|
||||
1. And finally source the env:
|
||||
1. And finally source the env:
|
||||
|
||||
```bash
|
||||
. ./env-int
|
||||
```
|
||||
|
||||
1. Generate the development RP configuration
|
||||
1. Generate the development RP configuration
|
||||
|
||||
```bash
|
||||
make dev-config.yaml
|
||||
```
|
||||
|
||||
1. Run `make deploy`. This will fail on the first attempt to run due to AKS not being installed, so after the first failure, please skip to the next step to deploy the VPN Gateway and then deploy AKS.
|
||||
> __NOTE:__ If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy`.
|
||||
1. Run `make deploy`. This will fail on the first attempt to run due to AKS not being installed, so after the first failure, please skip to the next step to deploy the VPN Gateway and then deploy AKS.
|
||||
|
||||
> __NOTE:__ If the deployment fails with `A vault with the same name already exists in deleted state`, then you will need to recover the deleted keyvaults from a previous deploy using: `az keyvault recover --name <KEYVAULT_NAME>` for each keyvault, and re-run.
|
||||
> **NOTE:** If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy`.
|
||||
|
||||
1. Deploy a VPN Gateway
|
||||
> **NOTE:** If the deployment fails with `A vault with the same name already exists in deleted state`, then you will need to recover the deleted keyvaults from a previous deploy using: `az keyvault recover --name <KEYVAULT_NAME>` for each keyvault, and re-run.
|
||||
|
||||
1. Deploy a VPN Gateway
|
||||
This is required in order to be able to connect to AKS from your local machine:
|
||||
|
||||
```bash
|
||||
source ./hack/devtools/deploy-shared-env.sh
|
||||
deploy_vpn_for_dedicated_rp
|
||||
```
|
||||
|
||||
1. Deploy AKS by running these commands from the ARO-RP root directory:
|
||||
1. Deploy AKS by running these commands from the ARO-RP root directory:
|
||||
|
||||
```bash
|
||||
source ./hack/devtools/deploy-shared-env.sh
|
||||
deploy_aks_dev
|
||||
```
|
||||
> __NOTE:__ If the AKS deployment fails with missing RP VNETs, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy` and then re-run `deploy_aks_dev`.
|
||||
|
||||
1. Install Hive into AKS
|
||||
> **NOTE:** If the AKS deployment fails with missing RP VNETs, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy` and then re-run `deploy_aks_dev`.
|
||||
|
||||
1. Install Hive into AKS
|
||||
|
||||
1. Download the VPN config. Please note that this action will _**OVERWRITE**_ the `secrets/vpn-$LOCATION.ovpn` on your local machine. **DO NOT** run `make secrets-update` after doing this, as you will overwrite the existing config, until such time as you have run `make secrets` to get the config restored.
|
||||
```bash
|
||||
vpn_configuration
|
||||
```
|
||||
|
||||
```bash
|
||||
vpn_configuration
|
||||
```
|
||||
|
||||
1. Connect to the Dev VPN in a new terminal:
|
||||
```bash
|
||||
sudo openvpn secrets/vpn-$LOCATION.ovpn
|
||||
```
|
||||
|
||||
```bash
|
||||
sudo openvpn secrets/vpn-$LOCATION.ovpn
|
||||
```
|
||||
|
||||
1. Now that your machine is able access the AKS cluster, you can deploy Hive:
|
||||
```bash
|
||||
make aks.kubeconfig
|
||||
./hack/hive-generate-config.sh
|
||||
KUBECONFIG=$(pwd)/aks.kubeconfig ./hack/hive-dev-install.sh
|
||||
```
|
||||
```bash
|
||||
make aks.kubeconfig
|
||||
./hack/hive-generate-config.sh
|
||||
KUBECONFIG=$(pwd)/aks.kubeconfig ./hack/hive-dev-install.sh
|
||||
```
|
||||
|
||||
1. Mirror the OpenShift images to your new ACR
|
||||
1. Mirror the OpenShift images to your new ACR
|
||||
<!-- TODO (bv) allow mirroring through a pipeline would be faster and a nice to have -->
|
||||
> __NOTE:__ Running the mirroring through a VM in Azure rather than a local workstation is recommended for better performance.
|
||||
> __NOTE:__ Value of `USER_PULL_SECRET` variable comes from the secrets, which are sourced via `env-int` file
|
||||
> __NOTE:__ `DST_AUTH` token or the login to the registry expires after some time
|
||||
|
||||
1. Setup mirroring environment variables
|
||||
> **NOTE:** Running the mirroring through a VM in Azure rather than a local workstation is recommended for better performance.
|
||||
> **NOTE:** Value of `USER_PULL_SECRET` variable comes from the secrets, which are sourced via `env-int` file
|
||||
> **NOTE:** `DST_AUTH` token or the login to the registry expires after some time
|
||||
|
||||
1. Setup mirroring environment variables
|
||||
|
||||
```bash
|
||||
export DST_ACR_NAME=${USER}aro
|
||||
export SRC_AUTH_QUAY=$(echo $USER_PULL_SECRET | jq -r '.auths."quay.io".auth')
|
||||
|
@ -116,50 +126,51 @@
|
|||
export DST_AUTH=$(echo -n '00000000-0000-0000-0000-000000000000:'$(az acr login -n ${DST_ACR_NAME} --expose-token | jq -r .accessToken) | base64 -w0)
|
||||
```
|
||||
|
||||
1. Login to the Azure Container Registry
|
||||
1. Login to the Azure Container Registry
|
||||
|
||||
```bash
|
||||
docker login -u 00000000-0000-0000-0000-000000000000 -p "$(echo $DST_AUTH | base64 -d | cut -d':' -f2)" "${DST_ACR_NAME}.azurecr.io"
|
||||
```
|
||||
|
||||
1. Run the mirroring
|
||||
1. Run the mirroring
|
||||
|
||||
> The `latest` argument will take the DefaultInstallStream from `pkg/util/version/const.go` and mirror that version
|
||||
|
||||
```bash
|
||||
go run ./cmd/aro mirror latest
|
||||
```
|
||||
|
||||
If you are going to test or work with multi-version installs, then you should mirror any additional versions as well, for example for 4.11.21 it would be
|
||||
|
||||
```bash
|
||||
go run ./cmd/aro mirror 4.11.21
|
||||
```
|
||||
|
||||
1. Push the ARO and Fluentbit images to your ACR
|
||||
|
||||
> If running this step from a VM separate from your workstation, ensure the commit tag used to build the image matches the commit tag where `make deploy` is run.
|
||||
|
||||
> For local builds and CI builds without `RP_IMAGE_ACR` environment
|
||||
> variable set, `make publish-image-*` targets will pull from
|
||||
> `registry.access.redhat.com`.
|
||||
> If you need to use Azure container registry instead due to security
|
||||
> compliance requirements, modify the `RP_IMAGE_ACR` environment
|
||||
> variable to point to `arointsvc` or `arosvc` instead. You will need to
|
||||
> authenticate to this registry using `az acr login --name arointsvc`
|
||||
> to pull the images.
|
||||
|
||||
> If the push fails on error like `unable to retrieve auth token:
|
||||
> invalid username/password: unauthorized: authentication required`,
|
||||
> try to create `DST_AUTH` variable and login to the container
|
||||
> registry (as explained in steps above) again. It will resolve the
|
||||
> failure in case of an expired auth token.
|
||||
> The `latest` argument will take the DefaultInstallStream from `pkg/util/version/const.go` and mirror that version
|
||||
|
||||
```bash
|
||||
make publish-image-aro-multistage
|
||||
make publish-image-fluentbit
|
||||
go run ./cmd/aro mirror latest
|
||||
```
|
||||
|
||||
1. Update the DNS Child Domains
|
||||
If you are going to test or work with multi-version installs, then you should mirror any additional versions as well, for example for 4.11.21 it would be
|
||||
|
||||
```bash
|
||||
go run ./cmd/aro mirror 4.11.21
|
||||
```
|
||||
|
||||
1. Push the ARO and Fluentbit images to your ACR
|
||||
|
||||
> If running this step from a VM separate from your workstation, ensure the commit tag used to build the image matches the commit tag where `make deploy` is run.
|
||||
|
||||
> For local builds and CI builds without `RP_IMAGE_ACR` environment
|
||||
> variable set, `make publish-image-*` targets will pull from
|
||||
> `registry.access.redhat.com`.
|
||||
> If you need to use Azure container registry instead due to security
|
||||
> compliance requirements, modify the `RP_IMAGE_ACR` environment
|
||||
> variable to point to `arointsvc` or `arosvc` instead. You will need to
|
||||
> authenticate to this registry using `az acr login --name arointsvc`
|
||||
> to pull the images.
|
||||
|
||||
> If the push fails on an error like `unable to retrieve auth token:
> invalid username/password: unauthorized: authentication required`,
> try to create the `DST_AUTH` variable and log in to the container
> registry (as explained in the steps above) again. This will resolve the
> failure in the case of an expired auth token.
|
||||
|
||||
```bash
|
||||
make publish-image-aro-multistage
|
||||
make publish-image-fluentbit
|
||||
```
|
||||
|
||||
1. Update the DNS Child Domains
|
||||
|
||||
```bash
|
||||
export PARENT_DOMAIN_NAME=osadev.cloud
|
||||
export PARENT_DOMAIN_RESOURCEGROUP=dns
|
||||
|
@ -185,11 +196,12 @@
|
|||
done
|
||||
```
|
||||
|
||||
1. Update the certificates in keyvault
|
||||
1. Update the certificates in keyvault
|
||||
<!-- TODO: this is almost duplicated elsewhere. Would be nice to move to common area -->
|
||||
> __NOTE:__ If you reuse an old name, you might run into soft-delete of the keyvaults. Run `az keyvault recover --name` to fix this.
|
||||
|
||||
> __NOTE:__ Check to ensure that the $KEYVAULT_PREFIX environment variable set on workstation matches the prefix deployed into the resource group.
|
||||
> **NOTE:** If you reuse an old name, you might run into soft-delete of the keyvaults. Run `az keyvault recover --name` to fix this.
|
||||
|
||||
> **NOTE:** Check to ensure that the $KEYVAULT_PREFIX environment variable set on workstation matches the prefix deployed into the resource group.
|
||||
|
||||
```bash
|
||||
az keyvault certificate import \
|
||||
|
@ -232,169 +244,174 @@
|
|||
--vault-name "$KEYVAULT_PREFIX-por" \
|
||||
--name portal-client \
|
||||
--file secrets/portal-client.pem >/dev/null
|
||||
az keyvault certificate import \
|
||||
--vault-name "$KEYVAULT_PREFIX-dbt" \
|
||||
--name dbtoken-server \
|
||||
--file secrets/localhost.pem >/dev/null
|
||||
```
|
||||
|
||||
1. Delete the existing VMSS
|
||||
> __NOTE:__ This needs to be deleted as deploying won't recreate the VMSS if the commit hash is the same.
|
||||
1. Delete the existing VMSS
|
||||
|
||||
> **NOTE:** This needs to be deleted as deploying won't recreate the VMSS if the commit hash is the same.
|
||||
|
||||
```bash
|
||||
az vmss delete -g ${RESOURCEGROUP} --name rp-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) && az vmss delete -g $USER-gwy-$LOCATION --name gateway-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty)
|
||||
```
|
||||
|
||||
1. Run `make deploy`. When the command finishes, there should be one VMSS for
|
||||
the RP with a single vm instance, and another VMSS with a single vm for
|
||||
Gateway.
|
||||
1. Run `make deploy`. When the command finishes, there should be one VMSS for
|
||||
the RP with a single vm instance, and another VMSS with a single vm for
|
||||
Gateway.
|
||||
|
||||
1. Create storage account and role assignment required for workload identity clusters
|
||||
|
||||
1. Create storage account and role assignment required for workload identity clusters
|
||||
```
|
||||
source ./hack/devtools/deploy-shared-env.sh
|
||||
deploy_oic_for_dedicated_rp
|
||||
```
|
||||
|
||||
1. If you are going to use multiversion, you can now update the OpenShiftVersions DB as per the [OpenShift Version instructions](./deploy-development-rp.md#openshift-version)
1. If you are going to use multiversion, you can now update the OpenShiftVersions DB as per the [OpenShift Version instructions](./deploy-development-rp.md#openshift-version)
|
||||
|
||||
## SSH to RP VMSS Instance
|
||||
|
||||
1. Update the RP NSG to allow SSH
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name ssh-to-rp \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--nsg-name rp-nsg \
|
||||
--access Allow \
|
||||
--priority 500 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 22
|
||||
```
|
||||
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name ssh-to-rp \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--nsg-name rp-nsg \
|
||||
--access Allow \
|
||||
--priority 500 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 22
|
||||
```
|
||||
|
||||
1. SSH into the VM
|
||||
```bash
|
||||
VMSS_PIP=$(az vmss list-instance-public-ips -g $RESOURCEGROUP --name rp-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
|
||||
|
||||
ssh cloud-user@${VMSS_PIP}
|
||||
```
|
||||
```bash
|
||||
VMSS_PIP=$(az vmss list-instance-public-ips -g $RESOURCEGROUP --name rp-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
|
||||
|
||||
ssh cloud-user@${VMSS_PIP}
|
||||
```
|
||||
|
||||
## SSH to Gateway VMSS Instance
|
||||
|
||||
1. Update the Gateway NSG to allow SSH
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name ssh-to-gwy \
|
||||
--resource-group $USER-gwy-$LOCATION \
|
||||
--nsg-name gateway-nsg \
|
||||
--access Allow \
|
||||
--priority 500 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 22
|
||||
```
|
||||
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name ssh-to-gwy \
|
||||
--resource-group $USER-gwy-$LOCATION \
|
||||
--nsg-name gateway-nsg \
|
||||
--access Allow \
|
||||
--priority 500 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 22
|
||||
```
|
||||
|
||||
1. SSH into the VM
|
||||
```bash
|
||||
VMSS_PIP=$(az vmss list-instance-public-ips -g $USER-gwy-$LOCATION --name gateway-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
|
||||
|
||||
ssh cloud-user@${VMSS_PIP}
|
||||
```
|
||||
```bash
|
||||
VMSS_PIP=$(az vmss list-instance-public-ips -g $USER-gwy-$LOCATION --name gateway-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
|
||||
|
||||
ssh cloud-user@${VMSS_PIP}
|
||||
```
|
||||
|
||||
## Deploy a Cluster
|
||||
|
||||
1. Add a NSG rule to allow tunneling to the RP instance
|
||||
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name tunnel-to-rp \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--nsg-name rp-nsg \
|
||||
--access Allow \
|
||||
--priority 499 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 443
|
||||
```
|
||||
|
||||
```bash
|
||||
az network nsg rule create \
|
||||
--name tunnel-to-rp \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--nsg-name rp-nsg \
|
||||
--access Allow \
|
||||
--priority 499 \
|
||||
--source-address-prefixes "$(curl --silent -4 ipecho.net/plain)/32" \
|
||||
--protocol Tcp \
|
||||
--destination-port-ranges 443
|
||||
```
|
||||
|
||||
1. Run the tunnel program to tunnel to the RP
|
||||
```bash
|
||||
make tunnel
|
||||
```
|
||||
|
||||
> __NOTE:__ `make tunnel` will print the public IP of your new RP VM NIC. Ensure that it's correct.
|
||||
```bash
|
||||
make tunnel
|
||||
```
|
||||
|
||||
> **NOTE:** `make tunnel` will print the public IP of your new RP VM NIC. Ensure that it's correct.
|
||||
|
||||
1. Update the versions available to install (run this as many times as you need, once per version)
|
||||
```bash
|
||||
curl -X PUT -k "https://localhost:8443/admin/versions" --header "Content-Type: application/json" -d '{ "properties": { "version": "4.x.y", "enabled": true, "openShiftPullspec": "quay.io/openshift-release-dev/ocp-release@sha256:<sha256>", "installerPullspec": "<name>.azurecr.io/installer:release-4.x" }}'
|
||||
```
|
||||
|
||||
```bash
|
||||
curl -X PUT -k "https://localhost:8443/admin/versions" --header "Content-Type: application/json" -d '{ "properties": { "version": "4.x.y", "enabled": true, "openShiftPullspec": "quay.io/openshift-release-dev/ocp-release@sha256:<sha256>", "installerPullspec": "<name>.azurecr.io/installer:release-4.x" }}'
|
||||
```
|
||||
|
||||
1. Update environment variable to deploy in a different resource group
|
||||
```bash
|
||||
export RESOURCEGROUP=myResourceGroup
|
||||
```
|
||||
|
||||
```bash
|
||||
export RESOURCEGROUP=myResourceGroup
|
||||
```
|
||||
|
||||
1. Create the resource group if it doesn't exist
|
||||
```bash
|
||||
az group create --resource-group $RESOURCEGROUP --location $LOCATION
|
||||
```
|
||||
|
||||
```bash
|
||||
az group create --resource-group $RESOURCEGROUP --location $LOCATION
|
||||
```
|
||||
|
||||
1. Create VNets / Subnets
|
||||
```bash
|
||||
az network vnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--name aro-vnet \
|
||||
--address-prefixes 10.0.0.0/22
|
||||
```
|
||||
|
||||
```bash
|
||||
az network vnet subnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--vnet-name aro-vnet \
|
||||
--name master-subnet \
|
||||
--address-prefixes 10.0.0.0/23 \
|
||||
--service-endpoints Microsoft.ContainerRegistry
|
||||
```
|
||||
```bash
|
||||
az network vnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--name aro-vnet \
|
||||
--address-prefixes 10.0.0.0/22
|
||||
```
|
||||
|
||||
```bash
|
||||
az network vnet subnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--vnet-name aro-vnet \
|
||||
--name worker-subnet \
|
||||
--address-prefixes 10.0.2.0/23 \
|
||||
--service-endpoints Microsoft.ContainerRegistry
|
||||
```
|
||||
```bash
|
||||
az network vnet subnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--vnet-name aro-vnet \
|
||||
--name master-subnet \
|
||||
--address-prefixes 10.0.0.0/23 \
|
||||
--service-endpoints Microsoft.ContainerRegistry
|
||||
```
|
||||
|
||||
```bash
|
||||
az network vnet subnet create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--vnet-name aro-vnet \
|
||||
--name worker-subnet \
|
||||
--address-prefixes 10.0.2.0/23 \
|
||||
--service-endpoints Microsoft.ContainerRegistry
|
||||
```
|
||||
|
||||
1. Register your subscription with the resource provider (post directly to subscription cosmosdb container)
|
||||
```bash
|
||||
curl -k -X PUT -H 'Content-Type: application/json' -d '{
|
||||
"state": "Registered",
|
||||
"properties": {
|
||||
"tenantId": "'"$AZURE_TENANT_ID"'",
|
||||
"registeredFeatures": [
|
||||
{
|
||||
"name": "Microsoft.RedHatOpenShift/RedHatEngineering",
|
||||
"state": "Registered"
|
||||
}
|
||||
]
|
||||
}
|
||||
}' "https://localhost:8443/subscriptions/$AZURE_SUBSCRIPTION_ID?api-version=2.0"
|
||||
```
|
||||
|
||||
```bash
|
||||
curl -k -X PUT -H 'Content-Type: application/json' -d '{
|
||||
"state": "Registered",
|
||||
"properties": {
|
||||
"tenantId": "'"$AZURE_TENANT_ID"'",
|
||||
"registeredFeatures": [
|
||||
{
|
||||
"name": "Microsoft.RedHatOpenShift/RedHatEngineering",
|
||||
"state": "Registered"
|
||||
}
|
||||
]
|
||||
}
|
||||
}' "https://localhost:8443/subscriptions/$AZURE_SUBSCRIPTION_ID?api-version=2.0"
|
||||
```
|
||||
|
||||
1. Create the cluster
|
||||
```bash
|
||||
export CLUSTER=$USER
|
||||
|
||||
az aro create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--name $CLUSTER \
|
||||
--vnet aro-vnet \
|
||||
--master-subnet master-subnet \
|
||||
--worker-subnet worker-subnet
|
||||
```
|
||||
```bash
|
||||
export CLUSTER=$USER
|
||||
|
||||
> __NOTE:__ The `az aro` CLI extension must be registered in order to run `az aro` commands against a local or tunneled RP. The usual hack script used to create clusters does not work due to keyvault mirroring requirements. The name of the cluster depends on the DNS zone that was created in an earlier step.
|
||||
az aro create \
|
||||
--resource-group $RESOURCEGROUP \
|
||||
--name $CLUSTER \
|
||||
--vnet aro-vnet \
|
||||
--master-subnet master-subnet \
|
||||
--worker-subnet worker-subnet
|
||||
```
|
||||
|
||||
> **NOTE:** The `az aro` CLI extension must be registered in order to run `az aro` commands against a local or tunneled RP. The usual hack script used to create clusters does not work due to keyvault mirroring requirements. The name of the cluster depends on the DNS zone that was created in an earlier step.
|
||||
|
|
|
@ -1,50 +1,50 @@
|
|||
# Certificates and Secrets Explained
|
||||
|
||||
## Overview
|
||||
|
||||
This walks through all the keyvaults and explains how the certificates and secrets are used throughout. A short CLI sketch for inspecting a vault's contents appears at the end of this section.
|
||||
|
||||
## MDM/MDSD
|
||||
The majority of the certificates below are mdm/mdsd related. These certificates are signed by the AME.GBL certificate authority and are vital to ensuring the necessary ingestion of metrics and logs within the ARO RP service and clusters.

The majority of the certificates below are mdm/mdsd related. These certificates are signed by the AME.GBL certificate authority and are vital to ensuring the necessary ingestion of metrics and logs within the ARO RP service and clusters.
|
||||
|
||||
More information about Geneva Monitoring can be found [here](https://eng.ms/docs/products/geneva/getting_started/newgettingstarted/overview).
|
||||
|
||||
|
||||
## Certificates
|
||||
The majority of the certificates are configured for auto-renewal, so that when nearing expiration they are updated and rotated. More information about certificate rotation can be found [here](./certificate-rotation.md)

The majority of the certificates are configured for auto-renewal, so that when nearing expiration they are updated and rotated. More information about certificate rotation can be found [here](./certificate-rotation.md)
|
||||
|
||||
## RP Keyvaults
|
||||
|
||||
1. Cluster (cls)
|
||||
- Certificates:
|
||||
- This keyvault contains all cluster `api` and `*.apps` certificates used within OpenShift. These certificates are auto-rotated and pushed to clusters during AdminUpdates in the `configureAPIServerCertificate` and `configureIngressCertificate` steps. These certificates will not be generated if the `DisableSignedCertificates` [feature flag](./feature-flags.md) is set within the RP config.
|
||||
|
||||
1. DBToken (dbt)
|
||||
- Certificates:
|
||||
- `dbtoken-server` is a TLS certificate used in the [`dbtoken` service](./dbtoken-service.md) for RESTful calls to issue credentials.
|
||||
- Certificates:
|
||||
- This keyvault contains all cluster `api` and `*.apps` certificates used within OpenShift. These certificates are auto-rotated and pushed to clusters during AdminUpdates in the `configureAPIServerCertificate` and `configureIngressCertificate` steps. These certificates will not be generated if the `DisableSignedCertificates` [feature flag](./feature-flags.md) is set within the RP config.
|
||||
|
||||
1. Portal (por)
|
||||
- Certificates:
|
||||
- `portal-client` is a certificate which is used within the aro-portal app registration. The subject of this certificate must match that within the `trustedSubjects` section of the app registration manifest within the Azure portal, otherwise callbacks from the Microsoft AAD login service will not function correctly.
|
||||
- `portal-server` is a TLS certificate used in the SRE portal to access clusters
|
||||
- Secrets:
|
||||
- `portal-session-key` is a secret used to encrypt the session cookie when logging into the SRE portal. When logging in, the SRE portal will encrypt a session cookie with this secret and push it to persist in your web browser. Requests to the SRE portal then use this cookie to confirm authentication to the SRE portal.
|
||||
|
||||
- Certificates:
|
||||
- `portal-client` is a certificate which is used within the aro-portal app registration. The subject of this certificate must match that within the `trustedSubjects` section of the app registration manifest within the Azure portal, otherwise callbacks from the Microsoft AAD login service will not function correctly.
|
||||
- `portal-server` is a TLS certificate used in the SRE portal to access clusters
|
||||
- Secrets:
|
||||
- `portal-session-key` is a secret used to encrypt the session cookie when logging into the SRE portal. When logging in, the SRE portal will encrypt a session cookie with this secret and push it to persist in your web browser. Requests to the SRE portal then use this cookie to confirm authentication to the SRE portal.
|
||||
|
||||
1. Service (svc)
|
||||
- Certificates:
|
||||
- `cluster-mdsd` is the certificate persisted for logging for every ARO cluster
|
||||
- `rp-firstparty` is the certificate for the First Party service principal credentials
|
||||
- `rp-mdm` is the MDM certificate the RP uses to emit cluster metrics within the monitor and RP metrics within the RP processes
|
||||
- `rp-mdsd` is the MDSD certificate the RP uses to emit logs to the Geneva/MDSD service
|
||||
- `rp-server` is the TLS certificate used for RP RESTful HTTPS calls
|
||||
- Secrets:
|
||||
- `encryption-key` a legacy secret which uses the old encryption suites to encrypt secure strings and secure bytes within the cluster document
|
||||
- `encryption-key-v2` the new secret used to encrypt secure strings and secure bytes within the cluster document
|
||||
- `fe-encryption-key` a legacy secret used to encrypt `skipTokens` for paging OpenShiftCluster List requests. Uses an older encryption suite.
|
||||
- `fe-encryption-key-v2` a new secret used to encrypt `skipTokens` for paging OpenShiftCluster List requests
|
||||
- Certificates:
|
||||
- `cluster-mdsd` is the certificate persisted for logging for every ARO cluster
|
||||
- `rp-firstparty` is the certificate for the First Party service principal credentials
|
||||
- `rp-mdm` is the MDM certificate the RP uses to emit cluster metrics within the monitor and RP metrics within the RP processes
|
||||
- `rp-mdsd` is the MDSD certificate the RP uses to emit logs to the Geneva/MDSD service
|
||||
- `rp-server` is the TLS certificate used for RP RESTful HTTPS calls
|
||||
- Secrets:
|
||||
- `encryption-key` a legacy secret which uses the old encryption suites to encrypt secure strings and secure bytes within the cluster document
|
||||
- `encryption-key-v2` the new secret used to encrypt secure strings and secure bytes within the cluster document
|
||||
- `fe-encryption-key` a legacy secret used to encrypt `skipTokens` for paging OpenShiftCluster List requests. Uses an older encryption suite.
|
||||
- `fe-encryption-key-v2` a new secret used to encrypt `skipTokens` for paging OpenShiftCluster List requests
|
||||
|
||||
## Gateway Keyvaults
|
||||
|
||||
1. Gateway (gwy)
|
||||
- Certificates:
|
||||
- `gwy-mdm` the certificate used for emitting metrics to the Geneva/MDM service
|
||||
- `gwy-mdsd` the certificate used for emitting logs to the Geneva/MDSD service
|
||||
- Certificates:
|
||||
- `gwy-mdm` the certificate used for emitting metrics to the Geneva/MDM service
|
||||
- `gwy-mdsd` the certificate used for emitting logs to the Geneva/MDSD service
|
||||
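To cross-check these descriptions against a live environment, you can list what a given vault actually holds. This is a convenience sketch only; it assumes `KEYVAULT_PREFIX` is set as in the deployment docs and that your identity has list permissions on the vault.

```bash
# List certificates and secrets in the service (svc) keyvault; swap the
# suffix (-svc, -por, -cls, -gwy) to inspect the other vaults.
az keyvault certificate list --vault-name "$KEYVAULT_PREFIX-svc" \
    --query "[].{name:name, expires:attributes.expires}" -o table

az keyvault secret list --vault-name "$KEYVAULT_PREFIX-svc" \
    --query "[].name" -o tsv
```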
|
|
|
@ -1,8 +1,8 @@
|
|||
# Prepare a shared RP development environment
|
||||
|
||||
Follow these steps to build a shared RP development environment and secrets
|
||||
file. A single RP development environment can be shared across multiple
|
||||
developers and/or CI flows. It may include multiple resource groups in multiple
|
||||
file. A single RP development environment can be shared across multiple
|
||||
developers and/or CI flows. It may include multiple resource groups in multiple
|
||||
locations.
|
||||
|
||||
## Prerequisites
|
||||
|
@ -12,7 +12,7 @@ locations.
|
|||
applications.
|
||||
|
||||
1. You will need a publicly resolvable DNS Zone resource in your Azure
|
||||
subscription. Set PARENT_DOMAIN_NAME and PARENT_DOMAIN_RESOURCEGROUP to the name and
|
||||
subscription. Set PARENT_DOMAIN_NAME and PARENT_DOMAIN_RESOURCEGROUP to the name and
|
||||
resource group of the DNS Zone resource:
|
||||
|
||||
```bash
|
||||
|
@ -21,9 +21,9 @@ locations.
|
|||
```
|
||||
|
||||
1. You will need a storage account in your Azure subscription in which to store
|
||||
shared development environment secrets. The storage account must contain a
|
||||
private container named `secrets`. All team members must have `Storage Blob
|
||||
Data Reader` or `Storage Blob Data Contributor` role on the storage account.
|
||||
shared development environment secrets. The storage account must contain a
|
||||
private container named `secrets`. All team members must have `Storage Blob
|
||||
Data Reader` or `Storage Blob Data Contributor` role on the storage account.
|
||||
Set SECRET_SA_ACCOUNT_NAME to the name of the storage account:
|
||||
|
||||
```bash
|
||||
|
@ -32,7 +32,7 @@ locations.
|
|||
|
||||
1. You will need an AAD object (this could be your AAD user, or an AAD group of
|
||||
which you are a member) which will be able to administer certificates in the
|
||||
development environment key vault(s). Set ADMIN_OBJECT_ID to the object ID.
|
||||
development environment key vault(s). Set ADMIN_OBJECT_ID to the object ID.
|
||||
|
||||
```bash
|
||||
ADMIN_OBJECT_ID="$(az ad group show -g 'aro-engineering' --query id -o tsv)"
|
||||
|
@ -73,12 +73,11 @@ locations.
|
|||
mkdir -p secrets
|
||||
```
|
||||
|
||||
|
||||
## AAD applications
|
||||
|
||||
1. Create an AAD application which will fake up the ARM layer:
|
||||
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
suitable key/certificate file can be generated using the following helper
|
||||
utility:
|
||||
|
||||
|
@ -101,11 +100,11 @@ locations.
|
|||
|
||||
Later this application will be granted:
|
||||
|
||||
* `User Access Administrator` on your subscription.
|
||||
- `User Access Administrator` on your subscription.
|
||||
|
||||
1. Create an AAD application which will fake up the first party application.
|
||||
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
suitable key/certificate file can be generated using the following helper
|
||||
utility:
|
||||
|
||||
|
@ -130,9 +129,9 @@ locations.
|
|||
|
||||
Later this application will be granted:
|
||||
|
||||
* `ARO v4 FP Subscription` on your subscription.
|
||||
* `DNS Zone Contributor` on the DNS zone in RESOURCEGROUP.
|
||||
* `Network Contributor` on RESOURCEGROUP.
|
||||
- `ARO v4 FP Subscription` on your subscription.
|
||||
- `DNS Zone Contributor` on the DNS zone in RESOURCEGROUP.
|
||||
- `Network Contributor` on RESOURCEGROUP.
|
||||
|
||||
1. Create an AAD application which will fake up the RP identity.
|
||||
|
||||
|
@ -150,9 +149,9 @@ locations.
|
|||
|
||||
Later this application will be granted:
|
||||
|
||||
* `Reader` on RESOURCEGROUP.
|
||||
* `Secrets / Get` on the key vault in RESOURCEGROUP.
|
||||
* `DocumentDB Account Contributor` on the CosmosDB resource in RESOURCEGROUP.
|
||||
- `Reader` on RESOURCEGROUP.
|
||||
- `Secrets / Get` on the key vault in RESOURCEGROUP.
|
||||
- `DocumentDB Account Contributor` on the CosmosDB resource in RESOURCEGROUP.
|
||||
|
||||
1. Create an AAD application which will fake up the gateway identity.
|
||||
|
||||
|
@ -184,21 +183,21 @@ locations.
|
|||
|
||||
Later this application will be granted:
|
||||
|
||||
* `Contributor` on your subscription.
|
||||
* `User Access Administrator` on your subscription.
|
||||
- `Contributor` on your subscription.
|
||||
- `User Access Administrator` on your subscription.
|
||||
|
||||
You must also manually grant this application the `Microsoft.Graph/Application.ReadWrite.OwnedBy` permission, which requires admin access, in order for AAD applications to be created/deleted on a per-cluster basis. The portal steps are listed below; a CLI sketch of the same grant follows the list.
|
||||
|
||||
* Go into the Azure Portal
|
||||
* Go to Azure Active Directory
|
||||
* Navigate to the `aro-v4-tooling-shared` app registration page
|
||||
* Click 'API permissions' in the left side pane
|
||||
* Click 'Add a permission'.
|
||||
* Click 'Microsoft Graph'
|
||||
* Select 'Application permissions'
|
||||
* Search for 'Application' and select `Application.ReadWrite.OwnedBy`
|
||||
* Click 'Add permissions'
|
||||
* This request will need to be approved by a tenant administrator. If you are one, you can click the `Grant admin consent for <name>` button to the right of the `Add a permission` button on the app page
|
||||
- Go into the Azure Portal
|
||||
- Go to Azure Active Directory
|
||||
- Navigate to the `aro-v4-tooling-shared` app registration page
|
||||
- Click 'API permissions' in the left side pane
|
||||
- Click 'Add a permission'.
|
||||
- Click 'Microsoft Graph'
|
||||
- Select 'Application permissions'
|
||||
- Search for 'Application' and select `Application.ReadWrite.OwnedBy`
|
||||
- Click 'Add permissions'
|
||||
- This request will need to be approved by a tenant administrator. If you are one, you can click the `Grant admin consent for <name>` button to the right of the `Add a permission` button on the app page
|
||||
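If you prefer the CLI, a minimal sketch of the same grant is below. It is not taken from the repository: `TOOLING_APP_ID` is a hypothetical variable standing in for the `aro-v4-tooling-shared` application ID (use whichever variable your env file holds it in), and the consent step still needs a tenant administrator.

```bash
# Sketch only: grant Microsoft Graph Application.ReadWrite.OwnedBy to the
# tooling application, then have an admin consent to it.
GRAPH_APP_ID=00000003-0000-0000-c000-000000000000   # Microsoft Graph first-party app ID
ROLE_ID="$(az ad sp show --id $GRAPH_APP_ID \
    --query "appRoles[?value=='Application.ReadWrite.OwnedBy'].id | [0]" -o tsv)"

az ad app permission add --id "$TOOLING_APP_ID" \
    --api $GRAPH_APP_ID --api-permissions "$ROLE_ID=Role"

az ad app permission admin-consent --id "$TOOLING_APP_ID"   # requires a tenant admin
```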
|
||||
1. Set up the RP role definitions and subscription role assignments in your Azure subscription. The usage of "uuidgen" for fpRoleDefinitionId is simply there to keep from interfering with any linked resources and to create the role net new. This mimics the RBAC that ARM sets up. With at least `User Access Administrator` permissions on your subscription, do:
|
||||
|
||||
|
@ -217,7 +216,7 @@ locations.
|
|||
|
||||
1. Create an AAD application which will fake up the portal client.
|
||||
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
This application requires client certificate authentication to be enabled. A
|
||||
suitable key/certificate file can be generated using the following helper
|
||||
utility:
|
||||
|
||||
|
@ -238,13 +237,9 @@ locations.
|
|||
--cert "$(base64 -w0 <secrets/portal-client.crt)" >/dev/null
|
||||
```
|
||||
|
||||
1. Create an AAD application which will fake up the dbtoken client.
|
||||
|
||||
See [dbtoken-service.md](./dbtoken-service.md#setup) for details on setup.
|
||||
|
||||
## Certificates
|
||||
|
||||
1. Create the VPN CA key/certificate. A suitable key/certificate file can be
|
||||
1. Create the VPN CA key/certificate. A suitable key/certificate file can be
|
||||
generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -252,7 +247,7 @@ locations.
|
|||
mv vpn-ca.* secrets
|
||||
```
|
||||
|
||||
1. Create the VPN client key/certificate. A suitable key/certificate file can be
|
||||
1. Create the VPN client key/certificate. A suitable key/certificate file can be
|
||||
generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -260,7 +255,7 @@ locations.
|
|||
mv vpn-client.* secrets
|
||||
```
|
||||
|
||||
1. Create the proxy serving key/certificate. A suitable key/certificate file
|
||||
1. Create the proxy serving key/certificate. A suitable key/certificate file
|
||||
can be generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -268,7 +263,7 @@ locations.
|
|||
mv proxy.* secrets
|
||||
```
|
||||
|
||||
1. Create the proxy client key/certificate. A suitable key/certificate file can
|
||||
1. Create the proxy client key/certificate. A suitable key/certificate file can
|
||||
be generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -276,14 +271,14 @@ locations.
|
|||
mv proxy-client.* secrets
|
||||
```
|
||||
|
||||
1. Create the proxy ssh key/certificate. A suitable key/certificate file can
|
||||
1. Create the proxy ssh key/certificate. A suitable key/certificate file can
|
||||
be generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
ssh-keygen -f secrets/proxy_id_rsa -N ''
|
||||
```
|
||||
|
||||
1. Create an RP serving key/certificate. A suitable key/certificate file
|
||||
1. Create an RP serving key/certificate. A suitable key/certificate file
|
||||
can be generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -291,7 +286,7 @@ locations.
|
|||
mv localhost.* secrets
|
||||
```
|
||||
|
||||
1. Create the dev CA key/certificate. A suitable key/certificate file can be
|
||||
1. Create the dev CA key/certificate. A suitable key/certificate file can be
|
||||
generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -299,7 +294,7 @@ locations.
|
|||
mv dev-ca.* secrets
|
||||
```
|
||||
|
||||
1. Create the dev client key/certificate. A suitable key/certificate file can
|
||||
1. Create the dev client key/certificate. A suitable key/certificate file can
|
||||
be generated using the following helper utility:
|
||||
|
||||
```bash
|
||||
|
@ -326,13 +321,12 @@ import_certs_secrets
|
|||
|
||||
5. Next, we need to update certificates owned by FP Service Principal. Current configuration in DEV and INT is listed below. You can get the `AAD APP ID` from the `secrets/env` file
|
||||
|
||||
Variable | Certificate Client | Subscription Type | AAD App Name | Key Vault Name |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| AZURE_FP_CLIENT_ID | firstparty | DEV | aro-v4-fp-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_ARM_CLIENT_ID | arm | DEV | aro-v4-arm-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_PORTAL_CLIENT_ID | portal-client | DEV | aro-v4-portal-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_FP_CLIENT_ID | firstparty | INT | aro-int-sp | aro-int-eastus-svc |
|
||||
|
||||
| Variable | Certificate Client | Subscription Type | AAD App Name | Key Vault Name |
|
||||
| ---------------------- | ------------------ | ----------------- | ------------------------ | ------------------ |
|
||||
| AZURE_FP_CLIENT_ID | firstparty | DEV | aro-v4-fp-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_ARM_CLIENT_ID | arm | DEV | aro-v4-arm-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_PORTAL_CLIENT_ID | portal-client | DEV | aro-v4-portal-shared-dev | v4-eastus-dev-svc |
|
||||
| AZURE_FP_CLIENT_ID | firstparty | INT | aro-int-sp | aro-int-eastus-svc |
|
||||
|
||||
```bash
|
||||
# Import firstparty.pem to keyvault v4-eastus-svc
|
||||
|
@ -355,18 +349,18 @@ az ad app credential reset \
|
|||
5. The RP makes API calls to kubernetes cluster via a proxy VMSS agent. For the agent to get the updated certificates, this vm needs to be deleted & redeployed. Proxy VM is currently deployed by the `deploy_env_dev` function in `deploy-shared-env.sh`. It makes use of `env-development.json`
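As a concrete sketch of that delete-and-redeploy (the proxy VM resource name here is an assumption; list the resource group first to find the real name):

```bash
# Sketch only: confirm the actual proxy VM name before deleting anything.
az vm list -g "$RESOURCEGROUP" --query "[].name" -o tsv

az vm delete -g "$RESOURCEGROUP" --name dev-proxy-vm --yes

source ./hack/devtools/deploy-shared-env.sh
deploy_env_dev
```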
|
||||
|
||||
6. Run `[rharosecretsdev|e2earosecrets] make secrets-update` to upload it to your
|
||||
storage account so other people on your team can access it via `make secrets`
|
||||
storage account so other people on your team can access it via `make secrets`
|
||||
|
||||
# Environment file
|
||||
|
||||
1. Choose the resource group prefix. The resource group location will be
|
||||
1. Choose the resource group prefix. The resource group location will be
|
||||
The resource group location will be appended to the prefix to make the resource group name. If a v4-prefixed environment exists in the subscription already, use a unique prefix.
|
||||
|
||||
```bash
|
||||
RESOURCEGROUP_PREFIX=v4
|
||||
```
|
||||
|
||||
1. Choose the proxy domain name label. This final proxy hostname will be of the
|
||||
1. Choose the proxy domain name label. This final proxy hostname will be of the
|
||||
form `vm0.$PROXY_DOMAIN_NAME_LABEL.$LOCATION.cloudapp.azure.com`.
|
||||
|
||||
```bash
|
||||
|
@ -382,7 +376,6 @@ storage account so other people on your team can access it via `make secrets`
|
|||
export AZURE_ARM_CLIENT_ID='$AZURE_ARM_CLIENT_ID'
|
||||
export AZURE_FP_CLIENT_ID='$AZURE_FP_CLIENT_ID'
|
||||
export AZURE_FP_SERVICE_PRINCIPAL_ID='$(az ad sp list --filter "appId eq '$AZURE_FP_CLIENT_ID'" --query '[].id' -o tsv)'
|
||||
export AZURE_DBTOKEN_CLIENT_ID='$AZURE_DBTOKEN_CLIENT_ID'
|
||||
export AZURE_PORTAL_CLIENT_ID='$AZURE_PORTAL_CLIENT_ID'
|
||||
export AZURE_PORTAL_ACCESS_GROUP_IDS='$ADMIN_OBJECT_ID'
|
||||
export AZURE_PORTAL_ELEVATED_GROUP_IDS='$ADMIN_OBJECT_ID'
|
||||
|
@ -418,7 +411,7 @@ storage account so other people on your team can access it via `make secrets`.
|
|||
Look at the [helper file](../hack/devtools/deploy-shared-env.sh) to understand
|
||||
each of the bash functions below.
|
||||
|
||||
1. Copy, edit (if necessary) and source your environment file. The required
|
||||
1. Copy, edit (if necessary) and source your environment file. The required
|
||||
environment variable configuration is documented immediately below:
|
||||
|
||||
```bash
|
||||
|
@ -427,7 +420,7 @@ each of the bash functions below.
|
|||
. ./env
|
||||
```
|
||||
|
||||
* LOCATION: Location of the shared RP development environment (default:
|
||||
- LOCATION: Location of the shared RP development environment (default:
|
||||
`eastus`).
|
||||
|
||||
1. Create the resource group and deploy the RP resources:
|
||||
|
@ -450,7 +443,7 @@ each of the bash functions below.
|
|||
|
||||
If you encounter a "VirtualNetworkGatewayCannotUseStandardPublicIP" error
|
||||
when running the `deploy_env_dev` command, you have to override two
|
||||
additional parameters. Run this command instead:
|
||||
additional parameters. Run this command instead:
|
||||
|
||||
```bash
|
||||
deploy_env_dev_override
|
||||
|
@ -476,10 +469,10 @@ each of the bash functions below.
|
|||
import_certs_secrets
|
||||
```
|
||||
|
||||
> __NOTE:__: in production, three additional keys/certificates (rp-mdm, rp-mdsd, and
|
||||
cluster-mdsd) are also required in the $KEYVAULT_PREFIX-svc key vault. These
|
||||
are client certificates for RP metric and log forwarding (respectively) to
|
||||
Geneva.
|
||||
> **NOTE:** in production, three additional keys/certificates (rp-mdm, rp-mdsd, and
|
||||
> cluster-mdsd) are also required in the $KEYVAULT_PREFIX-svc key vault. These
|
||||
> are client certificates for RP metric and log forwarding (respectively) to
|
||||
> Geneva.
|
||||
|
||||
If you need them in development:
|
||||
|
||||
|
@ -508,15 +501,15 @@ each of the bash functions below.
|
|||
--file secrets/cluster-logging-int.pem
|
||||
```
|
||||
|
||||
> __NOTE:__: in development, if you don't have valid certs for these, you can just
|
||||
upload `localhost.pem` as a placeholder for each of these. This will avoid an
|
||||
error stemming from them not existing, but it will result in logging pods
|
||||
crash looping in any clusters you make. Additionally, no gateway resources are
|
||||
created in development so you should not need to execute the cert import statement
|
||||
for the "-gwy" keyvault.
|
||||
> **NOTE:** in development, if you don't have valid certs for these, you can just
|
||||
> upload `localhost.pem` as a placeholder for each of these. This will avoid an
|
||||
> error stemming from them not existing, but it will result in logging pods
|
||||
> crash looping in any clusters you make. Additionally, no gateway resources are
|
||||
> created in development so you should not need to execute the cert import statement
|
||||
> for the "-gwy" keyvault.
|
||||
|
||||
1. In pre-production (int, e2e) certain certificates are provisioned via keyvault
|
||||
integration. These should be rotated and generated in the keyvault itself:
|
||||
integration. These should be rotated and generated in the keyvault itself:
|
||||
|
||||
```
|
||||
Vault Name: "$KEYVAULT_PREFIX-svc"
|
||||
|
@ -540,8 +533,7 @@ Development value: secrets/cluster-logging-int.pem
|
|||
vpn_configuration
|
||||
```
|
||||
|
||||
|
||||
## Append Resource Group to Subscription Cleaner DenyList
|
||||
|
||||
* We have subscription pruning that takes place routinely and need to add our resource group for the shared rp environment to the `denylist` of the cleaner:
|
||||
* [https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29](https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29)
|
||||
- We have subscription pruning that takes place routinely and need to add our resource group for the shared rp environment to the `denylist` of the cleaner:
|
||||
- [https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29](https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29)
|
||||
|
|
|
@ -126,10 +126,6 @@ import_certs_secrets() {
|
|||
--vault-name "$KEYVAULT_PREFIX-por" \
|
||||
--name portal-server \
|
||||
--file secrets/localhost.pem >/dev/null
|
||||
az keyvault certificate import \
|
||||
--vault-name "$KEYVAULT_PREFIX-dbt" \
|
||||
--name dbtoken-server \
|
||||
--file secrets/localhost.pem >/dev/null
|
||||
az keyvault certificate import \
|
||||
--vault-name "$KEYVAULT_PREFIX-por" \
|
||||
--name portal-client \
|
||||
|
|
|
@ -19,7 +19,6 @@ func run(ctx context.Context, log *logrus.Entry) error {
|
|||
err := env.ValidateVars(
|
||||
"ADMIN_OBJECT_ID",
|
||||
"AZURE_CLIENT_ID",
|
||||
"AZURE_DBTOKEN_CLIENT_ID",
|
||||
"AZURE_SERVICE_PRINCIPAL_ID",
|
||||
"AZURE_FP_SERVICE_PRINCIPAL_ID",
|
||||
"AZURE_PORTAL_ACCESS_GROUP_IDS",
|
||||
|
|
|
@ -17,12 +17,6 @@
|
|||
"databaseAccountName": {
|
||||
"value": ""
|
||||
},
|
||||
"dbtokenClientId": {
|
||||
"value": ""
|
||||
},
|
||||
"dbtokenUrl": {
|
||||
"value": ""
|
||||
},
|
||||
"fluentbitImage": {
|
||||
"value": ""
|
||||
},
|
||||
|
|
|
@ -17,12 +17,6 @@
|
|||
"databaseAccountName": {
|
||||
"type": "string"
|
||||
},
|
||||
"dbtokenClientId": {
|
||||
"type": "string"
|
||||
},
|
||||
"dbtokenUrl": {
|
||||
"type": "string"
|
||||
},
|
||||
"fluentbitImage": {
|
||||
"type": "string"
|
||||
},
|
||||
|
|
|
@ -187,47 +187,6 @@
|
|||
},
|
||||
"apiVersion": "2019-09-01"
|
||||
},
|
||||
{
|
||||
"name": "[concat(parameters('keyvaultPrefix'), '-dbt')]",
|
||||
"type": "Microsoft.KeyVault/vaults",
|
||||
"location": "[resourceGroup().location]",
|
||||
"properties": {
|
||||
"tenantId": "[subscription().tenantId]",
|
||||
"sku": {
|
||||
"family": "A",
|
||||
"name": "standard"
|
||||
},
|
||||
"accessPolicies": [
|
||||
{
|
||||
"tenantId": "[subscription().tenantId]",
|
||||
"objectId": "[parameters('rpServicePrincipalId')]",
|
||||
"permissions": {
|
||||
"secrets": [
|
||||
"get"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"tenantId": "[subscription().tenantId]",
|
||||
"objectId": "[parameters('adminObjectId')]",
|
||||
"permissions": {
|
||||
"secrets": [
|
||||
"set",
|
||||
"list"
|
||||
],
|
||||
"certificates": [
|
||||
"delete",
|
||||
"get",
|
||||
"import",
|
||||
"list"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"enableSoftDelete": true
|
||||
},
|
||||
"apiVersion": "2019-09-01"
|
||||
},
|
||||
{
|
||||
"name": "[concat(parameters('keyvaultPrefix'), '-por')]",
|
||||
"type": "Microsoft.KeyVault/vaults",
|
||||
|
|
|
@ -63,9 +63,6 @@
|
|||
"databaseAccountName": {
|
||||
"value": ""
|
||||
},
|
||||
"dbtokenClientId": {
|
||||
"value": ""
|
||||
},
|
||||
"disableCosmosDBFirewall": {
|
||||
"value": false
|
||||
},
|
||||
|
|
|
@ -8,9 +8,6 @@
|
|||
"extraClusterKeyvaultAccessPolicies": {
|
||||
"value": []
|
||||
},
|
||||
"extraDBTokenKeyvaultAccessPolicies": {
|
||||
"value": []
|
||||
},
|
||||
"extraPortalKeyvaultAccessPolicies": {
|
||||
"value": []
|
||||
},
|
||||
|
|
|
@@ -19,17 +19,6 @@
}
}
],
- "dbTokenKeyvaultAccessPolicies": [
- {
- "tenantId": "[subscription().tenantId]",
- "objectId": "[parameters('rpServicePrincipalId')]",
- "permissions": {
- "secrets": [
- "get"
- ]
- }
- }
- ],
"portalKeyvaultAccessPolicies": [
{
"tenantId": "[subscription().tenantId]",

@@ -63,10 +52,6 @@
"type": "array",
"defaultValue": []
},
- "extraDBTokenKeyvaultAccessPolicies": {
- "type": "array",
- "defaultValue": []
- },
"extraPortalKeyvaultAccessPolicies": {
"type": "array",
"defaultValue": []

@@ -123,32 +108,6 @@
},
"name": "rp_in_geneva"
},
- {
- "properties": {
- "protocol": "Tcp",
- "sourcePortRange": "*",
- "destinationPortRange": "445",
- "sourceAddressPrefix": "10.0.8.0/24",
- "destinationAddressPrefix": "*",
- "access": "Allow",
- "priority": 140,
- "direction": "Inbound"
- },
- "name": "dbtoken_in_gateway_445"
- },
- {
- "properties": {
- "protocol": "Tcp",
- "sourcePortRange": "*",
- "destinationPortRange": "8445",
- "sourceAddressPrefix": "10.0.8.0/24",
- "destinationAddressPrefix": "*",
- "access": "Allow",
- "priority": 141,
- "direction": "Inbound"
- },
- "name": "dbtoken_in_gateway_8445"
- },
{
"properties": {
"protocol": "Tcp",

@@ -278,21 +237,6 @@
},
"apiVersion": "2019-09-01"
},
- {
- "name": "[concat(parameters('keyvaultPrefix'), '-dbt')]",
- "type": "Microsoft.KeyVault/vaults",
- "location": "[resourceGroup().location]",
- "properties": {
- "tenantId": "[subscription().tenantId]",
- "sku": {
- "family": "A",
- "name": "standard"
- },
- "accessPolicies": "[concat(variables('dbTokenKeyvaultAccessPolicies'), parameters('extraDBTokenKeyvaultAccessPolicies'))]",
- "enableSoftDelete": true
- },
- "apiVersion": "2019-09-01"
- },
{
"name": "[concat(parameters('keyvaultPrefix'), '-por')]",
"type": "Microsoft.KeyVault/vaults",

@@ -83,9 +83,6 @@
"databaseAccountName": {
"type": "string"
},
- "dbtokenClientId": {
- "type": "string"
- },
"disableCosmosDBFirewall": {
"type": "bool",
"defaultValue": false

@@ -357,64 +354,6 @@
"[resourceId('Microsoft.Network/publicIPAddresses', 'rp-pip')]"
]
},
- {
- "sku": {
- "name": "Standard"
- },
- "properties": {
- "frontendIPConfigurations": [
- {
- "properties": {
- "subnet": {
- "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', 'rp-vnet', 'rp-subnet')]"
- }
- },
- "name": "dbtoken-frontend",
- "zones": "[if(contains(parameters('nonZonalRegions'),toLower(replace(resourceGroup().location, ' ', ''))),'',pickZones('Microsoft.Network', 'publicIPAddresses', resourceGroup().location, 3))]"
- }
- ],
- "backendAddressPools": [
- {
- "name": "rp-backend"
- }
- ],
- "loadBalancingRules": [
- {
- "properties": {
- "frontendIPConfiguration": {
- "id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', 'rp-lb-internal', 'dbtoken-frontend')]"
- },
- "backendAddressPool": {
- "id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"
- },
- "probe": {
- "id": "[resourceId('Microsoft.Network/loadBalancers/probes', 'rp-lb-internal', 'dbtoken-probe')]"
- },
- "protocol": "Tcp",
- "loadDistribution": "Default",
- "frontendPort": 8445,
- "backendPort": 445
- },
- "name": "dbtoken-lbrule"
- }
- ],
- "probes": [
- {
- "properties": {
- "protocol": "Https",
- "port": 445,
- "numberOfProbes": 2,
- "requestPath": "/healthz/ready"
- },
- "name": "dbtoken-probe"
- }
- ]
- },
- "name": "rp-lb-internal",
- "type": "Microsoft.Network/loadBalancers",
- "location": "[resourceGroup().location]",
- "apiVersion": "2020-08-01"
- },
{
"sku": {
"name": "[parameters('vmSize')]",

@@ -479,9 +418,6 @@
"loadBalancerBackendAddressPools": [
{
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb', 'rp-backend')]"
- },
- {
- "id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"
}
]
}

@@ -530,7 +466,6 @@
"dependsOn": [
"[resourceId('Microsoft.Authorization/roleAssignments', guid(resourceGroup().id, parameters('rpServicePrincipalId'), 'RP / Reader'))]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]",
- "[resourceId('Microsoft.Network/loadBalancers', 'rp-lb-internal')]",
"[resourceId('Microsoft.Storage/storageAccounts', substring(parameters('storageAccountDomain'), 0, indexOf(parameters('storageAccountDomain'), '.')))]"
]
},

@@ -669,21 +604,6 @@
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]"
]
},
- {
- "properties": {
- "allowVirtualNetworkAccess": true,
- "allowForwardedTraffic": true,
- "allowGatewayTransit": false,
- "useRemoteGateways": false,
- "remoteVirtualNetwork": {
- "id": "[resourceId(parameters('gatewayResourceGroupName'), 'Microsoft.Network/virtualNetworks', 'gateway-vnet')]"
- }
- },
- "name": "rp-vnet/peering-gateway-vnet",
- "type": "Microsoft.Network/virtualNetworks/virtualNetworkPeerings",
- "apiVersion": "2020-08-01",
- "location": "[resourceGroup().location]"
- },
{
"properties": {},
"name": "[concat(resourceGroup().location, '.', parameters('clusterParentDomainName'))]",

@@ -55,10 +55,8 @@ type Configuration struct {
ClusterParentDomainName *string `json:"clusterParentDomainName,omitempty" value:"required"`
DatabaseAccountName *string `json:"databaseAccountName,omitempty" value:"required"`
CosmosDB *CosmosDBConfiguration `json:"cosmosDB,omitempty"`
- DBTokenClientID *string `json:"dbtokenClientId,omitempty" value:"required"`
DisableCosmosDBFirewall *bool `json:"disableCosmosDBFirewall,omitempty"`
ExtraClusterKeyvaultAccessPolicies []interface{} `json:"extraClusterKeyvaultAccessPolicies,omitempty" value:"required"`
- ExtraDBTokenKeyvaultAccessPolicies []interface{} `json:"extraDBTokenKeyvaultAccessPolicies,omitempty" value:"required"`
ExtraCosmosDBIPs []string `json:"extraCosmosDBIPs,omitempty"`
ExtraGatewayKeyvaultAccessPolicies []interface{} `json:"extraGatewayKeyvaultAccessPolicies,omitempty" value:"required"`
ExtraPortalKeyvaultAccessPolicies []interface{} `json:"extraPortalKeyvaultAccessPolicies,omitempty" value:"required"`

@@ -43,13 +43,13 @@ type deployer struct {
log *logrus.Entry
env env.Core

globaldeployments features.DeploymentsClient
globalgroups features.ResourceGroupsClient
globalrecordsets dns.RecordSetsClient
globalaccounts storage.AccountsClient
deployments features.DeploymentsClient
groups features.ResourceGroupsClient
- loadbalancers network.LoadBalancersClient
+ // loadbalancers network.LoadBalancersClient
userassignedidentities msi.UserAssignedIdentitiesClient
providers features.ProvidersClient
publicipaddresses network.PublicIPAddressesClient

@@ -59,7 +59,6 @@ type deployer struct {
vmssvms compute.VirtualMachineScaleSetVMsClient
zones dns.ZonesClient
clusterKeyvault keyvault.Manager
- dbtokenKeyvault keyvault.Manager
portalKeyvault keyvault.Manager
serviceKeyvault keyvault.Manager

@@ -95,13 +94,13 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Core, config *RPConfig
log: log,
env: _env,

globaldeployments: features.NewDeploymentsClient(_env.Environment(), *config.Configuration.GlobalSubscriptionID, authorizer),
globalgroups: features.NewResourceGroupsClient(_env.Environment(), *config.Configuration.GlobalSubscriptionID, authorizer),
globalrecordsets: dns.NewRecordSetsClient(_env.Environment(), *config.Configuration.GlobalSubscriptionID, authorizer),
globalaccounts: storage.NewAccountsClient(_env.Environment(), *config.Configuration.GlobalSubscriptionID, authorizer),
deployments: features.NewDeploymentsClient(_env.Environment(), config.SubscriptionID, authorizer),
groups: features.NewResourceGroupsClient(_env.Environment(), config.SubscriptionID, authorizer),
- loadbalancers: network.NewLoadBalancersClient(_env.Environment(), config.SubscriptionID, authorizer),
+ // loadbalancers: network.NewLoadBalancersClient(_env.Environment(), config.SubscriptionID, authorizer),
userassignedidentities: msi.NewUserAssignedIdentitiesClient(_env.Environment(), config.SubscriptionID, authorizer),
providers: features.NewProvidersClient(_env.Environment(), config.SubscriptionID, authorizer),
roleassignments: authorization.NewRoleAssignmentsClient(_env.Environment(), config.SubscriptionID, authorizer),

@@ -111,7 +110,6 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Core, config *RPConfig
vmssvms: compute.NewVirtualMachineScaleSetVMsClient(_env.Environment(), config.SubscriptionID, authorizer),
zones: dns.NewZonesClient(_env.Environment(), config.SubscriptionID, authorizer),
clusterKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.ClusterKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
- dbtokenKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.DBTokenKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
portalKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.PortalKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
serviceKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.ServiceKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),

@@ -40,9 +40,6 @@ func (d *deployer) DeployGateway(ctx context.Context) error {

// Special cases where the config isn't marshalled into the ARM template parameters cleanly
parameters := d.getParameters(template["parameters"].(map[string]interface{}))
- parameters.Parameters["dbtokenURL"] = &arm.ParametersParameter{
- Value: "https://dbtoken." + d.config.Location + "." + *d.config.Configuration.RPParentDomainName + ":8445",
- }
parameters.Parameters["rpImage"] = &arm.ParametersParameter{
Value: *d.config.Configuration.RPImagePrefix + ":" + d.version,
}

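For context on the hunk above: values that cannot be marshalled straight from the config file are injected into the ARM deployment parameters by hand, and the dbtokenURL entry is the one being deleted. A rough sketch of that pattern with simplified stand-in types (the real ones live in pkg/util/arm and carry more fields):

    package main

    import "fmt"

    // ParametersParameter is a cut-down stand-in for the ARM template
    // deployment parameter type used above.
    type ParametersParameter struct {
        Value interface{}
    }

    // Parameters mirrors the shape of the template parameters object.
    type Parameters struct {
        Parameters map[string]*ParametersParameter
    }

    func main() {
        params := Parameters{Parameters: map[string]*ParametersParameter{}}

        // Hypothetical inputs standing in for *config.Configuration.RPImagePrefix
        // and the deployer's version.
        rpImagePrefix := "example.azurecr.io/aro"
        version := "v20240101.00"

        params.Parameters["rpImage"] = &ParametersParameter{
            Value: rpImagePrefix + ":" + version,
        }

        fmt.Println(params.Parameters["rpImage"].Value)
    }
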
@@ -114,12 +114,6 @@ func (d *deployer) configureDNS(ctx context.Context) error {
return err
}

- lb, err := d.loadbalancers.Get(ctx, d.config.RPResourceGroupName, "rp-lb-internal", "")
- if err != nil {
- return err
- }
- dbtokenIP := *((*lb.FrontendIPConfigurations)[0].PrivateIPAddress)
-
zone, err := d.zones.Get(ctx, d.config.RPResourceGroupName, d.config.Location+"."+*d.config.Configuration.ClusterParentDomainName)
if err != nil {
return err

@@ -153,20 +147,6 @@ func (d *deployer) configureDNS(ctx context.Context) error {
return err
}

- _, err = d.globalrecordsets.CreateOrUpdate(ctx, *d.config.Configuration.GlobalResourceGroupName, *d.config.Configuration.RPParentDomainName, "dbtoken."+d.config.Location, mgmtdns.A, mgmtdns.RecordSet{
- RecordSetProperties: &mgmtdns.RecordSetProperties{
- TTL: to.Int64Ptr(3600),
- ARecords: &[]mgmtdns.ARecord{
- {
- Ipv4Address: &dbtokenIP,
- },
- },
- },
- }, "", "")
- if err != nil {
- return err
- }
-
nsRecords := make([]mgmtdns.NsRecord, 0, len(*zone.NameServers))
for i := range *zone.NameServers {
nsRecords = append(nsRecords, mgmtdns.NsRecord{

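The two configureDNS hunks above remove the wiring that looked up the rp-lb-internal frontend IP and published it as dbtoken.<location>.<rpParentDomainName>. A sketch of the record that used to be created, using simplified stand-in types rather than the Azure SDK's mgmtdns package:

    package main

    import "fmt"

    // aRecordSet is a cut-down stand-in for mgmtdns.RecordSet.
    type aRecordSet struct {
        TTL         int64
        Ipv4Address string
    }

    func main() {
        location := "eastus"                 // assumption: deployment location
        rpParentDomain := "example.aro.test" // assumption: RP parent domain
        dbtokenIP := "10.0.4.10"             // assumption: rp-lb-internal frontend IP

        name := "dbtoken." + location + "." + rpParentDomain
        rs := aRecordSet{TTL: 3600, Ipv4Address: dbtokenIP}

        // Before this commit, an A record like this pointed the gateway's
        // dbtoken traffic (port 8445) at the internal load balancer; it is no
        // longer needed.
        fmt.Printf("%s -> %s (TTL %d)\n", name, rs.Ipv4Address, rs.TTL)
    }
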
@@ -122,14 +122,10 @@ func DevConfig(_env env.Core) (*Config, error) {
PortalProvisionedThroughput: 400,
GatewayProvisionedThroughput: 400,
},
- DBTokenClientID: to.StringPtr(os.Getenv("AZURE_DBTOKEN_CLIENT_ID")),
DisableCosmosDBFirewall: to.BoolPtr(true),
ExtraClusterKeyvaultAccessPolicies: []interface{}{
adminKeyvaultAccessPolicy(_env),
},
- ExtraDBTokenKeyvaultAccessPolicies: []interface{}{
- adminKeyvaultAccessPolicy(_env),
- },
ExtraGatewayKeyvaultAccessPolicies: []interface{}{
adminKeyvaultAccessPolicy(_env),
},

@@ -199,8 +199,6 @@ func (g *generator) gatewayVMSS() *arm.Resource {
"azureSecPackQualysUrl",
"azureSecPackVSATenantId",
"databaseAccountName",
- "dbtokenClientId",
- "dbtokenUrl",
"mdmFrontendUrl",
"mdsdEnvironment",
"fluentbitImage",

@@ -99,32 +99,6 @@ func (g *generator) rpSecurityGroup() *arm.Resource {
})
} else {
rules = append(rules,
- mgmtnetwork.SecurityRule{
- SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
- Protocol: mgmtnetwork.SecurityRuleProtocolTCP,
- SourcePortRange: to.StringPtr("*"),
- DestinationPortRange: to.StringPtr("445"),
- SourceAddressPrefix: to.StringPtr("10.0.8.0/24"),
- DestinationAddressPrefix: to.StringPtr("*"),
- Access: mgmtnetwork.SecurityRuleAccessAllow,
- Priority: to.Int32Ptr(140),
- Direction: mgmtnetwork.SecurityRuleDirectionInbound,
- },
- Name: to.StringPtr("dbtoken_in_gateway_445"),
- },
- mgmtnetwork.SecurityRule{
- SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
- Protocol: mgmtnetwork.SecurityRuleProtocolTCP,
- SourcePortRange: to.StringPtr("*"),
- DestinationPortRange: to.StringPtr("8445"),
- SourceAddressPrefix: to.StringPtr("10.0.8.0/24"),
- DestinationAddressPrefix: to.StringPtr("*"),
- Access: mgmtnetwork.SecurityRuleAccessAllow,
- Priority: to.Int32Ptr(141),
- Direction: mgmtnetwork.SecurityRuleDirectionInbound,
- },
- Name: to.StringPtr("dbtoken_in_gateway_8445"),
- },
mgmtnetwork.SecurityRule{
SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
Protocol: mgmtnetwork.SecurityRuleProtocolTCP,

@@ -334,69 +308,6 @@ func (g *generator) rpLB() *arm.Resource {
}
}

- func (g *generator) rpLBInternal() *arm.Resource {
- return &arm.Resource{
- Resource: &mgmtnetwork.LoadBalancer{
- Sku: &mgmtnetwork.LoadBalancerSku{
- Name: mgmtnetwork.LoadBalancerSkuNameStandard,
- },
- LoadBalancerPropertiesFormat: &mgmtnetwork.LoadBalancerPropertiesFormat{
- FrontendIPConfigurations: &[]mgmtnetwork.FrontendIPConfiguration{
- {
- FrontendIPConfigurationPropertiesFormat: &mgmtnetwork.FrontendIPConfigurationPropertiesFormat{
- Subnet: &mgmtnetwork.Subnet{
- ID: to.StringPtr("[resourceId('Microsoft.Network/virtualNetworks/subnets', 'rp-vnet', 'rp-subnet')]"),
- },
- },
- Name: to.StringPtr("dbtoken-frontend"),
- Zones: &[]string{},
- },
- },
- BackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
- {
- Name: to.StringPtr("rp-backend"),
- },
- },
- LoadBalancingRules: &[]mgmtnetwork.LoadBalancingRule{
- {
- LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{
- FrontendIPConfiguration: &mgmtnetwork.SubResource{
- ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', 'rp-lb-internal', 'dbtoken-frontend')]"),
- },
- BackendAddressPool: &mgmtnetwork.SubResource{
- ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"),
- },
- Probe: &mgmtnetwork.SubResource{
- ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/probes', 'rp-lb-internal', 'dbtoken-probe')]"),
- },
- Protocol: mgmtnetwork.TransportProtocolTCP,
- LoadDistribution: mgmtnetwork.LoadDistributionDefault,
- FrontendPort: to.Int32Ptr(8445),
- BackendPort: to.Int32Ptr(445),
- },
- Name: to.StringPtr("dbtoken-lbrule"),
- },
- },
- Probes: &[]mgmtnetwork.Probe{
- {
- ProbePropertiesFormat: &mgmtnetwork.ProbePropertiesFormat{
- Protocol: mgmtnetwork.ProbeProtocolHTTPS,
- Port: to.Int32Ptr(445),
- NumberOfProbes: to.Int32Ptr(2),
- RequestPath: to.StringPtr("/healthz/ready"),
- },
- Name: to.StringPtr("dbtoken-probe"),
- },
- },
- },
- Name: to.StringPtr("rp-lb-internal"),
- Type: to.StringPtr("Microsoft.Network/loadBalancers"),
- Location: to.StringPtr("[resourceGroup().location]"),
- },
- APIVersion: azureclient.APIVersion("Microsoft.Network"),
- }
- }
-
// rpLBAlert generates an alert resource for the rp-lb healthprobe metric
func (g *generator) rpLBAlert(threshold float64, severity int32, name string, evalFreq string, windowSize string, metric string) *arm.Resource {
return &arm.Resource{

@@ -463,7 +374,6 @@ func (g *generator) rpVMSS() *arm.Resource {
"clusterMdsdNamespace",
"clusterParentDomainName",
"databaseAccountName",
- "dbtokenClientId",
"fluentbitImage",
"fpClientId",
"fpServicePrincipalId",

@@ -604,9 +514,6 @@ func (g *generator) rpVMSS() *arm.Resource {
{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb', 'rp-backend')]"),
},
- {
- ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"),
- },
},
},
},

@@ -654,7 +561,6 @@ func (g *generator) rpVMSS() *arm.Resource {
DependsOn: []string{
"[resourceId('Microsoft.Authorization/roleAssignments', guid(resourceGroup().id, parameters('rpServicePrincipalId'), 'RP / Reader'))]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]",
- "[resourceId('Microsoft.Network/loadBalancers', 'rp-lb-internal')]",
"[resourceId('Microsoft.Storage/storageAccounts', substring(parameters('storageAccountDomain'), 0, indexOf(parameters('storageAccountDomain'), '.')))]",
},
}

@@ -692,20 +598,6 @@ func (g *generator) rpClusterKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolic
}
}

- func (g *generator) rpDBTokenKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolicyEntry {
- return []mgmtkeyvault.AccessPolicyEntry{
- {
- TenantID: &tenantUUIDHack,
- ObjectID: to.StringPtr("[parameters('rpServicePrincipalId')]"),
- Permissions: &mgmtkeyvault.Permissions{
- Secrets: &[]mgmtkeyvault.SecretPermissions{
- mgmtkeyvault.SecretPermissionsGet,
- },
- },
- },
- }
- }
-
func (g *generator) rpPortalKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolicyEntry {
return []mgmtkeyvault.AccessPolicyEntry{
{

@@ -776,53 +668,6 @@ func (g *generator) rpClusterKeyvault() *arm.Resource {
}
}

- func (g *generator) rpDBTokenKeyvault() *arm.Resource {
- vault := &mgmtkeyvault.Vault{
- Properties: &mgmtkeyvault.VaultProperties{
- EnableSoftDelete: to.BoolPtr(true),
- TenantID: &tenantUUIDHack,
- Sku: &mgmtkeyvault.Sku{
- Name: mgmtkeyvault.Standard,
- Family: to.StringPtr("A"),
- },
- AccessPolicies: &[]mgmtkeyvault.AccessPolicyEntry{
- {
- ObjectID: to.StringPtr(dbTokenAccessPolicyHack),
- },
- },
- },
- Name: to.StringPtr("[concat(parameters('keyvaultPrefix'), '" + env.DBTokenKeyvaultSuffix + "')]"),
- Type: to.StringPtr("Microsoft.KeyVault/vaults"),
- Location: to.StringPtr("[resourceGroup().location]"),
- }
-
- if !g.production {
- *vault.Properties.AccessPolicies = append(g.rpDBTokenKeyvaultAccessPolicies(),
- mgmtkeyvault.AccessPolicyEntry{
- TenantID: &tenantUUIDHack,
- ObjectID: to.StringPtr("[parameters('adminObjectId')]"),
- Permissions: &mgmtkeyvault.Permissions{
- Certificates: &[]mgmtkeyvault.CertificatePermissions{
- mgmtkeyvault.Delete,
- mgmtkeyvault.Get,
- mgmtkeyvault.Import,
- mgmtkeyvault.List,
- },
- Secrets: &[]mgmtkeyvault.SecretPermissions{
- mgmtkeyvault.SecretPermissionsSet,
- mgmtkeyvault.SecretPermissionsList,
- },
- },
- },
- )
- }
-
- return &arm.Resource{
- Resource: vault,
- APIVersion: azureclient.APIVersion("Microsoft.KeyVault"),
- }
- }
-
func (g *generator) rpPortalKeyvault() *arm.Resource {
vault := &mgmtkeyvault.Vault{
Properties: &mgmtkeyvault.VaultProperties{

@@ -265,8 +265,6 @@ echo "configuring aro-gateway service"
cat >/etc/sysconfig/aro-gateway <<EOF
ACR_RESOURCE_ID='$ACRRESOURCEID'
DATABASE_ACCOUNT_NAME='$DATABASEACCOUNTNAME'
- AZURE_DBTOKEN_CLIENT_ID='$DBTOKENCLIENTID'
- DBTOKEN_URL='$DBTOKENURL'
MDM_ACCOUNT="$RPMDMACCOUNT"
MDM_NAMESPACE=Gateway
GATEWAY_DOMAINS='$GATEWAYDOMAINS'

@@ -290,8 +288,6 @@ ExecStart=/usr/bin/docker run \
--cap-drop net_raw \
-e ACR_RESOURCE_ID \
-e DATABASE_ACCOUNT_NAME \
- -e AZURE_DBTOKEN_CLIENT_ID \
- -e DBTOKEN_URL \
-e GATEWAY_DOMAINS \
-e GATEWAY_FEATURES \
-e MDM_ACCOUNT \

@@ -114,7 +114,6 @@ sysctl --system

firewall-cmd --add-port=443/tcp --permanent
firewall-cmd --add-port=444/tcp --permanent
- firewall-cmd --add-port=445/tcp --permanent
firewall-cmd --add-port=2222/tcp --permanent

export AZURE_CLOUD_NAME=$AZURECLOUDNAME

@@ -16,7 +16,6 @@ import (
const (
tenantIDHack = "13805ec3-a223-47ad-ad65-8b2baf92c0fb"
clusterAccessPolicyHack = "e1992efe-4835-46cf-8c08-d8b8451044b8"
- dbTokenAccessPolicyHack = "bb6c76fd-76ea-43c9-8ee3-ca568ae1c226"
portalAccessPolicyHack = "e5e11dae-7c49-4118-9628-e0afa4d6a502"
serviceAccessPolicyHack = "533a94d0-d6c2-4fca-9af1-374aa6493468"
gatewayAccessPolicyHack = "d377245e-57a7-4e58-b618-492f9dbdd74b"

@@ -58,7 +57,6 @@ func (g *generator) templateFixup(t *arm.Template) ([]byte, error) {

if g.production {
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+clusterAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('clusterKeyvaultAccessPolicies'), parameters('extraClusterKeyvaultAccessPolicies'))]"`))
- b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+dbTokenAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('dbTokenKeyvaultAccessPolicies'), parameters('extraDBTokenKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+gatewayAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('gatewayKeyvaultAccessPolicies'), parameters('extraGatewayKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+portalAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('portalKeyvaultAccessPolicies'), parameters('extraPortalKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+serviceAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('serviceKeyvaultAccessPolicies'), parameters('extraServiceKeyvaultAccessPolicies'))]"`))

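The templateFixup hunk above deletes the dbToken line from a set of post-processing regex replacements: each generated key vault carries a placeholder access-policy entry keyed by a well-known GUID, and after the template is marshalled to JSON that placeholder array is swapped for an ARM concat() expression. A small self-contained sketch of the same technique (GUID and variable names copied from the cluster vault case shown above):

    package main

    import (
        "fmt"
        "regexp"
    )

    const clusterAccessPolicyHack = "e1992efe-4835-46cf-8c08-d8b8451044b8"

    func main() {
        // A toy fragment of marshalled template JSON containing the placeholder.
        b := []byte(`{"accessPolicies": [{"objectId": "` + clusterAccessPolicyHack + `"}]}`)

        // Replace the whole placeholder array with an ARM expression that
        // concatenates the variable and parameter policies, exactly the pattern
        // kept for the cluster/gateway/portal/service vaults.
        re := regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*` + clusterAccessPolicyHack + `[^]]*\]`)
        b = re.ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('clusterKeyvaultAccessPolicies'), parameters('extraClusterKeyvaultAccessPolicies'))]"`))

        fmt.Println(string(b))
    }
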
@@ -27,8 +27,6 @@ func (g *generator) gatewayTemplate() *arm.Template {
"azureSecPackQualysUrl",
"azureSecPackVSATenantId",
"databaseAccountName",
- "dbtokenClientId",
- "dbtokenUrl",
"fluentbitImage",
"gatewayDomains",
"gatewayFeatures",

@@ -46,7 +46,6 @@ func (g *generator) rpTemplate() *arm.Template {
"clusterMdsdConfigVersion",
"clusterMdsdNamespace",
"cosmosDB",
- "dbtokenClientId",
"disableCosmosDBFirewall",
"fluentbitImage",
"fpClientId",

@@ -166,17 +165,12 @@ func (g *generator) rpTemplate() *arm.Template {
g.publicIPAddress("rp-pip"),
g.publicIPAddress("portal-pip"),
g.rpLB(),
- g.rpLBInternal(),
g.rpVMSS(),
g.rpStorageAccount(),
g.rpLBAlert(30.0, 2, "rp-availability-alert", "PT5M", "PT15M", "DipAvailability"), // triggers on all 3 RPs being down for 10min, can't be >=0.3 due to deploys going down to 32% at times.
g.rpLBAlert(67.0, 3, "rp-degraded-alert", "PT15M", "PT6H", "DipAvailability"), // 1/3 backend down for 1h or 2/3 down for 3h in the last 6h
g.rpLBAlert(33.0, 2, "rp-vnet-alert", "PT5M", "PT5M", "VipAvailability")) // this will trigger only if the Azure network infrastructure between the loadBalancers and VMs is down for 3.5min
// more on alerts https://msazure.visualstudio.com/AzureRedHatOpenShift/_wiki/wikis/ARO.wiki/53765/WIP-Alerting
-
- t.Resources = append(t.Resources,
- g.virtualNetworkPeering("rp-vnet/peering-gateway-vnet", "[resourceId(parameters('gatewayResourceGroupName'), 'Microsoft.Network/virtualNetworks', 'gateway-vnet')]", false, false, nil),
- )
}

t.Resources = append(t.Resources, g.rpDNSZone(),

@@ -282,7 +276,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
if g.production {
t.Variables = map[string]interface{}{
"clusterKeyvaultAccessPolicies": g.rpClusterKeyvaultAccessPolicies(),
- "dbTokenKeyvaultAccessPolicies": g.rpDBTokenKeyvaultAccessPolicies(),
"portalKeyvaultAccessPolicies": g.rpPortalKeyvaultAccessPolicies(),
"serviceKeyvaultAccessPolicies": g.rpServiceKeyvaultAccessPolicies(),
}

@@ -298,7 +291,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
params = append(params,
"deployNSGs",
"extraClusterKeyvaultAccessPolicies",
- "extraDBTokenKeyvaultAccessPolicies",
"extraPortalKeyvaultAccessPolicies",
"extraServiceKeyvaultAccessPolicies",
"gatewayResourceGroupName",

@@ -317,7 +309,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
p.Type = "bool"
p.DefaultValue = false
case "extraClusterKeyvaultAccessPolicies",
- "extraDBTokenKeyvaultAccessPolicies",
"extraPortalKeyvaultAccessPolicies",
"extraServiceKeyvaultAccessPolicies":
p.Type = "array"

@@ -337,7 +328,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
g.rpVnet(),
g.rpPEVnet(),
g.rpClusterKeyvault(),
- g.rpDBTokenKeyvault(),
g.rpPortalKeyvault(),
g.rpServiceKeyvault(),
g.rpServiceKeyvaultDynamic(),

@@ -22,7 +22,6 @@ const (
COMPONENT_RP ServiceComponent = "RP"
COMPONENT_GATEWAY ServiceComponent = "GATEWAY"
COMPONENT_MONITOR ServiceComponent = "MONITOR"
- COMPONENT_DBTOKEN ServiceComponent = "DBTOKEN"
COMPONENT_OPERATOR ServiceComponent = "OPERATOR"
COMPONENT_MIRROR ServiceComponent = "MIRROR"
COMPONENT_PORTAL ServiceComponent = "PORTAL"

@@ -46,13 +46,11 @@
EncryptionSecretV2Name = "encryption-key-v2"
FrontendEncryptionSecretName = "fe-encryption-key"
FrontendEncryptionSecretV2Name = "fe-encryption-key-v2"
- DBTokenServerSecretName = "dbtoken-server"
PortalServerSecretName = "portal-server"
PortalServerClientSecretName = "portal-client"
PortalServerSessionKeySecretName = "portal-session-key"
PortalServerSSHKeySecretName = "portal-sshkey"
ClusterKeyvaultSuffix = "-cls"
- DBTokenKeyvaultSuffix = "-dbt"
GatewayKeyvaultSuffix = "-gwy"
PortalKeyvaultSuffix = "-por"
ServiceKeyvaultSuffix = "-svc"