Removed all references to dbtoken

Anshul Verma 2024-05-27 20:06:35 +05:30
Parent 8d0407cfbb
Commit 7a568dbad1
31 changed files with 325 additions and 804 deletions

View file

@ -150,7 +150,7 @@ linters-settings:
alias: $1
- pkg: "^github\\.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/([0-9]+)-([0-9]+)-([0-9]+)-?(preview)?/redhatopenshift$"
alias: mgmtredhatopenshift$1$2$3$4
- pkg: "^github\\.com/Azure/ARO-RP/pkg/(dbtoken|deploy|gateway|mirror|monitor|operator|portal)$"
- pkg: "^github\\.com/Azure/ARO-RP/pkg/(deploy|gateway|mirror|monitor|operator|portal)$"
alias: pkg$1
- pkg: "^github\\.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/([0-9]+)-([0-9]+)-([0-9]+)-?(preview)?/redhatopenshift$"
alias: redhatopenshift$1$2$3$4

View file

@ -15,5 +15,5 @@ FROM ${REGISTRY}/ubi8/ubi-minimal
RUN microdnf update && microdnf clean all
COPY --from=builder /app/aro /app/e2e.test /usr/local/bin/
ENTRYPOINT ["aro"]
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp 8445/tcp
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp
USER 1000

View file

@ -7,7 +7,6 @@ const (
envDatabaseName = "DATABASE_NAME"
envDatabaseAccountName = "DATABASE_ACCOUNT_NAME"
envKeyVaultPrefix = "KEYVAULT_PREFIX"
envDBTokenUrl = "DBTOKEN_URL"
envOpenShiftVersions = "OPENSHIFT_VERSIONS"
envInstallerImageDigests = "INSTALLER_IMAGE_DIGESTS"
)

View file

@ -26,10 +26,6 @@ func gateway(ctx context.Context, log *logrus.Entry) error {
return err
}
if err = env.ValidateVars("AZURE_DBTOKEN_CLIENT_ID"); err != nil {
return err
}
m := statsd.New(ctx, log.WithField("component", "gateway"), _env, os.Getenv("MDM_ACCOUNT"), os.Getenv("MDM_NAMESPACE"), os.Getenv("MDM_STATSD_SOCKET"))
g, err := golang.NewMetrics(log.WithField("component", "gateway"), m)

View file

@ -1,73 +0,0 @@
# DB token service
## Introduction
Cosmos DB access control is described
[here](https://docs.microsoft.com/en-us/azure/cosmos-db/secure-access-to-data).
In brief, there are three options:
1. use r/w or r/o primary keys, which grant access to the whole database account
2. implement a service which transforms (1) into scoped resource tokens
3. use a Microsoft Entra ID RBAC-based model.
Currently, the RP, monitoring and portal service share the same security
boundary (the RP VM) and use option 1. The dbtoken service, which also runs on
the RP VM, is our implementation of option 2. Option 3 is now in GA and there are plans
[here](https://issues.redhat.com/browse/ARO-5512) to implement it and replace options 1
and 2.
The purpose of the dbtoken service, at the time of its implementation, was to enable the
gateway component (which handles end-user traffic) to access the service Cosmos
DB without recourse to using root credentials. This provides a level of defence
in depth in the face of an attack on the gateway component.
## Workflow
* An AAD application is manually created at rollout, registering the
https://dbtoken.aro.azure.com resource.
* The dbtoken service receives POST requests from any client wishing to receive
a scoped resource token at its /token?permission=<permission> endpoint.
* The dbtoken service validates that the POST request includes a valid
AAD-signed bearer JWT for the https://dbtoken.aro.azure.com resource. The
subject UUID is retrieved from the JWT.
* In the case of the gateway service, the JWT subject UUID is the UUID of the
service principal corresponding to the gateway VMSS MSI.
* Using its primary key Cosmos DB credential, the dbtoken service requests a scoped
resource token for the given user UUID and <permission> from Cosmos DB and
proxies it to the caller.
* Clients may use the dbtoken.Refresher interface to handle regularly refreshing
the resource token and injecting it into the database client used by the rest
of the client codebase.
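As a concrete illustration of the request flow above, here is a minimal sketch of a client call. The resource URI and the `/token?permission=<permission>` endpoint come from this document; the `gateway` permission name, the `:8445` port, and the `RP_PARENT_DOMAIN_NAME` variable are assumptions for illustration only.
```bash
# Acquire an AAD token for the dbtoken resource (the caller's identity, e.g.
# the gateway VMSS MSI, must be a subject the service expects), then exchange
# it for a scoped Cosmos DB resource token.
ACCESS_TOKEN="$(az account get-access-token \
  --resource https://dbtoken.aro.azure.com \
  --query accessToken -o tsv)"
curl -s -X POST \
  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "https://dbtoken.${LOCATION}.${RP_PARENT_DOMAIN_NAME}:8445/token?permission=gateway"
```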
## Setup
* Create the application and set `requestedAccessTokenVersion`
```bash
AZURE_DBTOKEN_CLIENT_ID="$(az ad app create --display-name dbtoken \
--oauth2-allow-implicit-flow false \
--query appId \
-o tsv)"
OBJ_ID="$(az ad app show --id $AZURE_DBTOKEN_CLIENT_ID --query id -o tsv)"
# NOTE: the graph API requires this to be done from a managed machine
az rest --method PATCH \
--uri https://graph.microsoft.com/v1.0/applications/$OBJ_ID/ \
--body '{"api":{"requestedAccessTokenVersion": 2}}'
```
* Add the `AZURE_DBTOKEN_CLIENT_ID` to the RP config for the respective environment.
* The dbtoken service is responsible for creating database users and permissions
* see the ConfigurePermissions function.
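For a development environment, a hedged sketch of wiring the app ID into the RP config follows; the `secrets/env` file and `make dev-config.yaml` appear elsewhere in these docs, but the exact placement shown here is an assumption.
```bash
# Persist the app ID where the dev tooling sources it from, then regenerate the
# dev RP configuration so dbtokenClientId is populated.
echo "export AZURE_DBTOKEN_CLIENT_ID='${AZURE_DBTOKEN_CLIENT_ID}'" >> secrets/env
make dev-config.yaml
```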

View file

@ -48,11 +48,11 @@
What to change in `env-int` file:
* if using a public key separate from `~/.ssh/id_rsa.pub` (for ssh access to RP and Gateway vmss instances), source it with `export SSH_PUBLIC_KEY=~/.ssh/id_separate.pub`
* don't try to change `$USER` prefix used there
* set tag of `FLUENTBIT_IMAGE` value to match the default from `pkg/util/version/const.go`,
- if using a public key separate from `~/.ssh/id_rsa.pub` (for ssh access to RP and Gateway vmss instances), source it with `export SSH_PUBLIC_KEY=~/.ssh/id_separate.pub`
- don't try to change `$USER` prefix used there
- set tag of `FLUENTBIT_IMAGE` value to match the default from `pkg/util/version/const.go`,
eg. `FLUENTBIT_IMAGE=${USER}aro.azurecr.io/fluentbit:1.9.10-cm20230426`
* if you actually care about fluentbit image version, you need to change the default both in the env-int file and for ARO Deployer, which is out of scope of this guide
- if you actually care about fluentbit image version, you need to change the default both in the env-int file and for ARO Deployer, which is out of scope of this guide
1. And finally source the env:
@ -61,36 +61,44 @@
```
1. Generate the development RP configuration
```bash
make dev-config.yaml
```
1. Run `make deploy`. This will fail on the first attempt to run due to AKS not being installed, so after the first failure, please skip to the next step to deploy the VPN Gateway and then deploy AKS.
> __NOTE:__ If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy`.
> __NOTE:__ If the deployment fails with `A vault with the same name already exists in deleted state`, then you will need to recover the deleted keyvaults from a previous deploy using: `az keyvault recover --name <KEYVAULT_NAME>` for each keyvault, and re-run.
> **NOTE:** If the deployment fails with `InvalidResourceReference` due to the RP Network Security Groups not found, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy`.
> **NOTE:** If the deployment fails with `A vault with the same name already exists in deleted state`, then you will need to recover the deleted keyvaults from a previous deploy using: `az keyvault recover --name <KEYVAULT_NAME>` for each keyvault, and re-run.
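A hedged convenience loop for the soft-delete case in the note above; it assumes every soft-deleted keyvault returned by `az keyvault list-deleted` belongs to your environment, so review the list before running it.
```bash
# Recover all soft-deleted keyvaults left over from a previous deploy, then
# re-run `make deploy`.
for kv in $(az keyvault list-deleted --query "[].name" -o tsv); do
  az keyvault recover --name "$kv"
done
```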
1. Deploy a VPN Gateway
This is required in order to be able to connect to AKS from your local machine:
```bash
source ./hack/devtools/deploy-shared-env.sh
deploy_vpn_for_dedicated_rp
```
1. Deploy AKS by running these commands from the ARO-RP root directory:
```bash
source ./hack/devtools/deploy-shared-env.sh
deploy_aks_dev
```
> __NOTE:__ If the AKS deployment fails with missing RP VNETs, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy` and then re-run `deploy_aks_dev`.
> **NOTE:** If the AKS deployment fails with missing RP VNETs, delete the "gateway-production-predeploy" deployment in the gateway resource group, and re-run `make deploy` and then re-run `deploy_aks_dev`.
1. Install Hive into AKS
1. Download the VPN config. Please note that this action will _**OVERWRITE**_ the `secrets/vpn-$LOCATION.ovpn` on your local machine. **DO NOT** run `make secrets-update` after doing this, as you will overwrite the existing config; wait until you have run `make secrets` to restore it.
```bash
vpn_configuration
```
1. Connect to the Dev VPN in a new terminal:
```bash
sudo openvpn secrets/vpn-$LOCATION.ovpn
```
@ -104,11 +112,13 @@
1. Mirror the OpenShift images to your new ACR
<!-- TODO (bv) allow mirroring through a pipeline would be faster and a nice to have -->
> __NOTE:__ Running the mirroring through a VM in Azure rather than a local workstation is recommended for better performance.
> __NOTE:__ Value of `USER_PULL_SECRET` variable comes from the secrets, which are sourced via `env-int` file
> __NOTE:__ `DST_AUTH` token or the login to the registry expires after some time
> **NOTE:** Running the mirroring through a VM in Azure rather than a local workstation is recommended for better performance.
> **NOTE:** Value of `USER_PULL_SECRET` variable comes from the secrets, which are sourced via `env-int` file
> **NOTE:** `DST_AUTH` token or the login to the registry expires after some time
1. Setup mirroring environment variables
```bash
export DST_ACR_NAME=${USER}aro
export SRC_AUTH_QUAY=$(echo $USER_PULL_SECRET | jq -r '.auths."quay.io".auth')
@ -117,6 +127,7 @@
```
1. Login to the Azure Container Registry
```bash
docker login -u 00000000-0000-0000-0000-000000000000 -p "$(echo $DST_AUTH | base64 -d | cut -d':' -f2)" "${DST_ACR_NAME}.azurecr.io"
```
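When `DST_AUTH` expires (see the note above), one way to recreate it is sketched below; this assumes `DST_AUTH` is the base64-encoded `user:token` pair that the `docker login` step decodes, and that `az` and `base64` are available.
```bash
# Recreate DST_AUTH from a fresh ACR access token; the 00000000-... username is
# the ACR token-auth convention used in the login step above.
export DST_AUTH="$(echo -n "00000000-0000-0000-0000-000000000000:$(az acr login -n "${DST_ACR_NAME}" --expose-token --query accessToken -o tsv)" | base64 -w0)"
```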
@ -149,10 +160,9 @@
> to pull the images.
> If the push fails on error like `unable to retrieve auth token:
> invalid username/password: unauthorized: authentication required`,
> try to create `DST_AUTH` variable and login to the container
> registry (as explained in steps above) again. It will resolve the
> failure in case of an expired auth token.
> invalid username/password: unauthorized: authentication required`,
> try to create `DST_AUTH` variable and login to the container
> registry (as explained in steps above) again. It will resolve the
> failure in case of an expired auth token.
```bash
make publish-image-aro-multistage
@ -160,6 +170,7 @@
```
1. Update the DNS Child Domains
```bash
export PARENT_DOMAIN_NAME=osadev.cloud
export PARENT_DOMAIN_RESOURCEGROUP=dns
@ -187,9 +198,10 @@
1. Update the certificates in keyvault
<!-- TODO: this is almost duplicated elsewhere. Would be nice to move to common area -->
> __NOTE:__ If you reuse an old name, you might run into soft-delete of the keyvaults. Run `az keyvault recover --name` to fix this.
> __NOTE:__ Check to ensure that the $KEYVAULT_PREFIX environment variable set on workstation matches the prefix deployed into the resource group.
> **NOTE:** If you reuse an old name, you might run into soft-delete of the keyvaults. Run `az keyvault recover --name` to fix this.
> **NOTE:** Check to ensure that the $KEYVAULT_PREFIX environment variable set on workstation matches the prefix deployed into the resource group.
```bash
az keyvault certificate import \
@ -232,14 +244,11 @@
--vault-name "$KEYVAULT_PREFIX-por" \
--name portal-client \
--file secrets/portal-client.pem >/dev/null
az keyvault certificate import \
--vault-name "$KEYVAULT_PREFIX-dbt" \
--name dbtoken-server \
--file secrets/localhost.pem >/dev/null
```
1. Delete the existing VMSS
> __NOTE:__ This needs to be deleted as deploying won't recreate the VMSS if the commit hash is the same.
> **NOTE:** This needs to be deleted as deploying won't recreate the VMSS if the commit hash is the same.
```bash
az vmss delete -g ${RESOURCEGROUP} --name rp-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) && az vmss delete -g $USER-gwy-$LOCATION --name gateway-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty)
@ -250,6 +259,7 @@
Gateway.
1. Create storage account and role assignment required for workload identity clusters
```
source ./hack/devtools/deploy-shared-env.sh
deploy_oic_for_dedicated_rp
@ -260,6 +270,7 @@
## SSH to RP VMSS Instance
1. Update the RP NSG to allow SSH
```bash
az network nsg rule create \
--name ssh-to-rp \
@ -273,16 +284,17 @@
```
1. SSH into the VM
```bash
VMSS_PIP=$(az vmss list-instance-public-ips -g $RESOURCEGROUP --name rp-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
ssh cloud-user@${VMSS_PIP}
```
## SSH to Gateway VMSS Instance
1. Update the Gateway NSG to allow SSH
```bash
az network nsg rule create \
--name ssh-to-gwy \
@ -295,15 +307,14 @@
--destination-port-ranges 22
```
1. SSH into the VM
```bash
VMSS_PIP=$(az vmss list-instance-public-ips -g $USER-gwy-$LOCATION --name gateway-vmss-$(git rev-parse --short=7 HEAD)$([[ $(git status --porcelain) = "" ]] || echo -dirty) | jq -r '.[0].ipAddress')
ssh cloud-user@${VMSS_PIP}
```
## Deploy a Cluster
1. Add a NSG rule to allow tunneling to the RP instance
@ -320,30 +331,34 @@
--destination-port-ranges 443
```
1. Run the tunnel program to tunnel to the RP
```bash
make tunnel
```
> __NOTE:__ `make tunnel` will print the public IP of your new RP VM NIC. Ensure that it's correct.
> **NOTE:** `make tunnel` will print the public IP of your new RP VM NIC. Ensure that it's correct.
1. Update the versions available to install (run this once for each version you need)
```bash
curl -X PUT -k "https://localhost:8443/admin/versions" --header "Content-Type: application/json" -d '{ "properties": { "version": "4.x.y", "enabled": true, "openShiftPullspec": "quay.io/openshift-release-dev/ocp-release@sha256:<sha256>", "installerPullspec": "<name>.azurecr.io/installer:release-4.x" }}'
```
1. Update environment variable to deploy in a different resource group
```bash
export RESOURCEGROUP=myResourceGroup
```
1. Create the resource group if it doesn't exist
```bash
az group create --resource-group $RESOURCEGROUP --location $LOCATION
```
1. Create VNets / Subnets
```bash
az network vnet create \
--resource-group $RESOURCEGROUP \
@ -370,6 +385,7 @@
```
1. Register your subscription with the resource provider (post directly to subscription cosmosdb container)
```bash
curl -k -X PUT -H 'Content-Type: application/json' -d '{
"state": "Registered",
@ -386,6 +402,7 @@
```
1. Create the cluster
```bash
export CLUSTER=$USER
@ -397,4 +414,4 @@
--worker-subnet worker-subnet
```
> __NOTE:__ The `az aro` CLI extension must be registered in order to run `az aro` commands against a local or tunneled RP. The usual hack script used to create clusters does not work due to keyvault mirroring requirements. The name of the cluster depends on the DNS zone that was created in an earlier step.
> **NOTE:** The `az aro` CLI extension must be registered in order to run `az aro` commands against a local or tunneled RP. The usual hack script used to create clusters does not work due to keyvault mirroring requirements. The name of the cluster depends on the DNS zone that was created in an earlier step.

View file

@ -1,28 +1,28 @@
# Certificates and Secrets Explained
## Overview
This walks through all the keyvaults and explains how the certificates and secrets are used throughout.
## MDM/MDSD
The majority of the certificates below are mdm/mdsd related. These certificates are signed by the AME.GBL certificate authority and are vital to ensuring the ingestion of metrics and logs within the ARO RP service and clusters.
More information about Geneva Monitoring can be found [here](https://eng.ms/docs/products/geneva/getting_started/newgettingstarted/overview).
## Certificates
The majority of the certificates are configured for auto-renewal to ensure that, when nearing expiration, they are updated and rotated. More information about certificate rotation can be found [here](./certificate-rotation.md).
## RP Keyvaults
1. Cluster (cls)
- Certificates:
- This keyvault contains all cluster `api` and `*.apps` certificates used within OpenShift. These certificates are auto-rotated and pushed to clusters during AdminUpdates in the `configureAPIServerCertificate` and `configureIngressCertificate` steps. These certificates will not be generated if the `DisableSignedCertificates` [feature flag](./feature-flags.md) is set within the RP config.
1. DBToken (dbt)
- Certificates:
- `dbtoken-server` is a TLS certificate used in the [`dbtoken` service](./dbtoken-service.md) for RESTful calls to issue credentials.
1. Portal (por)
- Certificates:
- `portal-client` is a certificate which is used within the aro-portal app registration. The subject of this certificate must match that within the `trustedSubjects` section of the app registration manifest within the Azure portal, otherwise callbacks from the Microsoft AAD login service will not function correctly.
- `portal-server` is a TLS certificate used in the SRE portal to access clusters
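To inspect these in a given environment, a quick hedged check is below; the `-cls`/`-dbt`/`-por` suffixes come from this document, and `$KEYVAULT_PREFIX` is assumed to be set as in the deployment docs.
```bash
# List the certificates held in each RP keyvault described above.
for suffix in cls dbt por; do
  echo "== ${KEYVAULT_PREFIX}-${suffix} =="
  az keyvault certificate list --vault-name "${KEYVAULT_PREFIX}-${suffix}" --query "[].name" -o tsv
done
```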

View file

@ -73,7 +73,6 @@ locations.
mkdir -p secrets
```
## AAD applications
1. Create an AAD application which will fake up the ARM layer:
@ -101,7 +100,7 @@ locations.
Later this application will be granted:
* `User Access Administrator` on your subscription.
- `User Access Administrator` on your subscription.
1. Create an AAD application which will fake up the first party application.
@ -130,9 +129,9 @@ locations.
Later this application will be granted:
* `ARO v4 FP Subscription` on your subscription.
* `DNS Zone Contributor` on the DNS zone in RESOURCEGROUP.
* `Network Contributor` on RESOURCEGROUP.
- `ARO v4 FP Subscription` on your subscription.
- `DNS Zone Contributor` on the DNS zone in RESOURCEGROUP.
- `Network Contributor` on RESOURCEGROUP.
1. Create an AAD application which will fake up the RP identity.
@ -150,9 +149,9 @@ locations.
Later this application will be granted:
* `Reader` on RESOURCEGROUP.
* `Secrets / Get` on the key vault in RESOURCEGROUP.
* `DocumentDB Account Contributor` on the CosmosDB resource in RESOURCEGROUP.
- `Reader` on RESOURCEGROUP.
- `Secrets / Get` on the key vault in RESOURCEGROUP.
- `DocumentDB Account Contributor` on the CosmosDB resource in RESOURCEGROUP.
1. Create an AAD application which will fake up the gateway identity.
@ -184,21 +183,21 @@ locations.
Later this application will be granted:
* `Contributor` on your subscription.
* `User Access Administrator` on your subscription.
- `Contributor` on your subscription.
- `User Access Administrator` on your subscription.
You must also manually grant this application the `Microsoft.Graph/Application.ReadWrite.OwnedBy` permission, which requires admin access, in order for AAD applications to be created/deleted on a per-cluster basis.
* Go into the Azure Portal
* Go to Azure Active Directory
* Navigate to the `aro-v4-tooling-shared` app registration page
* Click 'API permissions' in the left side pane
* Click 'Add a permission'.
* Click 'Microsoft Graph'
* Select 'Application permissions'
* Search for 'Application' and select `Application.ReadWrite.OwnedBy`
* Click 'Add permissions'
* This request will need to be approved by a tenant administrator. If you are one, you can click the `Grant admin consent for <name>` button to the right of the `Add a permission` button on the app page
- Go into the Azure Portal
- Go to Azure Active Directory
- Navigate to the `aro-v4-tooling-shared` app registration page
- Click 'API permissions' in the left side pane
- Click 'Add a permission'.
- Click 'Microsoft Graph'
- Select 'Application permissions'
- Search for 'Application' and select `Application.ReadWrite.OwnedBy`
- Click 'Add permissions'
- This request will need to be approved by a tenant administrator. If you are one, you can click the `Grant admin consent for <name>` button to the right of the `Add a permission` button on the app page
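A hedged CLI equivalent of the portal steps above: `00000003-0000-0000-c000-000000000000` is the well-known Microsoft Graph application ID, the role ID shown is the published ID for `Application.ReadWrite.OwnedBy` (verify it before use), and `$AZURE_CLIENT_ID` stands in for the `aro-v4-tooling-shared` app registration.
```bash
# Request the Application.ReadWrite.OwnedBy application permission on Microsoft
# Graph, then grant admin consent (requires a tenant administrator).
az ad app permission add \
  --id "$AZURE_CLIENT_ID" \
  --api 00000003-0000-0000-c000-000000000000 \
  --api-permissions 18a4783c-866b-4cc7-a460-3d5e5662c884=Role
az ad app permission admin-consent --id "$AZURE_CLIENT_ID"
```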
1. Set up the RP role definitions and subscription role assignments in your Azure subscription. The usage of "uuidgen" for fpRoleDefinitionId is simply there to keep from interfering with any linked resources and to create the role net new. This mimics the RBAC that ARM sets up. With at least `User Access Administrator` permissions on your subscription, do:
@ -238,10 +237,6 @@ locations.
--cert "$(base64 -w0 <secrets/portal-client.crt)" >/dev/null
```
1. Create an AAD application which will fake up the dbtoken client.
See [dbtoken-service.md](./dbtoken-service.md#setup) for details on setup.
## Certificates
1. Create the VPN CA key/certificate. A suitable key/certificate file can be
@ -326,14 +321,13 @@ import_certs_secrets
5. Next, we need to update certificates owned by FP Service Principal. Current configuration in DEV and INT is listed below. You can get the `AAD APP ID` from the `secrets/env` file
Variable | Certificate Client | Subscription Type | AAD App Name | Key Vault Name |
| --- | --- | --- | --- | --- |
| Variable | Certificate Client | Subscription Type | AAD App Name | Key Vault Name |
| ---------------------- | ------------------ | ----------------- | ------------------------ | ------------------ |
| AZURE_FP_CLIENT_ID | firstparty | DEV | aro-v4-fp-shared-dev | v4-eastus-dev-svc |
| AZURE_ARM_CLIENT_ID | arm | DEV | aro-v4-arm-shared-dev | v4-eastus-dev-svc |
| AZURE_PORTAL_CLIENT_ID | portal-client | DEV | aro-v4-portal-shared-dev | v4-eastus-dev-svc |
| AZURE_FP_CLIENT_ID | firstparty | INT | aro-int-sp | aro-int-eastus-svc |
```bash
# Import firstparty.pem to keyvault v4-eastus-svc
az keyvault certificate import --vault-name <kv_name> --name rp-firstparty --file firstparty.pem
@ -382,7 +376,6 @@ storage account so other people on your team can access it via `make secrets`
export AZURE_ARM_CLIENT_ID='$AZURE_ARM_CLIENT_ID'
export AZURE_FP_CLIENT_ID='$AZURE_FP_CLIENT_ID'
export AZURE_FP_SERVICE_PRINCIPAL_ID='$(az ad sp list --filter "appId eq '$AZURE_FP_CLIENT_ID'" --query '[].id' -o tsv)'
export AZURE_DBTOKEN_CLIENT_ID='$AZURE_DBTOKEN_CLIENT_ID'
export AZURE_PORTAL_CLIENT_ID='$AZURE_PORTAL_CLIENT_ID'
export AZURE_PORTAL_ACCESS_GROUP_IDS='$ADMIN_OBJECT_ID'
export AZURE_PORTAL_ELEVATED_GROUP_IDS='$ADMIN_OBJECT_ID'
@ -427,7 +420,7 @@ each of the bash functions below.
. ./env
```
* LOCATION: Location of the shared RP development environment (default:
- LOCATION: Location of the shared RP development environment (default:
`eastus`).
1. Create the resource group and deploy the RP resources:
@ -476,10 +469,10 @@ each of the bash functions below.
import_certs_secrets
```
> __NOTE:__: in production, three additional keys/certificates (rp-mdm, rp-mdsd, and
cluster-mdsd) are also required in the $KEYVAULT_PREFIX-svc key vault. These
are client certificates for RP metric and log forwarding (respectively) to
Geneva.
> **NOTE:** in production, three additional keys/certificates (rp-mdm, rp-mdsd, and
> cluster-mdsd) are also required in the $KEYVAULT_PREFIX-svc key vault. These
> are client certificates for RP metric and log forwarding (respectively) to
> Geneva.
If you need them in development:
@ -508,12 +501,12 @@ each of the bash functions below.
--file secrets/cluster-logging-int.pem
```
> __NOTE:__: in development, if you don't have valid certs for these, you can just
upload `localhost.pem` as a placeholder for each of these. This will avoid an
error stemming from them not existing, but it will result in logging pods
crash looping in any clusters you make. Additionally, no gateway resources are
created in development so you should not need to execute the cert import statement
for the "-gwy" keyvault.
> **NOTE:** in development, if you don't have valid certs for these, you can just
> upload `localhost.pem` as a placeholder for each of these. This will avoid an
> error stemming from them not existing, but it will result in logging pods
> crash looping in any clusters you make. Additionally, no gateway resources are
> created in development so you should not need to execute the cert import statement
> for the "-gwy" keyvault.
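A hedged sketch of the placeholder import described in the note above; the certificate names and the `-svc` vault come from this section, and `localhost.pem` is the development placeholder.
```bash
# Development only: import localhost.pem as a stand-in for the Geneva client
# certificates (expect logging pods to crash-loop in any clusters you create).
for name in rp-mdm rp-mdsd cluster-mdsd; do
  az keyvault certificate import \
    --vault-name "$KEYVAULT_PREFIX-svc" \
    --name "$name" \
    --file secrets/localhost.pem >/dev/null
done
```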
1. In pre-production (int, e2e) certain certificates are provisioned via keyvault
integration. These should be rotated and generated in the keyvault itself:
@ -540,8 +533,7 @@ Development value: secrets/cluster-logging-int.pem
vpn_configuration
```
## Append Resource Group to Subscription Cleaner DenyList
* We have subscription pruning that takes place routinely and need to add our resource group for the shared rp environment to the `denylist` of the cleaner:
* [https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29](https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29)
- We have subscription pruning that takes place routinely and need to add our resource group for the shared rp environment to the `denylist` of the cleaner:
- [https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29](https://github.com/Azure/ARO-RP/blob/e918d1b87be53a3b3cdf18b674768a6480fb56b8/hack/clean/clean.go#L29)

View file

@ -126,10 +126,6 @@ import_certs_secrets() {
--vault-name "$KEYVAULT_PREFIX-por" \
--name portal-server \
--file secrets/localhost.pem >/dev/null
az keyvault certificate import \
--vault-name "$KEYVAULT_PREFIX-dbt" \
--name dbtoken-server \
--file secrets/localhost.pem >/dev/null
az keyvault certificate import \
--vault-name "$KEYVAULT_PREFIX-por" \
--name portal-client \

View file

@ -19,7 +19,6 @@ func run(ctx context.Context, log *logrus.Entry) error {
err := env.ValidateVars(
"ADMIN_OBJECT_ID",
"AZURE_CLIENT_ID",
"AZURE_DBTOKEN_CLIENT_ID",
"AZURE_SERVICE_PRINCIPAL_ID",
"AZURE_FP_SERVICE_PRINCIPAL_ID",
"AZURE_PORTAL_ACCESS_GROUP_IDS",

View file

@ -17,12 +17,6 @@
"databaseAccountName": {
"value": ""
},
"dbtokenClientId": {
"value": ""
},
"dbtokenUrl": {
"value": ""
},
"fluentbitImage": {
"value": ""
},

View file

@ -17,12 +17,6 @@
"databaseAccountName": {
"type": "string"
},
"dbtokenClientId": {
"type": "string"
},
"dbtokenUrl": {
"type": "string"
},
"fluentbitImage": {
"type": "string"
},

View file

@ -187,47 +187,6 @@
},
"apiVersion": "2019-09-01"
},
{
"name": "[concat(parameters('keyvaultPrefix'), '-dbt')]",
"type": "Microsoft.KeyVault/vaults",
"location": "[resourceGroup().location]",
"properties": {
"tenantId": "[subscription().tenantId]",
"sku": {
"family": "A",
"name": "standard"
},
"accessPolicies": [
{
"tenantId": "[subscription().tenantId]",
"objectId": "[parameters('rpServicePrincipalId')]",
"permissions": {
"secrets": [
"get"
]
}
},
{
"tenantId": "[subscription().tenantId]",
"objectId": "[parameters('adminObjectId')]",
"permissions": {
"secrets": [
"set",
"list"
],
"certificates": [
"delete",
"get",
"import",
"list"
]
}
}
],
"enableSoftDelete": true
},
"apiVersion": "2019-09-01"
},
{
"name": "[concat(parameters('keyvaultPrefix'), '-por')]",
"type": "Microsoft.KeyVault/vaults",

View file

@ -63,9 +63,6 @@
"databaseAccountName": {
"value": ""
},
"dbtokenClientId": {
"value": ""
},
"disableCosmosDBFirewall": {
"value": false
},

View file

@ -8,9 +8,6 @@
"extraClusterKeyvaultAccessPolicies": {
"value": []
},
"extraDBTokenKeyvaultAccessPolicies": {
"value": []
},
"extraPortalKeyvaultAccessPolicies": {
"value": []
},

View file

@ -19,17 +19,6 @@
}
}
],
"dbTokenKeyvaultAccessPolicies": [
{
"tenantId": "[subscription().tenantId]",
"objectId": "[parameters('rpServicePrincipalId')]",
"permissions": {
"secrets": [
"get"
]
}
}
],
"portalKeyvaultAccessPolicies": [
{
"tenantId": "[subscription().tenantId]",
@ -63,10 +52,6 @@
"type": "array",
"defaultValue": []
},
"extraDBTokenKeyvaultAccessPolicies": {
"type": "array",
"defaultValue": []
},
"extraPortalKeyvaultAccessPolicies": {
"type": "array",
"defaultValue": []
@ -123,32 +108,6 @@
},
"name": "rp_in_geneva"
},
{
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "445",
"sourceAddressPrefix": "10.0.8.0/24",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 140,
"direction": "Inbound"
},
"name": "dbtoken_in_gateway_445"
},
{
"properties": {
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "8445",
"sourceAddressPrefix": "10.0.8.0/24",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 141,
"direction": "Inbound"
},
"name": "dbtoken_in_gateway_8445"
},
{
"properties": {
"protocol": "Tcp",
@ -278,21 +237,6 @@
},
"apiVersion": "2019-09-01"
},
{
"name": "[concat(parameters('keyvaultPrefix'), '-dbt')]",
"type": "Microsoft.KeyVault/vaults",
"location": "[resourceGroup().location]",
"properties": {
"tenantId": "[subscription().tenantId]",
"sku": {
"family": "A",
"name": "standard"
},
"accessPolicies": "[concat(variables('dbTokenKeyvaultAccessPolicies'), parameters('extraDBTokenKeyvaultAccessPolicies'))]",
"enableSoftDelete": true
},
"apiVersion": "2019-09-01"
},
{
"name": "[concat(parameters('keyvaultPrefix'), '-por')]",
"type": "Microsoft.KeyVault/vaults",

View file

@ -83,9 +83,6 @@
"databaseAccountName": {
"type": "string"
},
"dbtokenClientId": {
"type": "string"
},
"disableCosmosDBFirewall": {
"type": "bool",
"defaultValue": false
@ -357,64 +354,6 @@
"[resourceId('Microsoft.Network/publicIPAddresses', 'rp-pip')]"
]
},
{
"sku": {
"name": "Standard"
},
"properties": {
"frontendIPConfigurations": [
{
"properties": {
"subnet": {
"id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', 'rp-vnet', 'rp-subnet')]"
}
},
"name": "dbtoken-frontend",
"zones": "[if(contains(parameters('nonZonalRegions'),toLower(replace(resourceGroup().location, ' ', ''))),'',pickZones('Microsoft.Network', 'publicIPAddresses', resourceGroup().location, 3))]"
}
],
"backendAddressPools": [
{
"name": "rp-backend"
}
],
"loadBalancingRules": [
{
"properties": {
"frontendIPConfiguration": {
"id": "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', 'rp-lb-internal', 'dbtoken-frontend')]"
},
"backendAddressPool": {
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"
},
"probe": {
"id": "[resourceId('Microsoft.Network/loadBalancers/probes', 'rp-lb-internal', 'dbtoken-probe')]"
},
"protocol": "Tcp",
"loadDistribution": "Default",
"frontendPort": 8445,
"backendPort": 445
},
"name": "dbtoken-lbrule"
}
],
"probes": [
{
"properties": {
"protocol": "Https",
"port": 445,
"numberOfProbes": 2,
"requestPath": "/healthz/ready"
},
"name": "dbtoken-probe"
}
]
},
"name": "rp-lb-internal",
"type": "Microsoft.Network/loadBalancers",
"location": "[resourceGroup().location]",
"apiVersion": "2020-08-01"
},
{
"sku": {
"name": "[parameters('vmSize')]",
@ -479,9 +418,6 @@
"loadBalancerBackendAddressPools": [
{
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb', 'rp-backend')]"
},
{
"id": "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"
}
]
}
@ -530,7 +466,6 @@
"dependsOn": [
"[resourceId('Microsoft.Authorization/roleAssignments', guid(resourceGroup().id, parameters('rpServicePrincipalId'), 'RP / Reader'))]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb-internal')]",
"[resourceId('Microsoft.Storage/storageAccounts', substring(parameters('storageAccountDomain'), 0, indexOf(parameters('storageAccountDomain'), '.')))]"
]
},
@ -669,21 +604,6 @@
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]"
]
},
{
"properties": {
"allowVirtualNetworkAccess": true,
"allowForwardedTraffic": true,
"allowGatewayTransit": false,
"useRemoteGateways": false,
"remoteVirtualNetwork": {
"id": "[resourceId(parameters('gatewayResourceGroupName'), 'Microsoft.Network/virtualNetworks', 'gateway-vnet')]"
}
},
"name": "rp-vnet/peering-gateway-vnet",
"type": "Microsoft.Network/virtualNetworks/virtualNetworkPeerings",
"apiVersion": "2020-08-01",
"location": "[resourceGroup().location]"
},
{
"properties": {},
"name": "[concat(resourceGroup().location, '.', parameters('clusterParentDomainName'))]",

View file

@ -55,10 +55,8 @@ type Configuration struct {
ClusterParentDomainName *string `json:"clusterParentDomainName,omitempty" value:"required"`
DatabaseAccountName *string `json:"databaseAccountName,omitempty" value:"required"`
CosmosDB *CosmosDBConfiguration `json:"cosmosDB,omitempty"`
DBTokenClientID *string `json:"dbtokenClientId,omitempty" value:"required"`
DisableCosmosDBFirewall *bool `json:"disableCosmosDBFirewall,omitempty"`
ExtraClusterKeyvaultAccessPolicies []interface{} `json:"extraClusterKeyvaultAccessPolicies,omitempty" value:"required"`
ExtraDBTokenKeyvaultAccessPolicies []interface{} `json:"extraDBTokenKeyvaultAccessPolicies,omitempty" value:"required"`
ExtraCosmosDBIPs []string `json:"extraCosmosDBIPs,omitempty"`
ExtraGatewayKeyvaultAccessPolicies []interface{} `json:"extraGatewayKeyvaultAccessPolicies,omitempty" value:"required"`
ExtraPortalKeyvaultAccessPolicies []interface{} `json:"extraPortalKeyvaultAccessPolicies,omitempty" value:"required"`

View file

@ -49,7 +49,7 @@ type deployer struct {
globalaccounts storage.AccountsClient
deployments features.DeploymentsClient
groups features.ResourceGroupsClient
loadbalancers network.LoadBalancersClient
// loadbalancers network.LoadBalancersClient
userassignedidentities msi.UserAssignedIdentitiesClient
providers features.ProvidersClient
publicipaddresses network.PublicIPAddressesClient
@ -59,7 +59,6 @@ type deployer struct {
vmssvms compute.VirtualMachineScaleSetVMsClient
zones dns.ZonesClient
clusterKeyvault keyvault.Manager
dbtokenKeyvault keyvault.Manager
portalKeyvault keyvault.Manager
serviceKeyvault keyvault.Manager
@ -101,7 +100,7 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Core, config *RPConfig
globalaccounts: storage.NewAccountsClient(_env.Environment(), *config.Configuration.GlobalSubscriptionID, authorizer),
deployments: features.NewDeploymentsClient(_env.Environment(), config.SubscriptionID, authorizer),
groups: features.NewResourceGroupsClient(_env.Environment(), config.SubscriptionID, authorizer),
loadbalancers: network.NewLoadBalancersClient(_env.Environment(), config.SubscriptionID, authorizer),
// loadbalancers: network.NewLoadBalancersClient(_env.Environment(), config.SubscriptionID, authorizer),
userassignedidentities: msi.NewUserAssignedIdentitiesClient(_env.Environment(), config.SubscriptionID, authorizer),
providers: features.NewProvidersClient(_env.Environment(), config.SubscriptionID, authorizer),
roleassignments: authorization.NewRoleAssignmentsClient(_env.Environment(), config.SubscriptionID, authorizer),
@ -111,7 +110,6 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Core, config *RPConfig
vmssvms: compute.NewVirtualMachineScaleSetVMsClient(_env.Environment(), config.SubscriptionID, authorizer),
zones: dns.NewZonesClient(_env.Environment(), config.SubscriptionID, authorizer),
clusterKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.ClusterKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
dbtokenKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.DBTokenKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
portalKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.PortalKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),
serviceKeyvault: keyvault.NewManager(kvAuthorizer, "https://"+*config.Configuration.KeyvaultPrefix+env.ServiceKeyvaultSuffix+"."+_env.Environment().KeyVaultDNSSuffix+"/"),

View file

@ -40,9 +40,6 @@ func (d *deployer) DeployGateway(ctx context.Context) error {
// Special cases where the config isn't marshalled into the ARM template parameters cleanly
parameters := d.getParameters(template["parameters"].(map[string]interface{}))
parameters.Parameters["dbtokenURL"] = &arm.ParametersParameter{
Value: "https://dbtoken." + d.config.Location + "." + *d.config.Configuration.RPParentDomainName + ":8445",
}
parameters.Parameters["rpImage"] = &arm.ParametersParameter{
Value: *d.config.Configuration.RPImagePrefix + ":" + d.version,
}

View file

@ -114,12 +114,6 @@ func (d *deployer) configureDNS(ctx context.Context) error {
return err
}
lb, err := d.loadbalancers.Get(ctx, d.config.RPResourceGroupName, "rp-lb-internal", "")
if err != nil {
return err
}
dbtokenIP := *((*lb.FrontendIPConfigurations)[0].PrivateIPAddress)
zone, err := d.zones.Get(ctx, d.config.RPResourceGroupName, d.config.Location+"."+*d.config.Configuration.ClusterParentDomainName)
if err != nil {
return err
@ -153,20 +147,6 @@ func (d *deployer) configureDNS(ctx context.Context) error {
return err
}
_, err = d.globalrecordsets.CreateOrUpdate(ctx, *d.config.Configuration.GlobalResourceGroupName, *d.config.Configuration.RPParentDomainName, "dbtoken."+d.config.Location, mgmtdns.A, mgmtdns.RecordSet{
RecordSetProperties: &mgmtdns.RecordSetProperties{
TTL: to.Int64Ptr(3600),
ARecords: &[]mgmtdns.ARecord{
{
Ipv4Address: &dbtokenIP,
},
},
},
}, "", "")
if err != nil {
return err
}
nsRecords := make([]mgmtdns.NsRecord, 0, len(*zone.NameServers))
for i := range *zone.NameServers {
nsRecords = append(nsRecords, mgmtdns.NsRecord{

View file

@ -122,14 +122,10 @@ func DevConfig(_env env.Core) (*Config, error) {
PortalProvisionedThroughput: 400,
GatewayProvisionedThroughput: 400,
},
DBTokenClientID: to.StringPtr(os.Getenv("AZURE_DBTOKEN_CLIENT_ID")),
DisableCosmosDBFirewall: to.BoolPtr(true),
ExtraClusterKeyvaultAccessPolicies: []interface{}{
adminKeyvaultAccessPolicy(_env),
},
ExtraDBTokenKeyvaultAccessPolicies: []interface{}{
adminKeyvaultAccessPolicy(_env),
},
ExtraGatewayKeyvaultAccessPolicies: []interface{}{
adminKeyvaultAccessPolicy(_env),
},

View file

@ -199,8 +199,6 @@ func (g *generator) gatewayVMSS() *arm.Resource {
"azureSecPackQualysUrl",
"azureSecPackVSATenantId",
"databaseAccountName",
"dbtokenClientId",
"dbtokenUrl",
"mdmFrontendUrl",
"mdsdEnvironment",
"fluentbitImage",

View file

@ -99,32 +99,6 @@ func (g *generator) rpSecurityGroup() *arm.Resource {
})
} else {
rules = append(rules,
mgmtnetwork.SecurityRule{
SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
Protocol: mgmtnetwork.SecurityRuleProtocolTCP,
SourcePortRange: to.StringPtr("*"),
DestinationPortRange: to.StringPtr("445"),
SourceAddressPrefix: to.StringPtr("10.0.8.0/24"),
DestinationAddressPrefix: to.StringPtr("*"),
Access: mgmtnetwork.SecurityRuleAccessAllow,
Priority: to.Int32Ptr(140),
Direction: mgmtnetwork.SecurityRuleDirectionInbound,
},
Name: to.StringPtr("dbtoken_in_gateway_445"),
},
mgmtnetwork.SecurityRule{
SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
Protocol: mgmtnetwork.SecurityRuleProtocolTCP,
SourcePortRange: to.StringPtr("*"),
DestinationPortRange: to.StringPtr("8445"),
SourceAddressPrefix: to.StringPtr("10.0.8.0/24"),
DestinationAddressPrefix: to.StringPtr("*"),
Access: mgmtnetwork.SecurityRuleAccessAllow,
Priority: to.Int32Ptr(141),
Direction: mgmtnetwork.SecurityRuleDirectionInbound,
},
Name: to.StringPtr("dbtoken_in_gateway_8445"),
},
mgmtnetwork.SecurityRule{
SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
Protocol: mgmtnetwork.SecurityRuleProtocolTCP,
@ -334,69 +308,6 @@ func (g *generator) rpLB() *arm.Resource {
}
}
func (g *generator) rpLBInternal() *arm.Resource {
return &arm.Resource{
Resource: &mgmtnetwork.LoadBalancer{
Sku: &mgmtnetwork.LoadBalancerSku{
Name: mgmtnetwork.LoadBalancerSkuNameStandard,
},
LoadBalancerPropertiesFormat: &mgmtnetwork.LoadBalancerPropertiesFormat{
FrontendIPConfigurations: &[]mgmtnetwork.FrontendIPConfiguration{
{
FrontendIPConfigurationPropertiesFormat: &mgmtnetwork.FrontendIPConfigurationPropertiesFormat{
Subnet: &mgmtnetwork.Subnet{
ID: to.StringPtr("[resourceId('Microsoft.Network/virtualNetworks/subnets', 'rp-vnet', 'rp-subnet')]"),
},
},
Name: to.StringPtr("dbtoken-frontend"),
Zones: &[]string{},
},
},
BackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
{
Name: to.StringPtr("rp-backend"),
},
},
LoadBalancingRules: &[]mgmtnetwork.LoadBalancingRule{
{
LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{
FrontendIPConfiguration: &mgmtnetwork.SubResource{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', 'rp-lb-internal', 'dbtoken-frontend')]"),
},
BackendAddressPool: &mgmtnetwork.SubResource{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"),
},
Probe: &mgmtnetwork.SubResource{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/probes', 'rp-lb-internal', 'dbtoken-probe')]"),
},
Protocol: mgmtnetwork.TransportProtocolTCP,
LoadDistribution: mgmtnetwork.LoadDistributionDefault,
FrontendPort: to.Int32Ptr(8445),
BackendPort: to.Int32Ptr(445),
},
Name: to.StringPtr("dbtoken-lbrule"),
},
},
Probes: &[]mgmtnetwork.Probe{
{
ProbePropertiesFormat: &mgmtnetwork.ProbePropertiesFormat{
Protocol: mgmtnetwork.ProbeProtocolHTTPS,
Port: to.Int32Ptr(445),
NumberOfProbes: to.Int32Ptr(2),
RequestPath: to.StringPtr("/healthz/ready"),
},
Name: to.StringPtr("dbtoken-probe"),
},
},
},
Name: to.StringPtr("rp-lb-internal"),
Type: to.StringPtr("Microsoft.Network/loadBalancers"),
Location: to.StringPtr("[resourceGroup().location]"),
},
APIVersion: azureclient.APIVersion("Microsoft.Network"),
}
}
// rpLBAlert generates an alert resource for the rp-lb healthprobe metric
func (g *generator) rpLBAlert(threshold float64, severity int32, name string, evalFreq string, windowSize string, metric string) *arm.Resource {
return &arm.Resource{
@ -463,7 +374,6 @@ func (g *generator) rpVMSS() *arm.Resource {
"clusterMdsdNamespace",
"clusterParentDomainName",
"databaseAccountName",
"dbtokenClientId",
"fluentbitImage",
"fpClientId",
"fpServicePrincipalId",
@ -604,9 +514,6 @@ func (g *generator) rpVMSS() *arm.Resource {
{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb', 'rp-backend')]"),
},
{
ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', 'rp-lb-internal', 'rp-backend')]"),
},
},
},
},
@ -654,7 +561,6 @@ func (g *generator) rpVMSS() *arm.Resource {
DependsOn: []string{
"[resourceId('Microsoft.Authorization/roleAssignments', guid(resourceGroup().id, parameters('rpServicePrincipalId'), 'RP / Reader'))]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb')]",
"[resourceId('Microsoft.Network/loadBalancers', 'rp-lb-internal')]",
"[resourceId('Microsoft.Storage/storageAccounts', substring(parameters('storageAccountDomain'), 0, indexOf(parameters('storageAccountDomain'), '.')))]",
},
}
@ -692,20 +598,6 @@ func (g *generator) rpClusterKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolic
}
}
func (g *generator) rpDBTokenKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolicyEntry {
return []mgmtkeyvault.AccessPolicyEntry{
{
TenantID: &tenantUUIDHack,
ObjectID: to.StringPtr("[parameters('rpServicePrincipalId')]"),
Permissions: &mgmtkeyvault.Permissions{
Secrets: &[]mgmtkeyvault.SecretPermissions{
mgmtkeyvault.SecretPermissionsGet,
},
},
},
}
}
func (g *generator) rpPortalKeyvaultAccessPolicies() []mgmtkeyvault.AccessPolicyEntry {
return []mgmtkeyvault.AccessPolicyEntry{
{
@ -776,53 +668,6 @@ func (g *generator) rpClusterKeyvault() *arm.Resource {
}
}
func (g *generator) rpDBTokenKeyvault() *arm.Resource {
vault := &mgmtkeyvault.Vault{
Properties: &mgmtkeyvault.VaultProperties{
EnableSoftDelete: to.BoolPtr(true),
TenantID: &tenantUUIDHack,
Sku: &mgmtkeyvault.Sku{
Name: mgmtkeyvault.Standard,
Family: to.StringPtr("A"),
},
AccessPolicies: &[]mgmtkeyvault.AccessPolicyEntry{
{
ObjectID: to.StringPtr(dbTokenAccessPolicyHack),
},
},
},
Name: to.StringPtr("[concat(parameters('keyvaultPrefix'), '" + env.DBTokenKeyvaultSuffix + "')]"),
Type: to.StringPtr("Microsoft.KeyVault/vaults"),
Location: to.StringPtr("[resourceGroup().location]"),
}
if !g.production {
*vault.Properties.AccessPolicies = append(g.rpDBTokenKeyvaultAccessPolicies(),
mgmtkeyvault.AccessPolicyEntry{
TenantID: &tenantUUIDHack,
ObjectID: to.StringPtr("[parameters('adminObjectId')]"),
Permissions: &mgmtkeyvault.Permissions{
Certificates: &[]mgmtkeyvault.CertificatePermissions{
mgmtkeyvault.Delete,
mgmtkeyvault.Get,
mgmtkeyvault.Import,
mgmtkeyvault.List,
},
Secrets: &[]mgmtkeyvault.SecretPermissions{
mgmtkeyvault.SecretPermissionsSet,
mgmtkeyvault.SecretPermissionsList,
},
},
},
)
}
return &arm.Resource{
Resource: vault,
APIVersion: azureclient.APIVersion("Microsoft.KeyVault"),
}
}
func (g *generator) rpPortalKeyvault() *arm.Resource {
vault := &mgmtkeyvault.Vault{
Properties: &mgmtkeyvault.VaultProperties{

View file

@ -265,8 +265,6 @@ echo "configuring aro-gateway service"
cat >/etc/sysconfig/aro-gateway <<EOF
ACR_RESOURCE_ID='$ACRRESOURCEID'
DATABASE_ACCOUNT_NAME='$DATABASEACCOUNTNAME'
AZURE_DBTOKEN_CLIENT_ID='$DBTOKENCLIENTID'
DBTOKEN_URL='$DBTOKENURL'
MDM_ACCOUNT="$RPMDMACCOUNT"
MDM_NAMESPACE=Gateway
GATEWAY_DOMAINS='$GATEWAYDOMAINS'
@ -290,8 +288,6 @@ ExecStart=/usr/bin/docker run \
--cap-drop net_raw \
-e ACR_RESOURCE_ID \
-e DATABASE_ACCOUNT_NAME \
-e AZURE_DBTOKEN_CLIENT_ID \
-e DBTOKEN_URL \
-e GATEWAY_DOMAINS \
-e GATEWAY_FEATURES \
-e MDM_ACCOUNT \

View file

@ -114,7 +114,6 @@ sysctl --system
firewall-cmd --add-port=443/tcp --permanent
firewall-cmd --add-port=444/tcp --permanent
firewall-cmd --add-port=445/tcp --permanent
firewall-cmd --add-port=2222/tcp --permanent
export AZURE_CLOUD_NAME=$AZURECLOUDNAME

View file

@ -16,7 +16,6 @@ import (
const (
tenantIDHack = "13805ec3-a223-47ad-ad65-8b2baf92c0fb"
clusterAccessPolicyHack = "e1992efe-4835-46cf-8c08-d8b8451044b8"
dbTokenAccessPolicyHack = "bb6c76fd-76ea-43c9-8ee3-ca568ae1c226"
portalAccessPolicyHack = "e5e11dae-7c49-4118-9628-e0afa4d6a502"
serviceAccessPolicyHack = "533a94d0-d6c2-4fca-9af1-374aa6493468"
gatewayAccessPolicyHack = "d377245e-57a7-4e58-b618-492f9dbdd74b"
@ -58,7 +57,6 @@ func (g *generator) templateFixup(t *arm.Template) ([]byte, error) {
if g.production {
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+clusterAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('clusterKeyvaultAccessPolicies'), parameters('extraClusterKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+dbTokenAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('dbTokenKeyvaultAccessPolicies'), parameters('extraDBTokenKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+gatewayAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('gatewayKeyvaultAccessPolicies'), parameters('extraGatewayKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+portalAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('portalKeyvaultAccessPolicies'), parameters('extraPortalKeyvaultAccessPolicies'))]"`))
b = regexp.MustCompile(`(?m)"accessPolicies": \[[^]]*`+serviceAccessPolicyHack+`[^]]*\]`).ReplaceAll(b, []byte(`"accessPolicies": "[concat(variables('serviceKeyvaultAccessPolicies'), parameters('extraServiceKeyvaultAccessPolicies'))]"`))

View file

@ -27,8 +27,6 @@ func (g *generator) gatewayTemplate() *arm.Template {
"azureSecPackQualysUrl",
"azureSecPackVSATenantId",
"databaseAccountName",
"dbtokenClientId",
"dbtokenUrl",
"fluentbitImage",
"gatewayDomains",
"gatewayFeatures",

View file

@ -46,7 +46,6 @@ func (g *generator) rpTemplate() *arm.Template {
"clusterMdsdConfigVersion",
"clusterMdsdNamespace",
"cosmosDB",
"dbtokenClientId",
"disableCosmosDBFirewall",
"fluentbitImage",
"fpClientId",
@ -166,17 +165,12 @@ func (g *generator) rpTemplate() *arm.Template {
g.publicIPAddress("rp-pip"),
g.publicIPAddress("portal-pip"),
g.rpLB(),
g.rpLBInternal(),
g.rpVMSS(),
g.rpStorageAccount(),
g.rpLBAlert(30.0, 2, "rp-availability-alert", "PT5M", "PT15M", "DipAvailability"), // triggers on all 3 RPs being down for 10min, can't be >=0.3 due to deploys going down to 32% at times.
g.rpLBAlert(67.0, 3, "rp-degraded-alert", "PT15M", "PT6H", "DipAvailability"), // 1/3 backend down for 1h or 2/3 down for 3h in the last 6h
g.rpLBAlert(33.0, 2, "rp-vnet-alert", "PT5M", "PT5M", "VipAvailability")) // this will trigger only if the Azure network infrastructure between the loadBalancers and VMs is down for 3.5min
// more on alerts https://msazure.visualstudio.com/AzureRedHatOpenShift/_wiki/wikis/ARO.wiki/53765/WIP-Alerting
t.Resources = append(t.Resources,
g.virtualNetworkPeering("rp-vnet/peering-gateway-vnet", "[resourceId(parameters('gatewayResourceGroupName'), 'Microsoft.Network/virtualNetworks', 'gateway-vnet')]", false, false, nil),
)
}
t.Resources = append(t.Resources, g.rpDNSZone(),
@ -282,7 +276,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
if g.production {
t.Variables = map[string]interface{}{
"clusterKeyvaultAccessPolicies": g.rpClusterKeyvaultAccessPolicies(),
"dbTokenKeyvaultAccessPolicies": g.rpDBTokenKeyvaultAccessPolicies(),
"portalKeyvaultAccessPolicies": g.rpPortalKeyvaultAccessPolicies(),
"serviceKeyvaultAccessPolicies": g.rpServiceKeyvaultAccessPolicies(),
}
@ -298,7 +291,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
params = append(params,
"deployNSGs",
"extraClusterKeyvaultAccessPolicies",
"extraDBTokenKeyvaultAccessPolicies",
"extraPortalKeyvaultAccessPolicies",
"extraServiceKeyvaultAccessPolicies",
"gatewayResourceGroupName",
@ -317,7 +309,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
p.Type = "bool"
p.DefaultValue = false
case "extraClusterKeyvaultAccessPolicies",
"extraDBTokenKeyvaultAccessPolicies",
"extraPortalKeyvaultAccessPolicies",
"extraServiceKeyvaultAccessPolicies":
p.Type = "array"
@ -337,7 +328,6 @@ func (g *generator) rpPredeployTemplate() *arm.Template {
g.rpVnet(),
g.rpPEVnet(),
g.rpClusterKeyvault(),
g.rpDBTokenKeyvault(),
g.rpPortalKeyvault(),
g.rpServiceKeyvault(),
g.rpServiceKeyvaultDynamic(),

1
pkg/env/core.go vendored
View file

@ -22,7 +22,6 @@ const (
COMPONENT_RP ServiceComponent = "RP"
COMPONENT_GATEWAY ServiceComponent = "GATEWAY"
COMPONENT_MONITOR ServiceComponent = "MONITOR"
COMPONENT_DBTOKEN ServiceComponent = "DBTOKEN"
COMPONENT_OPERATOR ServiceComponent = "OPERATOR"
COMPONENT_MIRROR ServiceComponent = "MIRROR"
COMPONENT_PORTAL ServiceComponent = "PORTAL"

2
pkg/env/env.go vendored
View file

@ -46,13 +46,11 @@ const (
EncryptionSecretV2Name = "encryption-key-v2"
FrontendEncryptionSecretName = "fe-encryption-key"
FrontendEncryptionSecretV2Name = "fe-encryption-key-v2"
DBTokenServerSecretName = "dbtoken-server"
PortalServerSecretName = "portal-server"
PortalServerClientSecretName = "portal-client"
PortalServerSessionKeySecretName = "portal-session-key"
PortalServerSSHKeySecretName = "portal-sshkey"
ClusterKeyvaultSuffix = "-cls"
DBTokenKeyvaultSuffix = "-dbt"
GatewayKeyvaultSuffix = "-gwy"
PortalKeyvaultSuffix = "-por"
ServiceKeyvaultSuffix = "-svc"