Merge pull request #42 from alainvetier/main

Major update
This commit is contained in:
Ayobami Ayodeji 2023-05-02 11:27:33 -07:00 committed by GitHub
Parents e23ad68c15 0c35e03501
Commit 80c8e0172a
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
34 changed files with 892 additions and 145 deletions

5
.gitignore vendored
View File

@ -350,6 +350,10 @@ MigrationBackup/
.ionide/
commands.sh
# MacOS
**/.DS_Store
#**********************************************************************************************************************************
# Local .terraform directories
@ -388,6 +392,7 @@ override.tf.json
# .tfstate files
*.tfstate
*.tfstate.*
**/.terraform.lock.hcl
# Crash log files
crash.log

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: svcrattingsapp

View File

@ -0,0 +1,60 @@
# Before this deployment, ensure that MongoDB has been created using Helm.
apiVersion: apps/v1
kind: Deployment
metadata:
name: ratings-api
spec:
selector:
matchLabels:
app: ratings-api
template:
metadata:
labels:
app: ratings-api # the label for the pods and the deployments
spec:
serviceAccountName: svcrattingsapp
containers:
- name: ratings-api
image: <acr name>.azurecr.io/ratings-api:v1 # IMPORTANT: update with your own repository
imagePullSecrets:
- name: acr-secret
imagePullPolicy: Always
volumeMounts:
- name: secrets-store-inline
mountPath: "/mnt/secrets-store"
readOnly: true
ports:
- containerPort: 3000 # the application listens to this port
env:
- name: MONGODB_URI # the application expects to find the MongoDB connection details in this environment variable
valueFrom:
secretKeyRef:
name: mongodburi
key: MONGODBURI # the name of Secret in KeyVault
resources:
requests: # minimum resources required
cpu: 250m
memory: 64Mi
limits: # maximum resources allocated
cpu: 250m
memory: 256Mi
readinessProbe: # is the container ready to receive traffic?
initialDelaySeconds: 10
httpGet:
port: 3000
path: /healthz
livenessProbe: # is the container healthy?
initialDelaySeconds: 2
periodSeconds: 5
httpGet:
port: 3000
path: /healthz
volumes:
- name: secrets-store-inline
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: "mongo-secret-csi"
nodePublishSecretRef: # Only required when using service principal mode
name: secrets-store-creds

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: ratings-api
spec:
selector:
app: ratings-api
ports:
- protocol: TCP
port: 80
targetPort: 3000
type: ClusterIP

View File

@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ratings-web
spec:
selector:
matchLabels:
app: ratings-web
template:
metadata:
labels:
app: ratings-web # the label for the pods and the deployments
spec:
serviceAccountName: svcrattingsapp
containers:
- name: ratings-web
image: <acr name>.azurecr.io/ratings-web:v1 # IMPORTANT: update with your own repository
imagePullPolicy: Always
ports:
- containerPort: 8080 # the application listens to this port
env:
- name: API # the application expects to connect to the API at this endpoint
value: http://ratings-api.ratingsapp.svc.cluster.local
resources:
requests: # minimum resources required
cpu: 250m
memory: 64Mi
limits: # maximum resources allocated
cpu: 500m
memory: 512Mi

View File

@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ratings-web
spec:
selector:
matchLabels:
app: ratings-web
template:
metadata:
labels:
app: ratings-web # the label for the pods and the deployments
spec:
serviceAccountName: svcrattingsapp
volumes:
- name: aks-tls-akv
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: "aks-tls-akv"
containers:
- name: ratings-web
image: <acr name>.azurecr.io/ratings-web:v1 # IMPORTANT: update with your own repository
imagePullPolicy: Always
ports:
- containerPort: 8080 # the application listens to this port
env:
- name: API # the application expects to connect to the API at this endpoint
value: http://ratings-api.ratingsapp.svc.cluster.local
resources:
requests: # minimum resources required
cpu: 250m
memory: 64Mi
limits: # maximum resources allocated
cpu: 500m
memory: 512Mi
volumeMounts:
- name: aks-tls-akv
mountPath: /mnt/secrets-store
readOnly: true

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: ratings-web
spec:
selector:
app: ratings-web
ports:
- protocol: TCP
port: 80
targetPort: 8080
type: ClusterIP

View File

@ -0,0 +1,17 @@
# NON-TLS - HTTP Only
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ratingsapp-ingress
spec:
rules:
- host: <fqdn>
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ratings-web
port:
number: 80

View File

@ -0,0 +1,25 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: ratings-web-https
namespace: ratingsapp
annotations:
kubernetes.io/ingress.class: azure/application-gateway
# kubernetes.io/ingress.allow-http: 'false'
appgw.ingress.kubernetes.io/ssl-redirect: "true"
spec:
tls:
- hosts:
- <fqdn>
secretName: aks-tls-akv
rules:
- host: <fqdn>
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: ratings-web
port:
number: 80

View File

@ -0,0 +1,25 @@
apiVersion: secrets-store.csi.x-k8s.io/v1alpha1
kind: SecretProviderClass
metadata:
name: mongo-secret-csi
spec:
provider: azure
secretObjects:
- secretName: mongodburi
type: Opaque
data:
- objectName: MONGODBURI
key: MONGODBURI
parameters:
keyvaultName: <keyvault>
useVMManagedIdentity: "true"
userAssignedIdentityID: <client id> # the client ID of the MSI
cloudName: ""
objects: |
array:
- |
objectName: MONGODBURI
objectType: secret
objectVersion: ""
tenantId: <tenant id>

View File

@ -0,0 +1,29 @@
apiVersion: secrets-store.csi.x-k8s.io/v1alpha1
kind: SecretProviderClass
metadata:
name: aks-tls-akv
namespace: ratingsapp
spec:
provider: azure
parameters:
keyvaultName: <Key vault>
useVMManagedIdentity: "true"
userAssignedIdentityID: <aks identity client ID> # the client ID of the MSI created by the
objects: |
array:
- |
objectName: aks-ingress-tls
objectAlias: aks-ingress-tls
objectType: secret
      # The objectType above is "secret" even though aks-ingress-tls is stored in the key vault as a certificate.
      # Also, the identity used will need GET access to "secrets" in the key vault, as well as GET access to "certificates".
tenantId: <tenant id>
secretObjects:
- secretName: aks-tls-akv # k8s secret manifest will be generated and synced after mounting it from pod/deploy
type: kubernetes.io/tls
data:
- objectName: aks-ingress-tls # must match the name of certificate in kv
key: tls.crt
- objectName: aks-ingress-tls # must match the name of certificate in kv
key: tls.key

7
Scenarios/Secure-Baseline/delete.azcli Normal file → Executable file
View File

@ -1,6 +1,7 @@
HUBRGNAME='hub-rg'
SPOKERGNAME='spoke-rg'
AROCLUSTER='ftaarocluster'
# Variables
HUBRGNAME='hub-aro' #name of hub resource group
SPOKERGNAME='spoke-aro' # name of spoke resource group
AROCLUSTER='ftaarocluster' # name of ARO cluster
# 1. Delete ARO cluster
az aro delete --resource-group $SPOKERGNAME --name $AROCLUSTER -y

View File

@ -7,13 +7,22 @@ logpath=/var/log/deploymentscriptlog
#############################
# Upgrading Linux Distribution
#############################
echo "#############################" >> $logpath
echo "Upgrading Linux Distribution" >> $logpath
echo "#############################" >> $logpath
#echo "#############################" >> $logpath
#echo "Upgrading Linux Distribution" >> $logpath
#echo "#############################" >> $logpath
#sudo apt-get update >> $logpath
#sudo apt-get -y upgrade >> $logpath
echo " " >> $logpath
#############################
#Install Misc Tools
#############################
echo "#############################" >> $logpath
echo "Installing Misc Tools" >> $logpath
echo "#############################" >> $logpath
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl vim git
#############################
#Install Azure CLI
#############################
@ -25,12 +34,12 @@ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
#############################
#Install Docker
#############################
# echo "#############################" >> $logpath
# echo "Installing Docker" >> $logpath
# echo "#############################" >> $logpath
# wget -qO- https://get.docker.com/ | sh >> $logpath
# sudo usermod -aG docker $1
# echo " " >> $logpath
echo "#############################" >> $logpath
echo "Installing Docker" >> $logpath
echo "#############################" >> $logpath
wget -qO- https://get.docker.com/ | sh >> $logpath
sudo usermod -aG docker $1
echo " " >> $logpath
#############################
#Install Kubectl
@ -38,8 +47,6 @@ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
echo "#############################" >> $logpath
echo "Installing Kubectl" >> $logpath
echo "#############################" >> $logpath
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
@ -53,7 +60,6 @@ echo "#############################" >> $logpath
echo "Installing Helm" >> $logpath
echo "#############################" >> $logpath
curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
sudo apt-get install apt-transport-https --yes
echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
@ -69,4 +75,13 @@ tar -zxvf openshift-client-linux.tar.gz
sudo mv oc /usr/local/bin
rm README.md
rm kubectl
rm openshift-client-linux.tar.gz
rm openshift-client-linux.tar.gz
#############################
#Upgrade packages
#############################
echo "#############################" >> $logpath
echo "Upgrading Packages" >> $logpath
echo "#############################" >> $logpath
sudo apt upgrade -y

View File

@ -21,13 +21,18 @@ This deployment uses a single Azure CLI script to do the following:
* Initializes Terraform
* Deploys the environment
## Retrieve Jumpbox and ARO credentials
Retrieve the Key Vault secrets for the username and password required to connect to the jumpbox through Bastion (the same credentials are used for the Windows and Linux jumpboxes); a sketch is shown below.
Note that the Windows jumpbox is intended for Azure and ARO portal access for tasks done through the UI. For Azure CLI commands, either the Windows Subsystem for Linux or the Linux jumpbox is recommended.
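A minimal sketch, assuming the Key Vault created in the spoke resource group stores the jumpbox credentials; only the `vmadminusername` secret name is visible in the Terraform VM module, so `vmadminpassword` is an assumption here:

```bash
# Assumed resource group and secret names; adjust to your deployment.
SPOKERGNAME='spoke-aro'
KV_NAME=$(az keyvault list -g $SPOKERGNAME --query '[0].name' -o tsv)

# Retrieve the jumpbox credentials used with Bastion (same for the Windows and Linux jumpboxes).
az keyvault secret show --vault-name $KV_NAME --name vmadminusername --query value -o tsv
az keyvault secret show --vault-name $KV_NAME --name vmadminpassword --query value -o tsv
```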
## Post Deployment Tasks
A few tasks need to be completed after the deployment. The scripts for these tasks must be run from the jumpbox created during the deployment and are located in the folders listed below.
* [AAD Integration](../Azure-CLI/07-aad-RBAC)
* [Container Insights Integration](../Azure-CLI/08-containerinsights)
* [Application Deployment](../Azure-CLI/09-appdeployment)
* [AAD Integration](./post_deployment/aad-RBAC)
* [Container Insights Integration](./post_deployment/containerinsights)
* [Application Deployment](./post_deployment/appdeployment)
## Cleanup
In this step you delete all the resources that were created during the previous steps; a sketch using the Azure CLI follows.
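A hedged sketch mirroring the `delete.azcli` script included in this repository (adjust the names to match your deployment):

```bash
# Assumed resource group and cluster names; see delete.azcli for the authoritative values.
HUBRGNAME='hub-aro'        # name of the hub resource group
SPOKERGNAME='spoke-aro'    # name of the spoke resource group
AROCLUSTER='ftaarocluster' # name of the ARO cluster

# Delete the ARO cluster first, then the spoke and hub resource groups.
az aro delete --resource-group $SPOKERGNAME --name $AROCLUSTER -y
az group delete --name $SPOKERGNAME -y
az group delete --name $HUBRGNAME -y
```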

View File

@ -1,12 +1,7 @@
#!/bin/bash
# log into azure with credentials you want to use for deploying the landing zone
az login
# Prerequisites Check
if [ "$(which terraform)" == "" ]; then
echo "Terraform not found. Please install terraform before running this script."
exit 1
fi
# Check if the RedHat Openshift Provider Is Installed
# Check whether the RedHat OpenShift provider is registered, and register it if not
RHO_PROVIDER=$(az provider show --namespace Microsoft.RedHatOpenShift --query "[registrationState]" --output tsv)
if [ "$RHO_PROVIDER" != "Registered" ]; then
echo "RedHat OpenShift Provider Is Not Installed"
@ -14,43 +9,32 @@ if [ "$RHO_PROVIDER" != "Registered" ]; then
echo "RedHat OpenShift Provider has been installed Installed"
fi
# Prompt user for an Service Principal Name and Subscription ID
echo "Enter the Name of the Service Principal for ARO to use:"
read -r SPNNAME
echo "Enter the Subscription ID where you want ARO deployed:"
read -r SUBSCRIPTIONID
echo "Enter the ARO cluster base name"
read -r AROBASENAME
echo "Enter the ARO Domain"
read -r ARODOMAIN
# take note of the subscription id and tenant id in which you want to deploy the ARO LZ
az account show
TENANT_ID=$(az account show -s ${SUBSCRIPTIONID} --query tenantId -o tsv)
# Create the Service Principal
# TODO: Add check to see if the SPN already exists
az ad sp create-for-rbac --name $SPNNAME --scopes /subscriptions/$SUBSCRIPTIONID --role Contributor --output json > /tmp/sp.json
# Set variables from the Service Principal
SP_CLIENT_ID=$(jq -r '.appId' /tmp/sp.json)
SP_CLIENT_SECRET=$(jq -r '.password' /tmp/sp.json)
# Set variable for ARO Provider SP Object ID
ARO_RP_SP_OBJECT_ID=$(az ad sp list --display-name "Azure Red Hat OpenShift RP" --query "[0].objectId" -o tsv)
# identify the resource provider object id for ARO
aro_rp_object_id=$(az ad sp list --display-name "Azure Red Hat OpenShift RP" --query "[0].id" -o tsv)
echo $aro_rp_object_id
### Deploy Environment ###
# Initialize Terraform
terraform init
# Terraform Apply
# validate configuration
terraform plan \
-var tenant_id=YOURTENANTID \
-var subscription_id=YOURSUBSCRIPTIONID \
-var aro_rp_object_id=YOURARO_RP_OBJECTID \
-var aro_base_name=YOURAROCLUSTERBASENAME \
-var aro_domain=YOURAROUNIQUEDNSNAME
# optional: -var location=YOURREGION (default is eastus)
# Deploy
terraform apply \
--auto-approve \
-var tenant_id="$TENANT_ID" \
-var subscription_id="$SUBSCRIPTIONID" \
-var aro_sp_client_id="$SP_CLIENT_ID" \
-var aro_sp_password="$SP_CLIENT_SECRET" \
-var aro_rp_object_id="$ARO_RP_SP_OBJECT_ID" \
-var aro_base_name="$AROBASENAME" \
-var aro_domain="$ARODOMAIN"
# Inform user that additional steps are required to complete the deployment. These must be completed from the JumpBox.
echo "Additional steps are required to complete the deployment. These must be completed from the JumpBox. Please consult the readme file"
-var tenant_id=YOURTENANTID \
-var subscription_id=YOURSUBSCRIPTIONID \
-var aro_rp_object_id=YOURARO_RP_OBJECTID \
-var aro_base_name=YOURAROCLUSTERBASENAME \
-var aro_domain=YOURAROUNIQUEDNSNAME
# optional: -var location=YOURREGION (default is eastus)

View File

@ -69,6 +69,18 @@ module "supporting" {
]
}
module "serviceprincipal" {
source = "./modules/serviceprincipal"
aro_spn_name = var.aro_spn_name
spoke_rg_name = azurerm_resource_group.spoke.name
hub_rg_name = azurerm_resource_group.hub.name
depends_on = [
module.vnet
]
}
module "aro" {
source = "./modules/aro"
@ -78,15 +90,15 @@ module "aro" {
master_subnet_id = module.vnet.master_subnet_id
worker_subnet_id = module.vnet.worker_subnet_id
aro_sp_client_id = var.aro_sp_client_id
aro_sp_password = var.aro_sp_password
sp_client_id = module.serviceprincipal.sp_client_id
sp_client_secret = module.serviceprincipal.sp_client_secret
aro_rp_object_id = var.aro_rp_object_id
spoke_rg_name = azurerm_resource_group.spoke.name
base_name = var.aro_base_name
domain = var.aro_domain
depends_on = [
module.vnet
module.serviceprincipal
]
}

View File

@ -16,9 +16,9 @@ resource "azurerm_resource_group_template_deployment" "aro" {
deployment_mode = "Incremental"
parameters_content = jsonencode({
"clientId" = {
value = var.aro_sp_client_id }
value = var.sp_client_id }
"clientSecret" = {
value = var.aro_sp_password
value = var.sp_client_secret
}
"clusterName" = {
value = "openshift-cluster-${var.base_name}"

View File

@ -36,13 +36,12 @@ variable "location" {
type = string
}
variable "aro_sp_client_id" {
variable "sp_client_id"{
type = string
}
variable "aro_sp_password" {
variable "sp_client_secret" {
type = string
sensitive = true
}
variable "aro_rp_object_id" {

View File

@ -1,8 +1,8 @@
# There are lots of items missing from the Azure FD TF provider: https://github.com/hashicorp/terraform-provider-azurerm/issues?page=2&q=is%3Aissue+is%3Aopen+frontdoor
data "azurerm_client_config" "current" {}
data "external" "aro_ilb_name" {
program = [
"az", "network", "lb", "list", "-g", var.aro_resource_group_name, "--query", "[0].{name:name}", "-o", "json"
"az", "network", "lb", "list", "-g", var.aro_resource_group_name, "--query", "[1].{name:name}", "-o", "json"
]
}
@ -22,7 +22,8 @@ resource "azurerm_private_link_service" "pl" {
subnet_id = var.aro_worker_subnet_id
primary = true
}
load_balancer_frontend_ip_configuration_ids = ["${data.azurerm_lb.aro_ilb.frontend_ip_configuration[0].id}"]
load_balancer_frontend_ip_configuration_ids = [data.azurerm_lb.aro_ilb.frontend_ip_configuration[1].id]
visibility_subscription_ids = [data.azurerm_client_config.current.subscription_id]
}
resource "azurerm_cdn_frontdoor_profile" "fd" {
@ -31,32 +32,66 @@ resource "azurerm_cdn_frontdoor_profile" "fd" {
sku_name = var.afd_sku
}
resource "azurerm_cdn_frontdoor_endpoint" "fd" {
name = "aro-ilb${var.random}"
cdn_frontdoor_profile_id = azurerm_cdn_frontdoor_profile.fd.id
}
resource "azurerm_cdn_frontdoor_origin_group" "aro" {
name = "aro-origin-group"
cdn_frontdoor_profile_id = azurerm_cdn_frontdoor_profile.fd.id
health_probe {
interval_in_seconds = 100
path = "/"
protocol = "Http"
request_type = "HEAD"
}
load_balancing {}
}
resource "azurerm_cdn_frontdoor_origin" "aro" {
name = "aro-origin"
cdn_frontdoor_origin_group_id = azurerm_cdn_frontdoor_origin_group.aro.id
enabled = true
certificate_name_check_enabled = true
host_name = data.azurerm_lb.aro_ilb.frontend_ip_configuration[1].private_ip_address
priority = 1
weight = 500
private_link {
request_message = "Request access for Private Link Origin CDN Frontdoor"
location = var.location
private_link_target_id = azurerm_private_link_service.pl.id
}
}
resource "azurerm_monitor_diagnostic_setting" "afd_diag" {
name = "afdtoLogAnalytics"
target_resource_id = azurerm_cdn_frontdoor_profile.fd.id
log_analytics_workspace_id = var.la_id
log {
enabled_log {
category = "FrontDoorAccessLog"
enabled = true
retention_policy {
enabled = false
days = 0
}
}
log {
enabled_log {
category = "FrontDoorHealthProbeLog"
enabled = true
retention_policy {
enabled = false
days = 0
}
}
log {
enabled_log {
category = "FrontDoorWebApplicationFirewallLog"
enabled = true
retention_policy {
enabled = false
days = 0
@ -73,7 +108,4 @@ resource "azurerm_monitor_diagnostic_setting" "afd_diag" {
}
}
resource "azurerm_cdn_frontdoor_endpoint" "fd" {
name = "aro-ilb${var.random}"
cdn_frontdoor_profile_id = azurerm_cdn_frontdoor_profile.fd.id
}

View File

@ -0,0 +1,34 @@
resource "azuread_application" "aro-lza-sp" {
display_name = var.aro_spn_name
owners = [data.azuread_client_config.current.object_id]
}
resource "azuread_service_principal" "aro-lza-sp" {
application_id = azuread_application.aro-lza-sp.application_id
app_role_assignment_required = false
owners = [data.azuread_client_config.current.object_id]
}
resource "time_rotating" "password-rotation" {
rotation_days = 365
}
resource "azuread_application_password" "sp_client_secret" {
application_object_id = azuread_application.aro-lza-sp.object_id
display_name = "rbac"
rotate_when_changed = {
rotation = time_rotating.password-rotation.id
}
}
resource "azurerm_role_assignment" "aro" {
scope = data.azurerm_resource_group.spoke.id
role_definition_name = "Contributor"
principal_id = azuread_service_principal.aro-lza-sp.object_id
}
resource "azurerm_role_assignment" "aro-hub" {
scope = data.azurerm_resource_group.hub.id
role_definition_name = "Contributor"
principal_id = azuread_service_principal.aro-lza-sp.object_id
}

View File

@ -0,0 +1,22 @@
# Variables
variable "spoke_rg_name" {
type = string
}
variable "hub_rg_name" {
type = string
}
variable "aro_spn_name" {
type = string
}
data "azurerm_resource_group" "spoke" {
name = var.spoke_rg_name
}
data "azurerm_resource_group" "hub" {
name = var.hub_rg_name
}
data "azuread_client_config" "current" {}

View File

@ -0,0 +1,8 @@
output "sp_client_id" {
value = azuread_application.aro-lza-sp.application_id
}
output "sp_client_secret" {
value = azuread_application_password.sp_client_secret.value
sensitive=true
}

View File

@ -46,5 +46,13 @@ resource "azurerm_private_dns_zone_virtual_network_link" "dns_link" {
resource_group_name = var.spoke_rg_name
private_dns_zone_name = azurerm_private_dns_zone.dns.name
virtual_network_id = var.spoke_vnet_id
registration_enabled = false
}
resource "azurerm_private_dns_zone_virtual_network_link" "dns_link2" {
name = "AcrDNSLinkHub"
resource_group_name = var.spoke_rg_name
private_dns_zone_name = azurerm_private_dns_zone.dns.name
virtual_network_id = var.hub_vnet_id
registration_enabled = false
}

View File

@ -52,6 +52,14 @@ resource "azurerm_private_dns_zone_virtual_network_link" "cosmos" {
name = "CosmosDbDNSLink"
resource_group_name = data.azurerm_resource_group.spoke.name
private_dns_zone_name = azurerm_private_dns_zone.cosmos.name
virtual_network_id = var.spoke_vnet_id
registration_enabled = false
}
resource "azurerm_private_dns_zone_virtual_network_link" "cosmos2" {
name = "CosmosDbDNSLinkHub"
resource_group_name = data.azurerm_resource_group.spoke.name
private_dns_zone_name = azurerm_private_dns_zone.cosmos.name
virtual_network_id = var.hub_vnet_id
registration_enabled = false
}

View File

@ -43,4 +43,12 @@ resource "azurerm_private_dns_zone_virtual_network_link" "kv" {
private_dns_zone_name = azurerm_private_dns_zone.kv.name
virtual_network_id = var.spoke_vnet_id
registration_enabled = false
}
resource "azurerm_private_dns_zone_virtual_network_link" "kv2" {
name = "KeyvaultDNSLinkHub"
resource_group_name = data.azurerm_resource_group.spoke.name
private_dns_zone_name = azurerm_private_dns_zone.kv.name
virtual_network_id = var.hub_vnet_id
registration_enabled = false
}

View File

@ -1,3 +1,4 @@
data "azurerm_key_vault_secret" "admin_username" {
name = "vmadminusername"
key_vault_id = var.kv_id
@ -40,15 +41,72 @@ resource "azurerm_network_interface" "jumpbox" {
}
}
resource "azurerm_windows_virtual_machine" "jumpbox" {
name = var.jumbox_name
resource "azurerm_linux_virtual_machine" "jumpbox" {
name = var.jumpbox_name
resource_group_name = var.resource_group_name
location = var.location
size = var.jumpbox_size
admin_username = data.azurerm_key_vault_secret.admin_username.value
admin_password = data.azurerm_key_vault_secret.admin_password.value
network_interface_ids = [azurerm_network_interface.jumpbox.id]
disable_password_authentication = false
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
lifecycle {
ignore_changes = [
admin_username,
admin_password
]
}
}
resource "azurerm_virtual_machine_extension" "jumpbox" {
name = "jumpbox"
virtual_machine_id = azurerm_linux_virtual_machine.jumpbox.id
publisher = "Microsoft.Azure.Extensions"
type = "CustomScript"
type_handler_version = "2.0"
settings = <<SETTINGS
{
"fileUris": ["https://raw.githubusercontent.com/alainvetier/ARO-Landing-Zone-Accelerator/main/Scenarios/Secure-Baseline/script.sh"],
"commandToExecute": "sh script.sh"
}
SETTINGS
}
resource "azurerm_network_interface" "Jumpbox2" {
name = "${var.bastion_name}-nic2"
location = var.location
resource_group_name = var.resource_group_name
ip_configuration {
name = "internal"
subnet_id = var.vm_subnet_id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_windows_virtual_machine" "Jumpbox2" {
name = var.jumpbox2_name
resource_group_name = var.resource_group_name
location = var.location
size = var.jumpbox_size
admin_username = data.azurerm_key_vault_secret.admin_username.value
admin_password = data.azurerm_key_vault_secret.admin_username.value
admin_password = data.azurerm_key_vault_secret.admin_password.value
network_interface_ids = [
azurerm_network_interface.jumpbox.id
azurerm_network_interface.Jumpbox2.id
]
os_disk {
@ -59,7 +117,7 @@ resource "azurerm_windows_virtual_machine" "jumpbox" {
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
sku = "2022-Datacenter"
version = "latest"
}
@ -69,19 +127,6 @@ resource "azurerm_windows_virtual_machine" "jumpbox" {
admin_password
]
}
}
resource "azurerm_virtual_machine_extension" "jumpbox" {
name = "jumpbox"
virtual_machine_id = azurerm_windows_virtual_machine.jumpbox.id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
settings = <<SETTINGS
{
"fileUris": ["https://raw.githubusercontent.com/Azure/ARO-Landing-Zone-Accelerator/main/Scenarios/Secure-Baseline/terraform/modules/vm/start_script.ps1"],
"commandToExecute": "powershell -ExecutionPolicy Unrestricted -File start_script.ps1"
}
SETTINGS
priority = "Spot"
eviction_policy = "Deallocate"
}

View File

@ -19,16 +19,21 @@ variable "vm_subnet_id" {
type = string
}
variable "jumbox_name" {
variable "jumpbox_name" {
type = string
default = "jumpbox"
}
variable "jumpbox2_name" {
type = string
default = "Windowsbox"
}
variable "jumpbox_size" {
type = string
default = "Standard_D4s_v3"
default = "Standard_D2s_v3"
}
variable "kv_id" {
type = string
}
}

View File

@ -392,118 +392,104 @@ resource "azurerm_monitor_diagnostic_setting" "fw_diag" {
log_analytics_workspace_id = var.la_id
log_analytics_destination_type = "AzureDiagnostics"
log {
enabled_log {
category = "AzureFirewallApplicationRule"
enabled = true
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AzureFirewallNetworkRule"
enabled = true
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AzureFirewallDnsProxy"
enabled = true
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWApplicationRule"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWApplicationRuleAggregation"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWDnsQuery"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWFqdnResolveFailure"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWIdpsSignature"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWNatRule"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWNatRuleAggregation"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWNetworkRule"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWNetworkRuleAggregation"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWThreatIntel"
enabled = false
retention_policy {
days = 0
enabled = false
}
}
log {
enabled_log {
category = "AZFWFatFlow"
enabled = false
retention_policy {
days = 0
enabled = false

View File

@ -25,8 +25,8 @@ resource "azurerm_subnet" "master_aro" {
resource_group_name = var.spoke_rg_name
virtual_network_name = azurerm_virtual_network.spoke.name
address_prefixes = var.master_aro_subnet_prefix
enforce_private_link_endpoint_network_policies = true
enforce_private_link_service_network_policies = true
private_endpoint_network_policies_enabled = true
private_link_service_network_policies_enabled = false
service_endpoints = [ "Microsoft.ContainerRegistry", "Microsoft.Storage" ]
}
@ -36,8 +36,8 @@ resource "azurerm_subnet" "worker_aro" {
resource_group_name = var.spoke_rg_name
virtual_network_name = azurerm_virtual_network.spoke.name
address_prefixes = var.worker_aro_subnet_prefix
enforce_private_link_service_network_policies = true
private_link_service_network_policies_enabled = false
private_endpoint_network_policies_enabled = true
service_endpoints = [ "Microsoft.ContainerRegistry", "Microsoft.Storage" ]
}
@ -47,5 +47,5 @@ resource "azurerm_subnet" "private_endpoint" {
resource_group_name = var.spoke_rg_name
virtual_network_name = azurerm_virtual_network.spoke.name
address_prefixes = var.private_endpoint_subnet_prefix
enforce_private_link_service_network_policies = true
private_link_service_network_policies_enabled = true
}

View File

@ -0,0 +1,9 @@
## Add step-by-step instructions based on the two official guides, or simply link to them
## Using the Azure portal and the ARO portal from the Windows jumpbox
https://learn.microsoft.com/en-us/azure/openshift/configure-azure-ad-ui
## Using the CLI from the Linux or Windows jumpbox
https://learn.microsoft.com/en-us/azure/openshift/configure-azure-ad-cli

View File

@ -0,0 +1,250 @@
#***************************************************************************************************************************
# Integrate Azure Container Registry
#***************************************************************************************************************************
# Below steps must be done from JumpBox VM
# If using Windows, use the Git Bash terminal on the JumpBox VM (for example from Visual Studio Code) to execute these commands
# log in to Azure
az login # use the same credentials used for the deployment
#set variables
SPOKERGNAME="spoke-aro" #resource group where ARO is deployed
AROCLUSTER=$(az aro list -g $SPOKERGNAME --query "[0].name" -o tsv) #name of the ARO cluster
LOCATION=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query location -o tsv)
apiServer=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query apiserverProfile.url -o tsv)
webConsole=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query consoleProfile.url -o tsv)
ACR_NAME=$(az acr list -g $SPOKERGNAME --query '[0].name' -o tsv)
KV_NAME=$(az keyvault list -g $SPOKERGNAME --query '[0].name' -o tsv)
ACRPWD=$(az acr credential show -n $ACR_NAME --query 'passwords[0].value' -o tsv)
COSMOSDB_NAME=$(az cosmosdb list -g $SPOKERGNAME --query "[0].name" -o tsv)
# connect oc tool to ARO
kubeadmin_password=$(az aro list-credentials \
--name $AROCLUSTER \
--resource-group $SPOKERGNAME \
--query kubeadminPassword --output tsv)
oc login $apiServer -u kubeadmin -p $kubeadmin_password
# Integrate ACR with OC (assuming workload image in 'ratingsapp' namespace)
oc create ns ratingsapp
oc create secret docker-registry \
--docker-server=$ACR_NAME.azurecr.io \
--docker-username=$ACR_NAME \
--docker-password=$ACRPWD \
--docker-email=unused \
acr-secret -n ratingsapp
# oc secrets link default acr-secret --for=pull,mount
#***************************************************************************************************************************
# Integrate Azure Key Vault
#***************************************************************************************************************************
az k8s-extension create --name akvsecretsprovider \
--cluster-name $AROCLUSTER \
--resource-group $SPOKERGNAME \
--cluster-type connectedClusters \
--extension-type Microsoft.AzureKeyVaultSecretsProvider
# Create a service principal to access keyvault
SERVICE_PRINCIPAL_NAME="service-principal-name" # Ex: fta-aro-akv-sp
SERVICE_PRINCIPAL_CLIENT_SECRET="$(az ad sp create-for-rbac --skip-assignment --name $SERVICE_PRINCIPAL_NAME --query 'password' -o tsv)"
SERVICE_PRINCIPAL_CLIENT_ID="$(az ad sp list --display-name $SERVICE_PRINCIPAL_NAME --query [0].'appId' -o tsv)"
# Below steps must be done from JumpBox VM
az keyvault set-policy -n $KV_NAME --secret-permissions get --spn ${SERVICE_PRINCIPAL_CLIENT_ID}
# Create OC Secret
oc create secret generic secrets-store-creds --from-literal clientid=${SERVICE_PRINCIPAL_CLIENT_ID} --from-literal clientsecret=${SERVICE_PRINCIPAL_CLIENT_SECRET} -n ratingsapp
oc label secret secrets-store-creds secrets-store.csi.k8s.io/used=true -n ratingsapp
# Create Keyvault Secret
# grant set, get and list secret permissions on the key vault to the currently logged-in user
USEROID=$(az ad signed-in-user show --query "id" -o tsv)
az keyvault set-policy -n $KV_NAME --secret-permissions set get list --object-id $USEROID
# Get the Connection string for cosmos db and replace $COSMOSDB_URI_CONNECTIONSTRING
cosmosKey=$(az cosmosdb keys list -n $COSMOSDB_NAME -g $SPOKERGNAME --query "primaryMasterKey" -o tsv)
COSMOSDB_URI_CONNECTIONSTRING="mongodb://$COSMOSDB_NAME:$cosmosKey@$COSMOSDB_NAME.mongo.cosmos.azure.com:10255/ratingsdb?ssl=true&replicaSet=globaldb&retrywrites=false&appName=@$COSMOSDB_NAME@"
az keyvault secret set --vault-name ${KV_NAME} --name 'mongodburi' --value $COSMOSDB_URI_CONNECTIONSTRING
# Deploy Secret Provider Class
TENANT_ID=$(az account show --query tenantId -o tsv)
# change default project (namespace)
oc project ratingsapp
cat <<EOF | oc apply -f -
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
name: mongo-secret-csi
namespace: ratingsapp
spec:
provider: azure
secretObjects:
- secretName: mongodburi
type: Opaque
data:
- objectName: MONGODBURI
key: MONGODBURI
parameters:
keyvaultName: "${KV_NAME}"
usePodIdentity: "false"
useVMManagedIdentity: "false"
userAssignedIdentityID: ""
cloudName: ""
objects: |
array:
- |
objectName: MONGODBURI
objectType: secret
objectVersion: ""
tenantId: "${TENANT_ID}"
EOF
# Optional: Test SecretProviderClass by running busybox pod
cat <<EOF | oc apply -f -
kind: Pod
apiVersion: v1
metadata:
name: busybox-secrets-store-inline
namespace: ratingsapp
spec:
containers:
- name: busybox
image: k8s.gcr.io/e2e-test-images/busybox:1.29
command:
- "/bin/sleep"
- "10000"
volumeMounts:
- name: secrets-store-inline
mountPath: "/mnt/secrets-store"
readOnly: true
volumes:
- name: secrets-store-inline
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: "mongo-secret-csi"
nodePublishSecretRef: # Only required when using service principal mode
name: secrets-store-creds # Only required when using service principal mode
EOF
# Exec into the pod to check that the secret from the key vault is mounted successfully
oc exec -it busybox-secrets-store-inline -- sh
# Once inside the pod, the following should display the Cosmos DB connection string, which should match $COSMOSDB_URI_CONNECTIONSTRING
cat /mnt/secrets-store/MONGODBURI
# exit out of the pod
exit
# Now that we tested SecretProviderClass, delete the busybox pod
oc delete po busybox-secrets-store-inline
#***************************************************************************************************************************
# Deploy Workload
#***************************************************************************************************************************
# Below steps must be executed from JumpBox VM
# You can run these commands from the Git Bash terminal, but omit sudo
# Deploy workload on JumpBox VM for testing
git clone https://github.com/MicrosoftDocs/mslearn-aks-workshop-ratings-api.git
git clone https://github.com/MicrosoftDocs/mslearn-aks-workshop-ratings-web.git
git clone https://github.com/Azure/ARO-Landing-Zone-Accelerator.git
# You should get Login Succeeded
sudo az acr login -n $ACR_NAME
cd mslearn-aks-workshop-ratings-api
# If running from the Git Bash terminal, omit sudo
sudo docker build . -t "$ACR_NAME.azurecr.io/ratings-api:v1"
sudo docker push "$ACR_NAME.azurecr.io/ratings-api:v1"
cd ..
cd mslearn-aks-workshop-ratings-web
# If running from the Git Bash terminal, omit sudo
sudo docker build . -t "$ACR_NAME.azurecr.io/ratings-web:v1"
sudo docker push "$ACR_NAME.azurecr.io/ratings-web:v1"
# Create app namespace
oc create namespace ratingsapp
# Navigate to RatingsApp folder and make necessary changes
cd ..
cd ARO-Landing-Zone-Accelerator/Scenarios/Secure-Baseline/Apps/RatingsApp/
oc adm policy add-scc-to-user privileged \
system:serviceaccount:ratingsapp:secrets-store-csi-driver
oc adm policy add-scc-to-user privileged \
system:serviceaccount:ratingsapp:csi-secrets-store-provider-azure
# Fix for this error when describing the replicaset
# Warning FailedCreate 25s (x15 over 106s) replicaset-controller Error creating: pods "ratings-api-d997c8f74-" is forbidden: unable to validate against any security context constraint: [provider "anyuid": Forbidden: not usable by user or serviceaccount, spec.volumes[0]: Invalid value: "csi": csi volumes are not allowed to be used, provider "nonroot": Forbidden: not usable by user or serviceaccount, provider "hostmount-anyuid": Forbidden: not usable by user or serviceaccount, provider "machine-api-termination-handler": Forbidden: not usable by user or serviceaccount, provider "hostnetwork": Forbidden: not usable by user or serviceaccount, provider "hostaccess": Forbidden: not usable by user or serviceaccount, provider "kube-aad-proxy-scc": Forbidden: not usable by user or serviceaccount, provider "node-exporter": Forbidden: not usable by user or serviceaccount, provider "privileged": Forbidden: not usable by user or serviceaccount, provider "privileged-genevalogging": Forbidden: not usable by user or serviceaccount]
# A service account must be added for the API deployment and granted this privilege
oc adm policy add-scc-to-user privileged \
system:serviceaccount:ratingsapp:svcrattingsapp
# Deploy ratings service account
oc apply -f 0-ratings-serviceaccount.yaml -n ratingsapp
# Fix for unable to pull image from ACR
# Failed to pull image "aroacr10737.azurecr.io/ratings-api:v1": rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: authentication required, visit https://aka.ms/acr/authorization for more information.
oc secrets link svcrattingsapp acr-secret --for=pull,mount -n ratingsapp
# Deploy API
# Change the Azure Container Registry name in the following yaml file before applying
oc apply -f 1-ratings-api-deployment.yaml -n ratingsapp
# Verify API app is running
oc describe pod <pod name> -n ratingsapp
oc logs <pod name> -n ratingsapp
# deploy the service
oc apply -f 2-ratings-api-service.yaml -n ratingsapp
# Deploy web frontend
# Change the Azure Container Registry name in the following yaml file before applying
oc apply -f 3a-ratings-web-deployment.yaml -n ratingsapp
# Deploy frontend service
oc apply -f 4-ratings-web-service.yaml -n ratingsapp
# retrieve Azure Front Door endpoint FQDN
# The Azure Front Door overview pane in the portal displays the endpoint FQDN; alternatively, retrieve it with the CLI as sketched below
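# A hedged CLI sketch, assuming the Front Door profile created by the Terraform deployment
# lives in the spoke resource group (profile and endpoint names are looked up, not hard-coded;
# the az afd commands require a recent Azure CLI)
AFD_PROFILE=$(az afd profile list -g $SPOKERGNAME --query "[0].name" -o tsv)
AFD_FQDN=$(az afd endpoint list -g $SPOKERGNAME --profile-name $AFD_PROFILE --query "[0].hostName" -o tsv)
echo $AFD_FQDN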
# Deploy Ingress
# Change the hostname in the ingress to the Azure Front Door endpoint FQDN before deploying
# The hostname must be lower case; convert any upper-case characters to lower case
oc apply -f 5-http-ratings-web-ingress.yaml -n ratingsapp
# Approve the AFD private link service connection from the portal (via the aro-pls object):
# open the aro-pls object
# go to Private endpoint connections
# select the connection in the Pending state
# click Approve
# click Yes to confirm
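# A hedged CLI alternative, assuming the private link service is named "aro-pls" as referenced
# above and was created in the spoke resource group (the pending connection name is looked up first)
PLS_CONN=$(az network private-link-service show -g $SPOKERGNAME -n aro-pls \
  --query "privateEndpointConnections[?privateLinkServiceConnectionState.status=='Pending'].name | [0]" -o tsv)
az network private-link-service connection update -g $SPOKERGNAME --service-name aro-pls \
  --name $PLS_CONN --connection-status Approved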
# link the AFD endpoint to the ARO origin
# select the AFD object in the spoke resource group
# click on Front Door manager
# on the existing endpoint, click "+ Add a route"
# give it a name, for example "aro-route"
# ensure "Enable route" is selected
# select the only domain available in "Domains", which should be the FQDN of the endpoint
# uncheck the redirect to HTTPS checkbox
# select aro-origin-group as the origin group
# leave all other settings at their defaults and click "Add"
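# A hedged CLI alternative to the portal steps above, reusing the profile name retrieved earlier
# and the aro-origin-group created by Terraform (adjust values to your deployment)
AFD_ENDPOINT=$(az afd endpoint list -g $SPOKERGNAME --profile-name $AFD_PROFILE --query "[0].name" -o tsv)
az afd route create -g $SPOKERGNAME --profile-name $AFD_PROFILE --endpoint-name $AFD_ENDPOINT \
  --route-name aro-route --origin-group aro-origin-group \
  --supported-protocols Http Https --https-redirect Disabled \
  --forwarding-protocol HttpOnly --link-to-default-domain Enabled --patterns-to-match "/*"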
# access your application through AFD
# open a browser and connect to http://<your AFD endpoint FQDN>

View File

@ -0,0 +1,58 @@
# Below steps must be executed from JumpBox VM
# Why onboard ARO onto Arc-enabled Kubernetes: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-transition-hybrid
# How to: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters
# log in to Azure
az login # use the same credentials used for deploying with Terraform
#set variables
SPOKERGNAME="spoke-aro" #resource group where ARO is deployed
AROCLUSTER=$(az aro list -g $SPOKERGNAME --query "[0].name" -o tsv) #name of the ARO cluster
LOCATION=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query location -o tsv)
apiServer=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query apiserverProfile.url -o tsv)
webConsole=$(az aro show -g $SPOKERGNAME -n $AROCLUSTER --query consoleProfile.url -o tsv)
# connect oc tool to ARO
kubeadmin_password=$(az aro list-credentials \
--name $AROCLUSTER \
--resource-group $SPOKERGNAME \
--query kubeadminPassword --output tsv)
oc login $apiServer -u kubeadmin -p $kubeadmin_password
# Install the connectedk8s Azure CLI extension
az extension add --name connectedk8s
az extension add --name k8s-extension
# Register providers for Azure Arc-enabled Kubernetes
az provider register --namespace Microsoft.Kubernetes
az provider register --namespace Microsoft.KubernetesConfiguration
az provider register --namespace Microsoft.ExtendedLocation
# Monitor the registration process. Registration may take up to 10 minutes.
# Once registered, you should see the RegistrationState for these namespaces change to Registered.
az provider show -n Microsoft.Kubernetes -o table
az provider show -n Microsoft.KubernetesConfiguration -o table
az provider show -n Microsoft.ExtendedLocation -o table
# Add a policy to enable arc
# This grants the service account azure-arc-kube-aad-proxy-sa in the azure-arc project the privileged SCC
oc adm policy add-scc-to-user privileged system:serviceaccount:azure-arc:azure-arc-kube-aad-proxy-sa
# Connect the cluster to Arc
az connectedk8s connect --name $AROCLUSTER --resource-group $SPOKERGNAME --location $LOCATION
# Verify cluster connection
az connectedk8s list --resource-group $SPOKERGNAME --output table
# Check the deployments and pods. All deployments should be ready, and all pods should be in the Ready and Running states
oc get deployments,pods -n azure-arc
# Create Azure Monitor Container Insights extension instance
# *** The following command will not work from the Git Bash terminal (a bug in Git Bash); run it from a WSL terminal (it does not need to be run on the Jumpbox) ***
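# Set the Log Analytics workspace resource ID used by the next command; a hedged sketch,
# assuming the workspace created by the deployment lives in the spoke resource group (adjust if it is in the hub)
azlaworkspaceId=$(az monitor log-analytics workspace list -g $SPOKERGNAME --query "[0].id" -o tsv)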
az k8s-extension create --name azuremonitor-containers \
--cluster-name $AROCLUSTER \
--resource-group $SPOKERGNAME \
--cluster-type connectedClusters \
--extension-type Microsoft.AzureMonitor.Containers \
--configuration-settings logAnalyticsWorkspaceResourceID=$azlaworkspaceId

View File

@ -2,7 +2,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>3.9.0"
version = "~>3.51.0"
}
azuread = {
source = "hashicorp/azuread"

View File

@ -23,6 +23,11 @@ variable "spoke_name" {
default = "spoke-aro"
}
variable "aro_spn_name" {
type = string
default = "aro-lza-sp"
}
resource "random_password" "pw" {
length = 16
special = true
@ -55,14 +60,6 @@ resource "random_string" "random" {
}
}
variable "aro_sp_client_id" {
type = string
}
variable "aro_sp_password" {
type = string
}
variable "aro_rp_object_id" {
type = string
}