Merge pull request #156 from Azure/merging-nvm-tf-to-private-scenario

merged NVM folder into private cluster folder
This commit is contained in:
Ayobami Ayodeji 2024-10-24 08:42:28 -07:00 committed by GitHub
Parents 2d5314b0b0 2bb5b4cfee
Commit 49990cdecc
No key found matching this signature
GPG key ID: B5690EEEBB952194
99 changed files: 5 additions and 3512 deletions

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -1,3 +0,0 @@
# AKS Secure Baseline AVM
This section has been moved to the [AKS-Secure-Baseline-PrivateCluster](../AKS-Secure-Baseline-PrivateCluster/) folder.

View file

@@ -29,10 +29,10 @@ Before creating the Microsoft Entra ID integrated cluster, groups must be create
Depending on the needs of your organization, you may have existing groups to use, or new groups may need to be created for each cluster deployment.
Navigate to "/AKS-Secure-Baseline-Private-AVM/Bicep/02-EID" folder
Navigate to "/AKS-Secure-Baseline-PrivateCluster/Bicep/02-EID" folder
```azurecli
cd ./Scenarios/AKS-Secure-Baseline-Private-AVM/Bicep/02-EID
cd ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Bicep/02-EID
```
Use the Azure CLI or Azure PowerShell to create the Microsoft Entra groups. Replace the Microsoft Entra group names below with the names of the Microsoft Entra groups you want to create, such as AKS_ES_dev or AKS_ES_ops. There should be no spaces in the names.
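For example, a minimal Azure CLI sketch (the group names AKS_ES_ops and AKS_ES_dev are placeholders; substitute your own):
```azurecli
# Hypothetical group names; replace with your own (no spaces)
az ad group create --display-name AKS_ES_ops --mail-nickname AKS_ES_ops
az ad group create --display-name AKS_ES_dev --mail-nickname AKS_ES_dev
```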

View file

@@ -1,33 +0,0 @@
# Use the Azure CLI to create a storage account to store the Terraform state files.
This storage account will be used to store the state of each deployment step and will be accessed by Terraform to reference values stored in the various deployment state files.
Create some variables to start with
```bash
REGION=<REGION>
STORAGEACCOUNTNAME=<UNIQUENAME>
CONTAINERNAME=akscs
TFSTATE_RG=tfstate
```
Create a Resource Group:
```bash
az group create --name $TFSTATE_RG --location $REGION
```
Create a Storage Account:
```bash
az storage account create -n $STORAGEACCOUNTNAME -g $TFSTATE_RG -l $REGION --sku Standard_LRS
```
Create a Storage Container within the Storage Account:
```bash
az storage container-rm create --storage-account $STORAGEACCOUNTNAME --name $CONTAINERNAME
```
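Subsequent Terraform layers point at this backend during initialization; a sketch of the partial-configuration init used throughout this scenario:
```bash
# Each layer supplies the backend details at init time (partial configuration)
terraform init \
  -backend-config="resource_group_name=$TFSTATE_RG" \
  -backend-config="storage_account_name=$STORAGEACCOUNTNAME" \
  -backend-config="container_name=$CONTAINERNAME"
```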
### Next step
:arrow_forward: [Create or import Microsoft Entra groups for AKS cluster admins and AKS cluster users](./03-eid.md)

View file

@@ -1,21 +0,0 @@
#################
# For importing existing groups
##################
data "azuread_group" "appdevs" {
display_name = var.aks_user_group
}
data "azuread_group" "aksops" {
display_name = var.aks_admin_group
}
output "appdev_object_id" {
value = data.azuread_group.appdevs.object_id
}
output "aksops_object_id" {
value = data.azuread_group.aksops.object_id
}

View file

@@ -1,25 +0,0 @@
# Update the variables in the BACKEND block to reference the
# storage account created out of band for TF state management.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "= 3.44.1"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "aad"
}
}
provider "azurerm" {
features {}
}
provider "azuread" {
}

View file

@@ -1,11 +0,0 @@
#############
# VARIABLES #
#############
variable "aks_admin_group" {
default = "AKS App Admin Team"
}
variable "aks_user_group" {
default = "AKS App Dev Team"
}

View file

@@ -1,21 +0,0 @@
###############
# For creation of new groups
###############
resource "azuread_group" "appdevs" {
display_name = var.aks_user_group
security_enabled = true
}
resource "azuread_group" "aksops" {
display_name = var.aks_admin_group
security_enabled = true
}
output "appdev_object_id" {
value = azuread_group.appdevs.object_id
}
output "aksops_object_id" {
value = azuread_group.aksops.object_id
}

View file

@@ -1,30 +0,0 @@
# Update the variables in the BACKEND block to reference the
# storage account created out of band for TF state management.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "= 3.44.1"
}
azuread = {
source = "hashicorp/azuread"
version = "= 2.34.1"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "aad"
}
}
provider "azurerm" {
features {}
}
provider "azuread" {
}

View file

@@ -1,11 +0,0 @@
#############
# VARIABLES #
#############
variable "aks_admin_group" {
default = "AKS App Admin Team 01357"
}
variable "aks_user_group" {
default = "AKS App Dev Team 01357"
}

View file

@@ -1,68 +0,0 @@
# Prerequisites and Microsoft Entra ID
This is the starting point for the instructions on deploying the [AKS Baseline private cluster reference implementation](../README.md). You'll need certain access and tooling to accomplish this. Follow the instructions below and on the subsequent pages so that you can get your environment ready to proceed with the AKS cluster creation.
## Steps
1. Latest [Azure CLI installed](https://learn.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest) (must be at least version 2.37), or you can perform this from Azure Cloud Shell.
1. An Azure subscription.
The subscription used in this deployment cannot be a [free account](https://azure.microsoft.com/free); it must be a standard EA, pay-as-you-go, or Visual Studio benefit subscription. This is because the resources deployed here are beyond the quotas of free subscriptions.
> :warning: The user or service principal initiating the deployment process _must_ have the following minimal set of Azure Role-Based Access Control (RBAC) roles:
>
> * [Contributor role](https://learn.microsoft.com/azure/role-based-access-control/built-in-roles#contributor) is _required_ at the subscription level to have the ability to create resource groups and perform deployments.
> * [User Access Administrator role](https://learn.microsoft.com/azure/role-based-access-control/built-in-roles#user-access-administrator) is _required_ at the subscription level since you'll be performing role assignments to managed identities across various resource groups (a CLI sketch for granting these roles follows this list).
1. **This step only applies if you are creating a new EID group for this deployment. If one already exists and you are a member of it, you can skip this prerequisite and follow the import portion of the instructions below**.
A Microsoft Entra tenant to associate your Kubernetes RBAC Cluster API authentication to.
> :warning: The user or service principal initiating the deployment process _must_ have the following minimal set of Microsoft Entra ID permissions assigned:
>
> * Microsoft Entra [User Administrator](https://learn.microsoft.com/entra/identity/role-based-access-control/permissions-reference#user-administrator-permissions) is _required_ to create a "break glass" AKS admin Microsoft Entra security group and user. Alternatively, you could get your Microsoft Entra ID admin to create this for you when instructed to do so.
> * If you are not part of the User Administrator group in the tenant associated to your Azure subscription, please consider [creating a new tenant](https://learn.microsoft.com/entra/fundamentals/create-new-tenant#create-a-new-tenant-for-your-organization) to use while evaluating this implementation. The Microsoft Entra tenant backing your cluster's API RBAC does NOT need to be the same tenant associated with your Azure subscription.
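To grant the Azure RBAC roles listed above ahead of time, a subscription Owner could use the Azure CLI; a minimal sketch (the object ID and subscription ID are placeholders):
```azurecli
# Placeholders: <object-id> = deploying user/SP, <subscription-id> = target subscription
az role assignment create --assignee <object-id> --role "Contributor" --scope "/subscriptions/<subscription-id>"
az role assignment create --assignee <object-id> --role "User Access Administrator" --scope "/subscriptions/<subscription-id>"
```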
## Create or import Microsoft Entra groups for AKS
Before creating the Microsoft Entra ID integrated cluster, groups must be created that can be later mapped to the Built-In Roles of "Azure Kubernetes Service Cluster User Role" and "Azure Kubernetes Service RBAC Cluster Admin".
Depending on the needs of your organization, you may have existing groups to use, or new groups may need to be created for each cluster deployment (the sketch below shows how these groups are consumed later).
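The group object IDs produced here are later bound to those built-in roles; a sketch taken from the role assignments made in the 07-AKS-cluster step:
```
# Sketch: how the created/imported groups map to AKS built-in roles later on
resource "azurerm_role_assignment" "appdevs_user" {
  scope                = module.aks.aks_id
  role_definition_name = "Azure Kubernetes Service Cluster User Role"
  principal_id         = var.existing_appdev_object_id
}
resource "azurerm_role_assignment" "aksops_admin" {
  scope                = module.aks.aks_id
  role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
  principal_id         = var.existing_aksops_object_id
}
```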
Navigate to the "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform" folder, then into either "03-EID-create" or "03-EID-import"
```bash
cd ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/03-EID-import
```
In the "variables.tf" file, update the defaults to reflect the display names as needed to either match existing groups or create names that fit your requirements.
Once the files are updated, deploy using Terraform Init, Plan and Apply.
```bash
terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```
```bash
terraform plan
```
```bash
terraform apply
```
If you get an error about changes to the backend configuration, re-run `terraform init` with the `-reconfigure` flag.
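For example, a sketch using the same backend variables:
```bash
terraform init -reconfigure -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```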
## Ensure you are part of the Microsoft Entra group you just created or pointed to
1. Go to the Azure portal and search for Microsoft Entra
2. Select **Microsoft Entra ID**
3. Click on **Groups** in the left blade
4. Select the Admin User group you just created. For the default name, this should be *AKS App Admin Team*
5. Click on **Members** in the left blade
6. ![Adding members to the Microsoft Entra group](../media/adding-to-eid-group.png)
7. Click **+ Add members**
8. Enter your name in the search bar and select your user(s)
9. Click **Select**
### Next step
:arrow_forward: [Creation of Hub Network & its respective Components](./04-network-hub.md)

View file

@@ -10,7 +10,7 @@ If you haven't yet, clone the repo and cd to the appropriate folder
```bash
git clone https://github.com/Azure/AKS-Landing-Zone-Accelerator
cd ./Scenarios/AKS-Secure-Baseline-Private-AVM/Terraform/02-EID
cd ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/02-EID
```
The following will be created:
@@ -20,7 +20,7 @@ The following will be created:
* Azure Firewall
* Azure Bastion Host
Navigate to "/Scenarios/AKS-Secure-Baseline-Private-AVM/Terraform/" folder
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/" folder
```bash
cd ./03-Network-Hub

View file

@@ -1,51 +0,0 @@
####################################
# These resources will create an additional subnet for user connectivity
# and a Linux Server to use with the Bastion Service.
####################################
# Dev Subnet
# (Additional subnet for Developer Jumpbox)
resource "azurerm_subnet" "dev" {
name = "devSubnet"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.0.4.0/24"]
private_endpoint_network_policies_enabled = false
}
resource "azurerm_network_security_group" "dev-nsg" {
name = "${azurerm_virtual_network.vnet.name}-${azurerm_subnet.dev.name}-nsg"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
}
resource "azurerm_subnet_network_security_group_association" "subnet" {
subnet_id = azurerm_subnet.dev.id
network_security_group_id = azurerm_network_security_group.dev-nsg.id
}
# Linux Server VM
module "create_linuxsserver" {
source = "./modules/compute-linux"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
vnet_subnet_id = azurerm_subnet.dev.id
server_name = "server-dev-linux"
admin_username = var.admin_username
admin_password = var.admin_password
}
#######################
# SENSITIVE VARIABLES #
#######################
variable "admin_password" {
default = "change me"
}
variable "admin_username" {
default = "sysadmin"
}

View file

@@ -1,34 +0,0 @@
# Azure Firewall
# --------------
# Firewall Rules created via Module
resource "azurerm_firewall" "firewall" {
name = "${azurerm_virtual_network.vnet.name}-firewall"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
firewall_policy_id = module.firewall_rules_aks.fw_policy_id
sku_name = var.sku_name
sku_tier = var.sku_tier
ip_configuration {
name = "configuration"
subnet_id = azurerm_subnet.firewall.id
public_ip_address_id = azurerm_public_ip.firewall.id
}
}
resource "azurerm_public_ip" "firewall" {
name = "${azurerm_virtual_network.vnet.name}-firewall-pip"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
allocation_method = "Static"
sku = "Standard"
}
module "firewall_rules_aks" {
source = "./modules/aks-fw-rules"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
}

View file

@@ -1,62 +0,0 @@
# Virtual Network for Hub
# -----------------------
resource "azurerm_virtual_network" "vnet" {
name = "vnet-${var.hub_prefix}"
resource_group_name = azurerm_resource_group.rg.name
location = var.location
address_space = ["10.0.0.0/16"]
dns_servers = null
tags = var.tags
}
# SUBNETS on Hub Network
# ----------------------
# Firewall Subnet
# (Additional subnet for Azure Firewall, without NSG as per Firewall requirements)
resource "azurerm_subnet" "firewall" {
name = "AzureFirewallSubnet"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.0.1.0/26"]
private_endpoint_network_policies_enabled = false
}
# Gateway Subnet
# (Additional subnet for Gateway, without NSG as per requirements)
resource "azurerm_subnet" "gateway" {
name = "GatewaySubnet"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.0.2.0/27"]
private_endpoint_network_policies_enabled = false
}
# Bastion - Module creates additional subnet (without NSG), public IP and Bastion
module "bastion" {
source = "./modules/bastion"
subnet_cidr = "10.0.3.0/26"
virtual_network_name = azurerm_virtual_network.vnet.name
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
}
#############
## OUTPUTS ##
#############
# These outputs are used by later deployments
output "hub_vnet_name" {
value = azurerm_virtual_network.vnet.name
}
output "hub_vnet_id" {
value = azurerm_virtual_network.vnet.id
}

View file

@@ -1,27 +0,0 @@
#############
# RESOURCES #
#############
# Resource Group for Hub
# ----------------------
resource "azurerm_resource_group" "rg" {
name = "${var.hub_prefix}-HUB"
location = var.location
}
#############
## OUTPUTS ##
#############
# These outputs are used by later deployments
output "hub_rg_location" {
value = azurerm_resource_group.rg.location
}
output "hub_rg_name" {
value = azurerm_resource_group.rg.name
}

View file

@@ -1,80 +0,0 @@
# Firewall Policy
resource "azurerm_firewall_policy" "aks" {
name = "AKSpolicy"
resource_group_name = var.resource_group_name
location = var.location
}
output "fw_policy_id" {
value = azurerm_firewall_policy.aks.id
}
# Rules Collection Group
resource "azurerm_firewall_policy_rule_collection_group" "AKS" {
name = "aks-rcg"
firewall_policy_id = azurerm_firewall_policy.aks.id
priority = 200
application_rule_collection {
name = "aks_app_rules"
priority = 205
action = "Allow"
rule {
name = "aks_service"
protocols {
type = "Https"
port = 443
}
source_addresses = ["10.1.0.0/16"]
destination_fqdn_tags = ["AzureKubernetesService"]
}
}
network_rule_collection {
name = "aks_network_rules"
priority = 201
action = "Allow"
rule {
name = "https"
protocols = ["TCP"]
source_addresses = ["10.1.0.0/16"]
destination_addresses = ["*"]
destination_ports = ["443"]
}
rule {
name = "dns"
protocols = ["UDP"]
source_addresses = ["10.1.0.0/16"]
destination_addresses = ["*"]
destination_ports = ["53"]
}
rule {
name = "time"
protocols = ["UDP"]
source_addresses = ["10.1.0.0/16"]
destination_addresses = ["*"]
destination_ports = ["123"]
}
rule {
name = "tunnel_udp"
protocols = ["UDP"]
source_addresses = ["10.1.0.0/16"]
destination_addresses = ["*"]
destination_ports = ["1194"]
}
rule {
name = "tunnel_tcp"
protocols = ["TCP"]
source_addresses = ["10.1.0.0/16"]
destination_addresses = ["*"]
destination_ports = ["9000"]
}
}
}
variable "resource_group_name" {}
variable "location" {}

View file

@@ -1,26 +0,0 @@
resource "azurerm_subnet" "bastionhost" {
name = "AzureBastionSubnet"
resource_group_name = var.resource_group_name
virtual_network_name = var.virtual_network_name
address_prefixes = [var.subnet_cidr]
}
resource "azurerm_public_ip" "bastionhost" {
name = "${var.virtual_network_name}-bastion-pip"
resource_group_name = var.resource_group_name
location = var.location
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_bastion_host" "bastionhost" {
name = "${var.virtual_network_name}-bastion"
resource_group_name = var.resource_group_name
location = var.location
ip_configuration {
name = "configuration"
subnet_id = azurerm_subnet.bastionhost.id
public_ip_address_id = azurerm_public_ip.bastionhost.id
}
}

View file

@@ -1,7 +0,0 @@
variable "location" {}
variable "resource_group_name" {}
variable "virtual_network_name" {}
variable "subnet_cidr" {}

View file

@@ -1,100 +0,0 @@
resource "azurerm_linux_virtual_machine" "compute" {
name = var.server_name
location = var.location
resource_group_name = var.resource_group_name
size = var.vm_size
admin_username = var.admin_username
admin_password = var.admin_password
disable_password_authentication = var.disable_password_authentication //Set to true if using SSH key
tags = var.tags
network_interface_ids = [
azurerm_network_interface.compute.id
]
os_disk {
caching = "ReadWrite"
storage_account_type = var.storage_account_type
}
source_image_reference {
publisher = var.os_publisher
offer = var.os_offer
sku = var.os_sku
version = var.os_version
}
boot_diagnostics {
storage_account_uri = null
}
}
resource "azurerm_network_interface" "compute" {
name = "${var.server_name}-nic"
location = var.location
resource_group_name = var.resource_group_name
enable_accelerated_networking = var.enable_accelerated_networking
tags = var.tags
ip_configuration {
name = "internal"
subnet_id = var.vnet_subnet_id
private_ip_address_allocation = "Dynamic"
}
}
variable "admin_username" {
default = "sysadmin"
}
variable "admin_password" {
default = "changeme"
}
variable "server_name" {}
variable "resource_group_name" {}
variable "location" {}
variable "vnet_subnet_id" {}
variable "os_publisher" {
default = "canonical"
}
variable "os_offer" {
default = "0001-com-ubuntu-server-focal"
}
variable "os_sku" {
default = "20_04-lts-gen2"
}
variable "os_version" {
default = "latest"
}
variable "disable_password_authentication" {
default = false # set to true if using an SSH key; leave false when using a password
}
variable "enable_accelerated_networking" {
default = "false"
}
variable "storage_account_type" {
default = "Standard_LRS"
}
variable "vm_size" {
default = "Standard_D2s_v3"
}
variable "tags" {
type = map(string)
default = {
application = "compute"
}
}
variable "allocation_method" {
default = "Static"
}

View file

@@ -1,21 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "hub-net"
}
}
provider "azurerm" {
features {}
disable_terraform_partner_id = false
partner_id = "a30e584d-e662-44ee-9f11-ae84db89a0f0"
}

View file

@@ -1,59 +0,0 @@
#!/bin/bash
#############################
# Script Definition
#############################
logpath=/var/log/deploymentscriptlog
#############################
# Upgrading Linux Distribution
#############################
echo "#############################" >> $logpath
echo "Upgrading Linux Distribution" >> $logpath
echo "#############################" >> $logpath
sudo apt-get update >> $logpath
sudo apt-get -y upgrade >> $logpath
echo " " >> $logpath
#############################
#Install Azure CLI
#############################
echo "#############################" >> $logpath
echo "Installing Azure CLI" >> $logpath
echo "#############################" >> $logpath
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
#############################
#Install Docker
#############################
echo "#############################" >> $logpath
echo "Installing Docker" >> $logpath
echo "#############################" >> $logpath
wget -qO- https://get.docker.com/ | sh >> $logpath
sudo usermod -aG docker $1
echo " " >> $logpath
#############################
#Install Kubectl
#############################
echo "#############################" >> $logpath
echo "Installing Kubectl" >> $logpath
echo "#############################" >> $logpath
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
#############################
#Install Helm
#############################
echo "#############################" >> $logpath
echo "Installing Helm" >> $logpath
echo "#############################" >> $logpath
curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
sudo apt-get install apt-transport-https --yes
echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install -y helm

View file

@@ -1,4 +0,0 @@
admin_password = "change me"
admin_username = "sysadmin"
location="eastus"
hub_prefix="escs-hub"

View file

@@ -1,33 +0,0 @@
#############
# VARIABLES #
#############
variable "location" {
default = "eastus"
}
variable "tags" {
type = map(string)
default = {
project = "cs-aks"
}
}
variable "hub_prefix" {
default = "escs-hub"
}
variable "sku_name" {
default = "AZFW_VNet"
}
variable "sku_tier" {
default = "Standard"
}
## Sensitive Variables for the Jumpbox
## Sample terraform.tfvars File
# admin_password = "ChangeMe"
# admin_username = "sysadmin"

View file

@@ -362,5 +362,4 @@ module "avm-res-network-applicationgateway" {
zones = ["1", "2", "3"]
depends_on = [module.avm-res-network-virtualnetwork-appgw-subnet.resource]
}

View file

@@ -1,37 +0,0 @@
# Create the Hub Network
The following will be created:
* Resource Group for Hub Networking (hub-networking.tf)
* Hub Network (hub-networking.tf)
* Azure Firewall (firewall.tf)
* Azure Bastion Host (hub-networking.tf)
* Virtual Machine (dev-setup.tf)
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub" folder
```bash
cd ../04-Network-Hub
```
In the "variables.tf" file, update the defaults to reflect the tags you'd like to use throughout the rest of the deployment. There is a group of "sensitive" variables for the username and password of the jumpbox. It is not recommended to commit these variables to code in a public repo; instead, create a separate terraform.tfvars file (excluded from commits via .gitignore) or use GitHub secrets (with a workflow) to pass those values in at deployment time. (A sample terraform.tfvars.sample file is included for reference. Enter your values and rename it **terraform.tfvars**.)
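A minimal terraform.tfvars sketch (these values mirror the included sample file; the password is a placeholder you must change):
```
admin_password = "<your password>"
admin_username = "sysadmin"
location       = "eastus"
hub_prefix     = "escs-hub"
```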
Once the files are updated, deploy using Terraform Init, Plan and Apply.
```bash
terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```
> Enter `terraform init -reconfigure` if you get an error saying there was a change in the backend configuration which may require migrating existing state
```bash
terraform plan
```
```bash
terraform apply
```
If you get an error about changes to the backend configuration, re-run `terraform init` with the `-reconfigure` flag.
:arrow_forward: [Creation of Spoke Network & its respective Components](./05-network-lz.md)

View file

@@ -1,33 +0,0 @@
# This section creates a subnet for AKS along with an associated NSG.
# "Here be dragons!" <-- Must elaborate
resource "azurerm_subnet" "aks" {
name = "aksSubnet"
resource_group_name = azurerm_resource_group.spoke-rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.1.16.0/20"]
private_endpoint_network_policies_enabled = true
}
output "aks_subnet_id" {
value = azurerm_subnet.aks.id
}
resource "azurerm_network_security_group" "aks-nsg" {
name = "${azurerm_virtual_network.vnet.name}-${azurerm_subnet.aks.name}-nsg"
resource_group_name = azurerm_resource_group.spoke-rg.name
location = azurerm_resource_group.spoke-rg.location
}
resource "azurerm_subnet_network_security_group_association" "subnet" {
subnet_id = azurerm_subnet.aks.id
network_security_group_id = azurerm_network_security_group.aks-nsg.id
}
# # Associate Route Table to AKS Subnet
resource "azurerm_subnet_route_table_association" "rt_association" {
subnet_id = azurerm_subnet.aks.id
route_table_id = azurerm_route_table.route_table.id
}

View file

@@ -1,107 +0,0 @@
# Application Gateway and Supporting Infrastructure
#############
# LOCALS #
#############
/*
The following map enables the deployment of multiple application gateways. For example, you can use it to deploy two app gateways to support blue-green deployment at the AKS cluster level; if you need just one app gateway for a standard deployment, you can configure a map with only one object.
locals {
Map of the Azure application gateways to deploy; it is used for the standard deployment but also for the blue-green deployment.
appgws = {
"appgw_blue" = {
prefix used to configure unique names and parameter values
name_prefix="blue"
Boolean flag that enables or disables the deployment of the specific application gateway
appgw_turn_on=true
},
"appgw_green" = {
name_prefix="green"
appgw_turn_on=false
}
}
}
*/
locals {
appgws = {
"appgw_blue" = {
name_prefix = "blue"
appgw_turn_on = true
},
"appgw_green" = {
name_prefix = "green"
appgw_turn_on = false
}
}
}
resource "azurerm_subnet" "appgw" {
name = "appgwSubnet"
resource_group_name = azurerm_resource_group.spoke-rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.1.1.0/24"]
# private_endpoint_network_policies_enabled = false
}
module "appgw_nsgs" {
source = "./modules/app_gw_nsg"
resource_group_name = azurerm_resource_group.spoke-rg.name
location = azurerm_resource_group.spoke-rg.location
nsg_name = "${azurerm_virtual_network.vnet.name}-${azurerm_subnet.appgw.name}-nsg"
}
resource "azurerm_subnet_network_security_group_association" "appgwsubnet" {
subnet_id = azurerm_subnet.appgw.id
network_security_group_id = module.appgw_nsgs.appgw_nsg_id
}
# One application gateway is deployed per enabled entry in the appgws map; this is usually used in the blue-green scenario
resource "azurerm_public_ip" "appgw" {
for_each = { for appgws in local.appgws : appgws.name_prefix => appgws if appgws.appgw_turn_on == true }
name = "appgw-pip-${each.value.name_prefix}"
resource_group_name = azurerm_resource_group.spoke-rg.name
location = azurerm_resource_group.spoke-rg.location
allocation_method = "Static"
sku = "Standard"
}
# One application gateway is deployed per enabled entry in the appgws map; this is usually used in the blue-green scenario
module "appgw" {
source = "./modules/app_gw"
depends_on = [
module.appgw_nsgs
]
for_each = { for appgws in local.appgws : appgws.name_prefix => appgws if appgws.appgw_turn_on == true }
resource_group_name = azurerm_resource_group.spoke-rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
location = azurerm_resource_group.spoke-rg.location
appgw_name = "lzappgw-${each.value.name_prefix}"
frontend_subnet = azurerm_subnet.appgw.id
appgw_pip = azurerm_public_ip.appgw[each.value.name_prefix].id
}
# The app gateway name for each instance provisioned. If you are not using blue-green deployment, you can remove the for loop and use the attributes of the module.appgw module directly.
output "gateway_name" {
value = { for appgws in module.appgw : appgws.gateway_name => appgws.gateway_name }
}
# The app gateway ID for each instance provisioned. If you are not using blue-green deployment, you can remove the for loop and use the attributes of the module.appgw module directly.
output "gateway_id" {
value = { for appgws in module.appgw : appgws.gateway_name => appgws.gateway_id }
}
# PIP IDs to permit A-record registration in the DNS zone to invoke the apps deployed on AKS. There is a PIP for each instance provisioned. If you are not using blue-green deployment, you can remove the for loop and use the attributes of the azurerm_public_ip.appgw resource directly.
output "azurerm_public_ip_ref" {
value = { for pips in azurerm_public_ip.appgw : pips.name => pips.id }
}
output "appgw_subnet_id" {
value = azurerm_subnet.appgw.id
}
output "appgw_subnet_name" {
value = azurerm_subnet.appgw.name
}

View file

@@ -1,44 +0,0 @@
# # Deploy DNS Private Zone for ACR
resource "azurerm_private_dns_zone" "acr-dns" {
name = "privatelink.azurecr.io"
resource_group_name = azurerm_resource_group.spoke-rg.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "lz_acr" {
name = "lz_to_acrs"
resource_group_name = azurerm_resource_group.spoke-rg.name
private_dns_zone_name = azurerm_private_dns_zone.acr-dns.name
virtual_network_id = azurerm_virtual_network.vnet.id
}
output "acr_private_zone_id" {
value = azurerm_private_dns_zone.acr-dns.id
}
output "acr_private_zone_name" {
value = azurerm_private_dns_zone.acr-dns.name
}
# # Deploy DNS Private Zone for KV
resource "azurerm_private_dns_zone" "kv-dns" {
name = "privatelink.vaultcore.azure.net"
resource_group_name = azurerm_resource_group.spoke-rg.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "lz_kv" {
name = "lz_to_kvs"
resource_group_name = azurerm_resource_group.spoke-rg.name
private_dns_zone_name = azurerm_private_dns_zone.kv-dns.name
virtual_network_id = azurerm_virtual_network.vnet.id
}
output "kv_private_zone_id" {
value = azurerm_private_dns_zone.kv-dns.id
}
output "kv_private_zone_name" {
value = azurerm_private_dns_zone.kv-dns.name
}

View file

@@ -1,29 +0,0 @@
# Peering Landing Zone (Spoke) Network to Connectivity (Hub) Network
## This assumes that the SP being used for this deployment has Network Contributor rights
## on the subscription(s) where the VNETs reside.
## If multiple subscriptions are used, provider aliases will be required.
# Spoke to Hub
resource "azurerm_virtual_network_peering" "direction1" {
name = "${azurerm_virtual_network.vnet.name}-to-${data.terraform_remote_state.existing-hub.outputs.hub_vnet_name}"
resource_group_name = azurerm_resource_group.spoke-rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
remote_virtual_network_id = data.terraform_remote_state.existing-hub.outputs.hub_vnet_id
allow_virtual_network_access = true
allow_forwarded_traffic = true
allow_gateway_transit = false
use_remote_gateways = false
}
# Hub to Spoke
resource "azurerm_virtual_network_peering" "direction2" {
name = "${data.terraform_remote_state.existing-hub.outputs.hub_vnet_name}-to-${azurerm_virtual_network.vnet.name}"
resource_group_name = data.terraform_remote_state.existing-hub.outputs.hub_rg_name
virtual_network_name = data.terraform_remote_state.existing-hub.outputs.hub_vnet_name
remote_virtual_network_id = azurerm_virtual_network.vnet.id
allow_virtual_network_access = true
allow_forwarded_traffic = true
allow_gateway_transit = false
use_remote_gateways = false
}

View file

@@ -1,53 +0,0 @@
# Resource Group for Landing Zone Networking
# This RG uses the same region as the Hub.
resource "azurerm_resource_group" "spoke-rg" {
name = "${var.lz_prefix}-SPOKE"
location = data.terraform_remote_state.existing-hub.outputs.hub_rg_location
}
output "lz_rg_location" {
value = azurerm_resource_group.spoke-rg.location
}
output "lz_rg_name" {
value = azurerm_resource_group.spoke-rg.name
}
# Virtual Network
resource "azurerm_virtual_network" "vnet" {
name = "vnet-${var.lz_prefix}"
resource_group_name = azurerm_resource_group.spoke-rg.name
location = azurerm_resource_group.spoke-rg.location
address_space = ["10.1.0.0/16"]
dns_servers = null
tags = var.tags
}
output "lz_vnet_name" {
value = azurerm_virtual_network.vnet.name
}
output "lz_vnet_id" {
value = azurerm_virtual_network.vnet.id
}
# # Create Route Table for Landing Zone
# (All subnets in the landing zone will need to connect to this Route Table)
resource "azurerm_route_table" "route_table" {
name = "rt-${var.lz_prefix}"
resource_group_name = azurerm_resource_group.spoke-rg.name
location = azurerm_resource_group.spoke-rg.location
disable_bgp_route_propagation = false
route {
name = "route_to_firewall"
address_prefix = "0.0.0.0/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = "10.0.1.4"
}
}
output "lz_rt_id" {
value = azurerm_route_table.route_table.id
}

View file

@@ -1,12 +0,0 @@
# Data From Existing Infrastructure
data "terraform_remote_state" "existing-hub" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "hub-net"
access_key = var.access_key
}
}

View file

@@ -1,100 +0,0 @@
locals {
backend_address_pool_name = "${var.virtual_network_name}-beap"
frontend_port_name = "${var.virtual_network_name}-feport"
frontend_ip_configuration_name = "${var.virtual_network_name}-feip"
http_setting_name = "${var.virtual_network_name}-be-htst"
listener_name = "${var.virtual_network_name}-httplstn"
request_routing_rule_name = "${var.virtual_network_name}-rqrt"
redirect_configuration_name = "${var.virtual_network_name}-rdrcfg"
}
resource "azurerm_application_gateway" "agw" {
name = var.appgw_name
resource_group_name = var.resource_group_name
location = var.location
sku {
name = "WAF_v2"
tier = "WAF_v2"
capacity = 2
}
gateway_ip_configuration {
name = "app-gateway-ip-configuration"
subnet_id = var.frontend_subnet
}
frontend_port {
name = local.frontend_port_name
port = 80
}
# frontend_port {
# name = "https-443"
# port = 443
# protocol = "Https"
# }
frontend_ip_configuration {
name = local.frontend_ip_configuration_name
public_ip_address_id = var.appgw_pip
}
backend_address_pool {
name = local.backend_address_pool_name
}
backend_http_settings {
name = local.http_setting_name
cookie_based_affinity = "Disabled"
path = "/path1/"
port = 80
protocol = "Http"
request_timeout = 60
}
http_listener {
name = local.listener_name
frontend_ip_configuration_name = local.frontend_ip_configuration_name
frontend_port_name = local.frontend_port_name
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = local.listener_name
backend_address_pool_name = local.backend_address_pool_name
backend_http_settings_name = local.http_setting_name
priority = 1 // priority argument required as of the 3.6.0 provider release; 1 is the highest priority and 20000 is the lowest.
}
waf_configuration {
enabled = true
firewall_mode = "Detection" # "Prevention" or "Detection"
rule_set_type = "OWASP" # "OWASP" or "Microsoft_BotManagerRuleSet"
rule_set_version = "3.2"
}
}
# Variables
variable "appgw_name" {}
variable "resource_group_name" {}
variable "location" {}
variable "frontend_subnet" {}
variable "virtual_network_name" {}
variable "appgw_pip" {}
output "gateway_name" {
value = azurerm_application_gateway.agw.name
}
output "gateway_id" {
value = azurerm_application_gateway.agw.id
}

View file

@@ -1,90 +0,0 @@
resource "azurerm_network_security_group" "appgw-nsg" {
name = var.nsg_name
resource_group_name = var.resource_group_name
location = var.location
}
output "appgw_nsg_id" {
value = azurerm_network_security_group.appgw-nsg.id
}
resource "azurerm_network_security_rule" "inboundhttps" {
resource_group_name = var.resource_group_name
network_security_group_name = azurerm_network_security_group.appgw-nsg.name
name = "Allow443InBound"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = "Internet"
destination_address_prefix = "VirtualNetwork"
}
resource "azurerm_network_security_rule" "controlplane" {
resource_group_name = var.resource_group_name
network_security_group_name = azurerm_network_security_group.appgw-nsg.name
name = "AllowControlPlane"
priority = 110
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "65200-65535"
source_address_prefix = "*"
destination_address_prefix = "*"
}
resource "azurerm_network_security_rule" "healthprobes" {
resource_group_name = var.resource_group_name
network_security_group_name = azurerm_network_security_group.appgw-nsg.name
name = "AllowHealthProbes"
priority = 120
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "AzureLoadBalancer"
destination_address_prefix = "VirtualNetwork"
}
resource "azurerm_network_security_rule" "DenyAllInBound" {
resource_group_name = var.resource_group_name
network_security_group_name = azurerm_network_security_group.appgw-nsg.name
name = "DenyAllInBound"
priority = 1000
direction = "Inbound"
access = "Deny"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
# resource "azurerm_network_security_rule" "AllowAllOutBound" {
# name = "AllowAllOutBound"
# priority = 1000
# direction = "Outbound"
# access = "Allow"
# protocol = "Any"
# source_port_range = "*"
# destination_port_range = "*"
# source_address_prefix = "*"
# destination_address_prefix = "*"
# resource_group_name = var.resource_group_name
# network_security_group_name = var.network_security_group_name
# }
variable "location" {}
variable "resource_group_name" {}
variable "nsg_name" {}

View file

@@ -1,19 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "lz-net"
}
}
provider "azurerm" {
features {}
}

View file

@@ -1,7 +0,0 @@
# Update to reflect correct environment
state_sa_name = <storage account name>
container_name = "akscs"
access_key = "XXXXX"
lz_prefix = "escs-lz01"

View file

@@ -1,30 +0,0 @@
#############
# VARIABLES #
#############
variable "tags" {
type = map(string)
default = {
project = "spoke-lz"
}
}
variable "lz_prefix" {
default = "escs-lz01"
}
# Used to retrieve outputs from other state files.
# The "access_key" variable is sensitive and should be passed using
# a .TFVARS file or other secure method.
variable "state_sa_name" {
default = "hub-net"
}
variable "container_name" {
default = "akscs"
}
# Storage Account Access Key
variable "access_key" {}

View file

@@ -1,51 +0,0 @@
# Create the Landing Zone Network
The following will be created:
* Resource Group for Landing Zone Networking (lz-networking.tf)
* Route Table (lz-networking.tf)
* Peering of Hub and Spoke Networks (hub-spoke-peering.tf)
* Private DNS Zones (dns-zones.tf)
* Application Gateway (app-gateway.tf)
* Subnet for AKS (aks-networking.tf)
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/05-Network-LZ" folder
```bash
cd ../05-Network-LZ
```
In the "variables.tf" file, update the defaults to reflect the tags you'd like to use.
This deployment will need to reference data objects from the Hub deployment and will need access to the pre-existing state file; update the variables as needed.
This deployment will also need to use a storage access key (from Azure) to read the storage account data. This is a sensitive variable and should not be committed to the code repo.
Once again, a sample terraform.tfvars.sample file is included. Update the required variables, save it, and rename it to **terraform.tfvars**.
To get the access key:
1. Go to Azure portal and find the storage account that was created for Terraform
2. Under **Security + networking** section in the left blade, click on **Access keys**
3. Click on **Show keys** at the top of the resulting page
4. Copy the string under **Key** from one of the two keys provided
5. Update the terraform.tfvars.sample file with this as the value for access_key
6. Rename the file to terraform.tfvars
Update the rest of the variables in the new terraform.tfvars file as well.
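If you prefer the CLI to the portal steps above, a sketch for fetching a key (assuming the storage account variables from the state-storage step):
```bash
# Fetch the first access key of the Terraform state storage account
az storage account keys list -g $TFSTATE_RG -n $STORAGEACCOUNTNAME --query "[0].value" -o tsv
```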
Once the files are updated, deploy using Terraform Init, Plan and Apply.
```bash
terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```
```bash
terraform plan
```
```bash
terraform apply
```
If you get an error about changes to the backend configuration, re-run `terraform init` with the `-reconfigure` flag.
:arrow_forward: [Creation of Supporting Components for AKS](./06-aks-supporting.md)

View file

@@ -11,7 +11,6 @@ variable "rgHubName" {
variable "location" {
type = string
default = "eastus"
}
variable "vnetLzName" {
@@ -27,7 +26,6 @@ variable "vnetHubName" {
variable "adminGroupObjectIds" {
type = string
default = " "
}
variable "acrName" {
@@ -38,5 +36,4 @@ variable "acrName" {
variable "akvName" {
type = string
default = "akvlzti5y"
}

View file

@@ -1,38 +0,0 @@
########
# DATA #
########
# Data From Existing Infrastructure
data "terraform_remote_state" "existing-lz" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "lz-net"
access_key = var.access_key
}
}
data "azurerm_client_config" "current" {}
output "key_vault_id" {
value = module.create_kv.kv_id
}
output "container_registry_id" {
value = module.create_acr.acr_id
}

View file

@@ -1,47 +0,0 @@
resource "azurerm_container_registry" "acr" {
name = var.acrname
resource_group_name = var.resource_group_name
location = var.location
sku = "Premium"
public_network_access_enabled = false
admin_enabled = true
}
resource "azurerm_private_endpoint" "acr-endpoint" {
name = "${var.acrname}-to_aks"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.aks_sub_id
private_service_connection {
name = "${var.acrname}-privateserviceconnection"
private_connection_resource_id = azurerm_container_registry.acr.id
subresource_names = ["registry"]
is_manual_connection = false
}
private_dns_zone_group {
name = "acr-endpoint-zone"
private_dns_zone_ids = [var.private_zone_id]
}
}
output "acr_id" {
value = azurerm_container_registry.acr.id
}
output "custom_dns_configs" {
value = azurerm_private_endpoint.acr-endpoint.custom_dns_configs
}
# Variables
variable "acrname" {}
variable "resource_group_name" {}
variable "location" {}
variable "aks_sub_id" {}
variable "private_zone_id" {}

View file

@@ -1,56 +0,0 @@
resource "azurerm_key_vault" "key-vault" {
name = var.name
location = var.location
resource_group_name = var.resource_group_name
enabled_for_disk_encryption = true
tenant_id = var.tenant_id
soft_delete_retention_days = 7
purge_protection_enabled = false
sku_name = "standard"
network_acls {
bypass = "AzureServices"
default_action = "Deny"
}
}
resource "azurerm_private_endpoint" "kv-endpoint" {
name = "${var.name}-endpoint"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.dest_sub_id
private_service_connection {
name = "${var.name}-privateserviceconnection"
private_connection_resource_id = azurerm_key_vault.key-vault.id
subresource_names = ["vault"]
is_manual_connection = false
}
private_dns_zone_group {
name = "kv-endpoint-zone"
private_dns_zone_ids = [var.private_zone_id]
}
}
output "kv_id" {
value = azurerm_key_vault.key-vault.id
}
output "key_vault_url" {
value = azurerm_key_vault.key-vault.vault_uri
}
# Variables
variable "zone_resource_group_name" {}
variable "dest_sub_id" {}
variable "private_zone_id" {}
variable "private_zone_name" {}
variable "vnet_id" {}

View file

@@ -1,7 +0,0 @@
variable "name" {}
variable "resource_group_name" {}
variable "location" {}
variable "tenant_id" {}

View file

@@ -1,27 +0,0 @@
# Key Vault Access for Current User
resource "azurerm_key_vault_access_policy" "current_user" {
key_vault_id = module.create_kv.kv_id
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = data.azurerm_client_config.current.object_id
secret_permissions = [
"Get", "List", "Set", "Delete"
]
}
# Azure KeyVault secret for MongoDB
resource "azurerm_key_vault_secret" "mongodb" {
name = "MongoDB"
value = var.mongodb_secret
key_vault_id = module.create_kv.kv_id
depends_on = [
azurerm_key_vault_access_policy.current_user
]
}
variable "mongodb_secret" {}

View file

@@ -1,21 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3"
}
random = {
version = ">=3.0"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "aks-support"
}
}
provider "azurerm" {
features {}
}

View file

@@ -1,51 +0,0 @@
resource "random_integer" "deployment" {
min = 10000
max = 99999
}
# Deploy Azure Container Registry
module "create_acr" {
source = "./modules/acr-private"
acrname = "acr${random_integer.deployment.result}"
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
aks_sub_id = data.terraform_remote_state.existing-lz.outputs.aks_subnet_id
private_zone_id = data.terraform_remote_state.existing-lz.outputs.acr_private_zone_id
}
# Deploy Azure Key Vault
module "create_kv" {
source = "./modules/kv-private"
name = "kv${random_integer.deployment.result}-${var.prefix}"
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
tenant_id = data.azurerm_client_config.current.tenant_id
vnet_id = data.terraform_remote_state.existing-lz.outputs.lz_vnet_id
dest_sub_id = data.terraform_remote_state.existing-lz.outputs.aks_subnet_id
private_zone_id = data.terraform_remote_state.existing-lz.outputs.kv_private_zone_id
private_zone_name = data.terraform_remote_state.existing-lz.outputs.kv_private_zone_name
zone_resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
}
# Deploy Public DNS to register application domains hosted in AKS. If you are not planning to use blue-green deployment, you don't need to deploy the public DNS zone; skip this by leaving the public_domain variable empty.
resource "azurerm_dns_zone" "public-dns-apps" {
count = var.public_domain != "" ? 1 : 0
name = var.public_domain
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
}
# DNS Zone name to map A records. This is empty if the public DNS Zone is not deployed.
output "public_dns_zone_apps_name" {
value = one(azurerm_dns_zone.public-dns-apps[*].name)
}
# DNS Zone ID to reference in other terraform state and/or resources/modules. This is empty if the public DNS Zone is not deployed.
output "public_dns_zone_apps_id" {
value = one(azurerm_dns_zone.public-dns-apps[*].id)
}

View file

@@ -1,9 +0,0 @@
state_sa_name = <storage account name>
container_name = "akscs"
access_key = "XXXXXX"
prefix = "akscs"
public_domain = <public domain to register apps hostname>

View file

@@ -1,17 +0,0 @@
#############
# VARIABLES #
#############
variable "prefix" {}
variable "access_key" {} # Provide using a .tfvars file.
variable "state_sa_name" {}
variable "container_name" {}
# The public domain for the public DNS zone, used to register the hostnames assigned to the workloads hosted in AKS; if empty, the DNS zone is not provisioned.
variable "public_domain" {
description = "The public domain for the public DNS zone, used to register the hostnames assigned to the workloads hosted in AKS; if empty, the DNS zone is not provisioned."
default = ""
}

View file

@@ -7,7 +7,7 @@ The following will be created:
* ACR Access to the AKS Cluster
* Updates to KeyVault access policy with AKS keyvault addon
Navigate to "/Scenarios/AKS-Secure-Baseline-Private-AVM/Terraform/" folder
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/" folder
```bash
cd ./06-AKS-cluster

View file

@@ -1,41 +0,0 @@
# Create resources that support AKS
The following will be created:
* Azure Container Registry (supporting-infra.tf)
* Azure Key Vault (supporting-infra.tf)
* Private Link Endpoints for ACR and Key Vault
* Public DNS Zone (supporting-infra.tf); this is optional and required only if you have custom hostnames assigned to endpoints and workloads deployed in AKS, mainly in relation to the blue-green deployment.
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/06-AKS-supporting" folder
```bash
cd ../06-AKS-supporting
```
This deployment will need to reference data objects from the Hub deployment and will need access to the pre-existing state file; update the variables as needed in the .tfvars sample file. This deployment will also need to use a storage access key (from Azure) to read the storage account data. This is a sensitive variable and should not be committed to the code repo.
Once again, a sample terraform.tfvars.sample file is included. Update the required variables, save it, and rename it to **terraform.tfvars**.
### Add the Access key variable to terraform.tfvars
1. Open the *terraform.tfvars.sample* file and add the access key as the value of the access_key variable. Update the other storage related variables.
1. Rename the file to *terraform.tfvars*
## Deploy the Supporting Services
Once the files are updated, deploy using Terraform Init, Plan and Apply.
```bash
terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```
```bash
terraform plan
```
```bash
terraform apply
```
If you get an error about changes to the backend configuration, re-run `terraform init` with the `-reconfigure` flag.
:arrow_forward: [Creation of AKS & enabling Addons](./07-aks-cluster.md)

View file

@@ -1,108 +0,0 @@
#############
# RESOURCES #
#############
# MSI for Kubernetes Cluster (Control Plane)
# This ID is used by the AKS control plane to create or act on other resources in Azure.
# It is referenced in the "identity" block in the azurerm_kubernetes_cluster resource.
resource "azurerm_user_assigned_identity" "mi-aks-cp" {
name = "mi-${var.prefix}-aks-cp"
resource_group_name = var.existing_spoke_vnet_rg_name
location = var.existing_spoke_vnet_rg_location
}
# Role Assignments for Control Plane MSI
resource "azurerm_role_assignment" "aks-to-rt" {
scope = var.existing_rt_id
role_definition_name = "Contributor"
principal_id = azurerm_user_assigned_identity.mi-aks-cp.principal_id
}
resource "azurerm_role_assignment" "aks-to-vnet" {
scope = var.existing_spoke_vnet_id
role_definition_name = "Network Contributor"
principal_id = azurerm_user_assigned_identity.mi-aks-cp.principal_id
}
# Role assignment to create Private DNS zone for the cluster
resource "azurerm_role_assignment" "aks-to-dnszone" {
scope = azurerm_private_dns_zone.aks-dns.id
role_definition_name = "Private DNS Zone Contributor"
principal_id = azurerm_user_assigned_identity.mi-aks-cp.principal_id
}
# Log Analytics Workspace for Cluster
resource "azurerm_log_analytics_workspace" "aks" {
name = "aks-la-01"
resource_group_name = var.existing_spoke_vnet_rg_name
location = var.existing_spoke_vnet_rg_location
sku = "PerGB2018"
retention_in_days = 30
}
# AKS Cluster
module "aks" {
source = "./modules/aks"
depends_on = [
azurerm_role_assignment.aks-to-vnet,
azurerm_role_assignment.aks-to-dnszone
]
resource_group_name = var.existing_spoke_vnet_rg_name
location = var.existing_spoke_vnet_rg_location
prefix = "aks-${var.prefix}"
vnet_subnet_id = var.existing_aks_subnet_id
mi_aks_cp_id = azurerm_user_assigned_identity.mi-aks-cp.id
la_id = azurerm_log_analytics_workspace.aks.id
gateway_name = var.existing_gateway_name
gateway_id = var.existing_gateway_id
private_dns_zone_id = azurerm_private_dns_zone.aks-dns.id
}
# These role assignments grant the groups made in "03-EID" access to use
# the AKS cluster.
resource "azurerm_role_assignment" "appdevs_user" {
scope = module.aks.aks_id
role_definition_name = "Azure Kubernetes Service Cluster User Role"
principal_id = var.existing_appdev_object_id
}
resource "azurerm_role_assignment" "aksops_admin" {
scope = module.aks.aks_id
role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
principal_id = var.existing_aksops_object_id
}
# This role assignment grants the current user running the deployment admin rights
# to the cluster. In production, you should use just the Microsoft Entra groups (above).
resource "azurerm_role_assignment" "aks_rbac_admin" {
scope = module.aks.aks_id
role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
principal_id = data.azurerm_client_config.current.object_id
}
# Role Assignment to Azure Container Registry from AKS Cluster
# This must be granted after the cluster is created in order to use the kubelet identity.
resource "azurerm_role_assignment" "aks-to-acr" {
scope = var.existing_container_registry_id
role_definition_name = "AcrPull"
principal_id = module.aks.kubelet_id
}
# Role Assignments for AGIC on AppGW
# This must be granted after the cluster is created in order to use the ingress identity.
resource "azurerm_role_assignment" "agic_appgw" {
scope = var.existing_gateway_id
role_definition_name = "Contributor"
principal_id = module.aks.agic_id
}

View file

@@ -1,46 +0,0 @@
# These resources will set up the required permissions for
# Pod Identity (v1)
# Managed Identity for Pod Identity
resource "azurerm_user_assigned_identity" "aks_pod_identity" {
resource_group_name = var.existing_spoke_vnet_rg_name
location = var.existing_spoke_vnet_rg_location
name = "pod-identity-example"
}
# Role assignments
resource "azurerm_role_assignment" "aks_identity_operator" {
scope = azurerm_user_assigned_identity.aks_pod_identity.id
role_definition_name = "Managed Identity Operator"
principal_id = module.aks.kubelet_id
}
resource "azurerm_role_assignment" "aks_vm_contributor" {
scope = "/subscriptions/${data.azurerm_client_config.current.subscription_id}/resourcegroups/${module.aks.node_pool_rg}"
role_definition_name = "Virtual Machine Contributor"
principal_id = module.aks.kubelet_id
}
# Azure Key Vault Access Policy for Managed Identity for Pod Identity
resource "azurerm_key_vault_access_policy" "aad_pod_identity" {
key_vault_id = var.existing_key_vault_id // change to data call
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = azurerm_user_assigned_identity.aks_pod_identity.principal_id
secret_permissions = [
"Get", "List"
]
}
# Outputs
output "aad_pod_identity_resource_id" {
value = azurerm_user_assigned_identity.aks_pod_identity.id
description = "Resource ID for the Managed Identity for Pod Identity"
}
output "aad_pod_identity_client_id" {
value = azurerm_user_assigned_identity.aks_pod_identity.client_id
description = "Client ID for the Managed Identity for Pod Identity"
}


@ -1,38 +0,0 @@
{
"issuerParameters": {
"certificateTransparency": null,
"name": "Self"
},
"keyProperties": {
"curve": null,
"exportable": true,
"keySize": 2048,
"keyType": "RSA",
"reuseKey": true
},
"lifetimeActions": [
{
"action": {
"actionType": "AutoRenew"
},
"trigger": {
"daysBeforeExpiry": 90
}
}
],
"secretProperties": {
"contentType": "application/x-pkcs12"
},
"x509CertificateProperties": {
"keyUsage": [
"cRLSign",
"dataEncipherment",
"digitalSignature",
"keyEncipherment",
"keyAgreement",
"keyCertSign"
],
"subject": "CN=CLIGetDefaultPolicy",
"validityInMonths": 12
}
}


@ -1,20 +0,0 @@
# Deploy DNS Private Zone for AKS
resource "azurerm_private_dns_zone" "aks-dns" {
name = var.private_dns_zone_name
resource_group_name = var.existing_spoke_vnet_rg_name // lives in the spoke rg
}
# Needed for the jumpbox to resolve the cluster URL using a private endpoint and private DNS zone
resource "azurerm_private_dns_zone_virtual_network_link" "hub_aks" {
name = "hub_to_aks"
resource_group_name = var.existing_spoke_vnet_rg_name // lives in the spoke rg
private_dns_zone_name = azurerm_private_dns_zone.aks-dns.name
virtual_network_id = var.existing_hub_vnet_id
}
output "aks_private_zone_id" {
value = azurerm_private_dns_zone.aks-dns.id
}
output "aks_private_zone_name" {
value = azurerm_private_dns_zone.aks-dns.name
}


@ -1,48 +0,0 @@
variable "existing_aksops_object_id" {
default = "change me"
}
variable "existing_appdev_object_id" {
default = "change me"
}
variable "existing_spoke_vnet_rg_name" {
default = "change me"
}
variable "existing_spoke_vnet_rg_location" {
default = "change me"
}
variable "existing_spoke_vnet_id" {
default = "change me"
}
variable "existing_hub_vnet_id" {
default = "change me"
}
variable "existing_aks_subnet_id" {
default = "change me"
}
variable "existing_rt_id" {
default = "change me"
}
variable "existing_gateway_name" {
default = "change me"
}
variable "existing_gateway_id" {
default = "change me"
}
variable "existing_container_registry_id" {
default = "change me"
}
# Only needed if you are using pod identity
variable "existing_key_vault_id" {
default = "change me"
}


@ -1,51 +0,0 @@
########
# DATA #
########
# Data From Existing Infrastructure
data "terraform_remote_state" "existing-lz" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "lz-net"
access_key = var.access_key
}
}
data "terraform_remote_state" "aks-support" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "aks-support"
access_key = var.access_key
}
}
data "terraform_remote_state" "aad" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "aad"
access_key = var.access_key
}
}
data "terraform_remote_state" "existing-hub" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "hub-net"
access_key = var.access_key
}
}
data "azurerm_client_config" "current" {}


@ -1,76 +0,0 @@
# Creates cluster with default linux node pool
resource "azurerm_kubernetes_cluster" "akscluster" {
lifecycle {
ignore_changes = [
default_node_pool[0].node_count
]
}
name = var.prefix
dns_prefix = var.prefix
location = var.location
resource_group_name = var.resource_group_name
kubernetes_version = "1.25.5"
private_cluster_enabled = true
private_dns_zone_id = var.private_dns_zone_id
azure_policy_enabled = true
ingress_application_gateway {
gateway_id = var.gateway_id
}
oms_agent {
log_analytics_workspace_id = var.la_id
}
default_node_pool {
name = "defaultpool"
vm_size = "Standard_DS2_v2"
os_disk_size_gb = 30
type = "VirtualMachineScaleSets"
node_count = 3
vnet_subnet_id = var.vnet_subnet_id
}
network_profile {
network_plugin = "azure"
# network_policy = "azure"
outbound_type = "userDefinedRouting"
dns_service_ip = "192.168.100.10"
service_cidr = "192.168.100.0/24"
docker_bridge_cidr = "172.17.0.1/16"
}
role_based_access_control_enabled = true
azure_active_directory_role_based_access_control {
managed = true
// admin_group_object_ids could be set here to grant cluster admin access directly, which might reduce the separate role assignment code elsewhere
azure_rbac_enabled = true
}
identity {
type = "UserAssigned"
identity_ids = [var.mi_aks_cp_id]
}
}
output "aks_id" {
value = azurerm_kubernetes_cluster.akscluster.id
}
output "node_pool_rg" {
value = azurerm_kubernetes_cluster.akscluster.node_resource_group
}
# Managed Identities created for Addons
output "kubelet_id" {
value = azurerm_kubernetes_cluster.akscluster.kubelet_identity[0].object_id
}
output "agic_id" {
value = azurerm_kubernetes_cluster.akscluster.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
}


@ -1,23 +0,0 @@
variable "prefix" {
description = "A prefix used for all resources in this example"
}
variable "location" {
description = "The Azure Region in which all resources in this example should be provisioned"
}
variable "resource_group_name" {}
variable "vnet_subnet_id" {}
variable "mi_aks_cp_id" {}
# variable "mi_aks_kubelet_id" {}
variable "la_id" {}
variable "gateway_name" {}
variable "gateway_id" {}
variable "private_dns_zone_id" {}


@ -1,23 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3"
}
random = {
version = ">=3.0"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "aks-existing-infra"
}
}
provider "azurerm" {
features {}
disable_terraform_partner_id = false
partner_id = "5c162503-e576-4058-b2b4-2d4fd32d3584"
}


@ -1,5 +0,0 @@
access_key = "XXXXXX"
state_sa_name = <storage account name>
container_name = "akscs"
prefix = "escs"
private_dns_zone_name = "privatelink.XXXXXXXXX.azmk8s.io"

Просмотреть файл

@ -1,15 +0,0 @@
#############
# VARIABLES #
#############
variable "prefix" {}
variable "state_sa_name" {}
variable "container_name" {}
variable "access_key" {}
variable "private_dns_zone_name" {
default = "privatelink.eastus.azmk8s.io"
}


@ -1,192 +0,0 @@
#############
# LOCALS #
#############
/*
The following map enables the deployment of multiple clusters. For example, you can use it to deploy two clusters for the blue-green deployment scenario; if you only need a single AKS cluster for a standard deployment, configure the map with a single object.

locals {
  # Map of the AKS clusters to deploy
  aks_clusters = {
    "aks_blue" = {
      # Prefix used to configure unique names and parameter values
      name_prefix = "blue"
      # Boolean flag that enables or disables the deployment of this AKS cluster
      aks_turn_on = true
      # The Kubernetes version to use on the cluster
      k8s_version = "1.25.5"
      # Name of the Application Gateway to associate with the AKS cluster through the AGIC add-on
      appgw_name = "lzappgw-blue"
    },
    "aks_green" = {
      name_prefix = "green"
      aks_turn_on = false
      k8s_version = "1.23.8"
      appgw_name = "lzappgw-green"
    }
  }
}
*/
locals {
aks_clusters = {
"aks_blue" = {
name_prefix = "blue"
aks_turn_on = true
k8s_version = "1.29"
appgw_name = "lzappgw-blue"
},
"aks_green" = {
name_prefix = "green"
aks_turn_on = false
k8s_version = "1.29"
appgw_name = "lzappgw-green"
}
}
}
#############
# RESOURCES #
#############
# MSI for Kubernetes Cluster (Control Plane)
# This ID is used by the AKS control plane to create or act on other resources in Azure.
# It is referenced in the "identity" block in the azurerm_kubernetes_cluster resource.
# Based on the structure of the aks_clusters map, one identity is created per AKS cluster; this is mainly used in the blue-green deployment scenario.
resource "azurerm_user_assigned_identity" "mi-aks-cp" {
for_each = { for aks_clusters in local.aks_clusters : aks_clusters.name_prefix => aks_clusters if aks_clusters.aks_turn_on == true }
name = "mi-${var.prefix}-aks-${each.value.name_prefix}-cp"
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
}
# Role Assignments for Control Plane MSI
# Based on the structure of the aks_clusters map, a role assignment is defined per AKS cluster; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "aks-to-rt" {
for_each = azurerm_user_assigned_identity.mi-aks-cp
scope = data.terraform_remote_state.existing-lz.outputs.lz_rt_id
role_definition_name = "Contributor"
principal_id = each.value.principal_id
}
resource "azurerm_role_assignment" "aks-to-vnet" {
for_each = azurerm_user_assigned_identity.mi-aks-cp
scope = data.terraform_remote_state.existing-lz.outputs.lz_vnet_id
role_definition_name = "Network Contributor"
principal_id = each.value.principal_id
}
# Role assignment to create the Private DNS zone for the cluster
# Based on the structure of the aks_clusters map, a role assignment is defined per AKS cluster; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "aks-to-dnszone" {
for_each = azurerm_user_assigned_identity.mi-aks-cp
scope = azurerm_private_dns_zone.aks-dns.id
role_definition_name = "Private DNS Zone Contributor"
principal_id = each.value.principal_id
}
# Log Analytics Workspace for Cluster
resource "azurerm_log_analytics_workspace" "aks" {
name = "aks-la-01"
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
sku = "PerGB2018"
retention_in_days = 30
}
# AKS Cluster
# Based on the structure of the aks_clusters map, multiple AKS clusters are provisioned; this is mainly used in the blue-green deployment scenario.
module "aks" {
source = "./modules/aks"
for_each = { for aks_clusters in local.aks_clusters : aks_clusters.name_prefix => aks_clusters if aks_clusters.aks_turn_on == true }
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
prefix = "aks-${var.prefix}-${each.value.name_prefix}"
vnet_subnet_id = data.terraform_remote_state.existing-lz.outputs.aks_subnet_id
mi_aks_cp_id = azurerm_user_assigned_identity.mi-aks-cp[each.value.name_prefix].id
la_id = azurerm_log_analytics_workspace.aks.id
gateway_name = data.terraform_remote_state.existing-lz.outputs.gateway_name[each.value.appgw_name]
gateway_id = data.terraform_remote_state.existing-lz.outputs.gateway_id[each.value.appgw_name]
private_dns_zone_id = azurerm_private_dns_zone.aks-dns.id
network_plugin = try(var.network_plugin, "azure")
pod_cidr = try(var.pod_cidr, null)
k8s_version = each.value.k8s_version
depends_on = [
azurerm_role_assignment.aks-to-vnet,
azurerm_role_assignment.aks-to-dnszone
]
}
# These role assignments grant the groups created in "03-EID" access to
# use the AKS cluster.
# Role assignments are defined per deployed AKS cluster instance; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "appdevs_user" {
for_each = module.aks
scope = each.value.aks_id
role_definition_name = "Azure Kubernetes Service Cluster User Role"
principal_id = data.terraform_remote_state.aad.outputs.appdev_object_id
}
resource "azurerm_role_assignment" "aksops_admin" {
for_each = module.aks
scope = each.value.aks_id
role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
principal_id = data.terraform_remote_state.aad.outputs.aksops_object_id
}
# This role assignment grants the current user running the deployment admin rights
# to the cluster. In production, you should use just the Microsoft Entra groups (above).
# Role assignments are defined per deployed AKS cluster instance; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "aks_rbac_admin" {
for_each = module.aks
scope = each.value.aks_id
role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
principal_id = data.azurerm_client_config.current.object_id
}
# Role Assignment to Azure Container Registry from AKS Cluster
# This must be granted after the cluster is created in order to use the kubelet identity.
# Role assignments are defined per deployed AKS cluster instance; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "aks-to-acr" {
for_each = module.aks
scope = data.terraform_remote_state.aks-support.outputs.container_registry_id
role_definition_name = "AcrPull"
principal_id = each.value.kubelet_id
}
# Role Assignments for AGIC on AppGW
# This must be granted after the cluster is created in order to use the ingress identity.
# Role assignments are defined per deployed AKS cluster instance; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "agic_appgw" {
for_each = module.aks
scope = each.value.appgw_id
role_definition_name = "Contributor"
principal_id = each.value.agic_id
}
# Route table to support AKS cluster with kubenet network plugin
resource "azurerm_route_table" "rt" {
count = var.network_plugin == "kubenet" ? 1 : 0
name = "appgw-rt"
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
disable_bgp_route_propagation = false
}
resource "azurerm_subnet_route_table_association" "rt_kubenet_association" {
count = var.network_plugin == "kubenet" ? 1 : 0
subnet_id = data.terraform_remote_state.existing-lz.outputs.appgw_subnet_id
route_table_id = azurerm_route_table.rt[count.index].id
depends_on = [ azurerm_route_table.rt]
}


@ -1,49 +0,0 @@
# These resources will set up the required permissions for
# Microsoft Entra Pod Identity (v1)
# Managed Identity for Pod Identity
resource "azurerm_user_assigned_identity" "aks_pod_identity" {
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
location = data.terraform_remote_state.existing-lz.outputs.lz_rg_location
name = "pod-identity-example"
}
# Role assignments
# Role assignments are defined per deployed AKS cluster instance; this is mainly used in the blue-green deployment scenario.
resource "azurerm_role_assignment" "aks_identity_operator" {
for_each = module.aks
scope = azurerm_user_assigned_identity.aks_pod_identity.id
role_definition_name = "Managed Identity Operator"
principal_id = each.value.kubelet_id
}
resource "azurerm_role_assignment" "aks_vm_contributor" {
for_each = module.aks
scope = "/subscriptions/${data.azurerm_client_config.current.subscription_id}/resourcegroups/${each.value.node_pool_rg}"
role_definition_name = "Virtual Machine Contributor"
principal_id = each.value.kubelet_id
}
# Azure Key Vault Access Policy for Managed Identity for Microsoft Entra Pod Identity
resource "azurerm_key_vault_access_policy" "aad_pod_identity" {
key_vault_id = data.terraform_remote_state.aks-support.outputs.key_vault_id
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = azurerm_user_assigned_identity.aks_pod_identity.principal_id
secret_permissions = [
"Get", "List"
]
}
# Outputs
output "aad_pod_identity_resource_id" {
value = azurerm_user_assigned_identity.aks_pod_identity.id
description = "Resource ID for the Managed Identity for Microsoft Entra Pod Identity"
}
output "aad_pod_identity_client_id" {
value = azurerm_user_assigned_identity.aks_pod_identity.client_id
description = "Client ID for the Managed Identity for Microsoft Entra Pod Identity"
}


@ -1,38 +0,0 @@
{
"issuerParameters": {
"certificateTransparency": null,
"name": "Self"
},
"keyProperties": {
"curve": null,
"exportable": true,
"keySize": 2048,
"keyType": "RSA",
"reuseKey": true
},
"lifetimeActions": [
{
"action": {
"actionType": "AutoRenew"
},
"trigger": {
"daysBeforeExpiry": 90
}
}
],
"secretProperties": {
"contentType": "application/x-pkcs12"
},
"x509CertificateProperties": {
"keyUsage": [
"cRLSign",
"dataEncipherment",
"digitalSignature",
"keyEncipherment",
"keyAgreement",
"keyCertSign"
],
"subject": "CN=CLIGetDefaultPolicy",
"validityInMonths": 12
}
}


@ -1,20 +0,0 @@
# Deploy DNS Private Zone for AKS
resource "azurerm_private_dns_zone" "aks-dns" {
name = var.private_dns_zone_name
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
}
# Needed for the jumpbox to resolve the cluster URL using a private endpoint and private DNS zone
resource "azurerm_private_dns_zone_virtual_network_link" "hub_aks" {
name = "hub_to_aks"
resource_group_name = data.terraform_remote_state.existing-lz.outputs.lz_rg_name
private_dns_zone_name = azurerm_private_dns_zone.aks-dns.name
virtual_network_id = data.terraform_remote_state.existing-hub.outputs.hub_vnet_id
}
output "aks_private_zone_id" {
value = azurerm_private_dns_zone.aks-dns.id
}
output "aks_private_zone_name" {
value = azurerm_private_dns_zone.aks-dns.name
}


@ -1,50 +0,0 @@
########
# DATA #
########
# Data From Existing Infrastructure
data "terraform_remote_state" "existing-lz" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "lz-net"
access_key = var.access_key
}
}
data "terraform_remote_state" "aks-support" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "aks-support"
access_key = var.access_key
}
}
data "terraform_remote_state" "aad" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "aad"
access_key = var.access_key
}
}
data "terraform_remote_state" "existing-hub" {
backend = "azurerm"
config = {
storage_account_name = var.state_sa_name
container_name = var.container_name
key = "hub-net"
access_key = var.access_key
}
}
data "azurerm_client_config" "current" {}


@ -1,84 +0,0 @@
# Creates cluster with default linux node pool
resource "azurerm_kubernetes_cluster" "akscluster" {
name = var.prefix
dns_prefix = var.prefix
location = var.location
resource_group_name = var.resource_group_name
kubernetes_version = var.k8s_version
private_cluster_enabled = true
private_dns_zone_id = var.private_dns_zone_id
azure_policy_enabled = true
private_cluster_public_fqdn_enabled = false
ingress_application_gateway {
gateway_id = var.gateway_id
}
oms_agent {
log_analytics_workspace_id = var.la_id
}
default_node_pool {
name = "defaultpool"
vm_size = "Standard_DS2_v2"
os_disk_size_gb = 30
type = "VirtualMachineScaleSets"
node_count = 3
vnet_subnet_id = var.vnet_subnet_id
}
network_profile {
network_plugin = var.network_plugin
outbound_type = "userDefinedRouting"
dns_service_ip = "192.168.100.10"
service_cidr = "192.168.100.0/24"
docker_bridge_cidr = "172.16.1.1/30"
pod_cidr = var.pod_cidr
}
role_based_access_control_enabled = true
azure_active_directory_role_based_access_control {
managed = true
// admin_group_object_ids could be set here to grant cluster admin access directly, which might reduce the separate role assignment code elsewhere
azure_rbac_enabled = true
}
identity {
type = "UserAssigned"
identity_ids = [var.mi_aks_cp_id]
}
key_vault_secrets_provider {
secret_rotation_enabled = false
}
lifecycle {
ignore_changes = [
default_node_pool[0].node_count
]
}
}
output "aks_id" {
value = azurerm_kubernetes_cluster.akscluster.id
}
output "node_pool_rg" {
value = azurerm_kubernetes_cluster.akscluster.node_resource_group
}
# Managed Identities created for Addons
output "kubelet_id" {
value = azurerm_kubernetes_cluster.akscluster.kubelet_identity.0.object_id
}
output "agic_id" {
value = azurerm_kubernetes_cluster.akscluster.ingress_application_gateway.0.ingress_application_gateway_identity.0.object_id
}
output "appgw_id" {
value = var.gateway_id
}


@ -1,37 +0,0 @@
variable "prefix" {
description = "A prefix used for all resources in this example"
}
variable "location" {
description = "The Azure Region in which all resources in this example should be provisioned"
}
variable "resource_group_name" {}
variable "vnet_subnet_id" {}
variable "mi_aks_cp_id" {}
# variable "mi_aks_kubelet_id" {}
variable "la_id" {}
variable "gateway_name" {}
variable "gateway_id" {}
variable "private_dns_zone_id" {}
variable "network_plugin" {
default = "azure"
}
variable "pod_cidr" {}
variable "k8s_version" {
description = "Kubernetes version to assign to the AKS Cluster"
}


@ -1,23 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3"
}
random = {
version = ">=3.0"
}
}
backend "azurerm" {
# resource_group_name = "" # Partial configuration, provided during "terraform init"
# storage_account_name = "" # Partial configuration, provided during "terraform init"
# container_name = "" # Partial configuration, provided during "terraform init"
key = "aks"
}
}
provider "azurerm" {
features {}
disable_terraform_partner_id = false
partner_id = "5c162503-e576-4058-b2b4-2d4fd32d3584"
}


@ -1,7 +0,0 @@
access_key = "XXXXXX"
state_sa_name = <storage account name>
container_name = "akscs"
prefix = "escs"
private_dns_zone_name = "privatelink.XXXXXXXXX.azmk8s.io" // change "XXXXXXXXX" to the region where your AKS cluster is deployed
network_plugin = "azure" // the other option is kubenet
#pod_cidr = "172.17.0.0/16" # uncomment if using kubenet


@ -1,23 +0,0 @@
#############
# VARIABLES #
#############
variable "prefix" {}
variable "state_sa_name" {}
variable "container_name" {}
variable "access_key" {}
variable "private_dns_zone_name" {
default = "privatelink.eastus.azmk8s.io"
}
variable "network_plugin" {
default = "azure"
}
variable "pod_cidr" {
default = null
}


@ -1,135 +0,0 @@
# Create resources for the AKS Private Cluster
The following will be created:
* AKS Private Clusters
* Log Analytics Workspace
* Managed Identity for AKS Control Plane
* Managed Identity for Application Gateway Ingress Controller
* Managed Identity for the Azure Key Vault secrets provider add-on
* AKS Pod Identity Assignments - OPTIONAL
Navigate to "/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster" folder
```bash
cd ../07-AKS-cluster
```
## AKS Networking Choices
You can choose which AKS network plugin to deploy the cluster with: azure or kubenet.
For the Azure network plugin, each pod in the cluster gets an IP from the AKS subnet CIDR. This allows Application Gateway and any other external service to reach the pod using this IP.
For the kubenet plugin, all pods get an IP address from the pod CIDR within the cluster. To route traffic to these pods, the TCP/UDP flow must go through the node where the pod resides. By default, AKS keeps the route table (UDR) associated with the AKS subnet updated with a /24 route per node, mapping each node's slice of the pod CIDR to that node's IP address.
Currently, Application Gateway does not support any scenario where a route 0.0.0.0/0 needs to be redirected through a virtual appliance, a hub/spoke virtual network, or on-premises (forced tunneling). Since Application Gateway doesn't support a UDR with a 0.0.0.0/0 route, and such a route is a requirement for AKS egress control, you cannot use the same route table for both subnets (the Application Gateway subnet and the AKS subnet).
This means the Application Gateway doesn't know how to route traffic to a pod backend pool in an AKS cluster when you are using the kubenet plugin. Because of this limitation, you cannot associate the default AKS UDR with the Application Gateway subnet, since an AKS cluster with an egress controller requires a 0.0.0.0/0 route. It's possible to create a manual route table to address this problem, but once a node scale operation happens, the routes need to be updated again, and this would require a manual update.
For the purposes of this deployment when used with kubenet, a route table will be applied to the Application Gateway subnet during the deployment. You will need to create 3 manual routes inside the route table that point to the nodes where the pods reside.
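As a rough sketch of one such manual route (the addresses and route name here are illustrative, not taken from this deployment; each node's pod CIDR slice and private IP can be read from `kubectl describe nodes`, and $SPOKERG is assumed to hold the spoke resource group name):
```bash
# Illustrative only: forward one node's pod CIDR slice to that node's private IP.
# Repeat with the real values for each of the 3 nodes in the cluster.
az network route-table route create -g $SPOKERG --route-table-name appgw-rt \
  -n node0-pods \
  --address-prefix 10.244.0.0/24 \
  --next-hop-type VirtualAppliance \
  --next-hop-ip-address 10.240.0.4
```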
It's also possible to use an Azure external solution to watch the scaling operations and auto-update the routes using Azure Automation, Azure Functions or Logic Apps.
### Reference: Follow steps 1 and 2 below only if you are going with the Kubenet option
Step 1:
[How to setup networking between Application Gateway and AKS](https://azure.github.io/application-gateway-kubernetes-ingress/how-tos/networking/)
Step 2: (Optional - *if you don't do this, you'll have to manually update the route table after scaling changes in the cluster*)
[Using AKS kubenet egress control with AGIC](https://github.com/Welasco/AKS-AGIC-UDR-AutoUpdate)
More info:
[Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet)
[Application Gateway infrastructure configuration](https://learn.microsoft.com/en-us/azure/application-gateway/configuration-infrastructure#supported-user-defined-routes)
This deployment will need to reference data objects from the hub deployment and will need access to the pre-existing state file; update the variables as needed. This deployment will also need to use a storage access key (from Azure) to read the storage account data. This is a sensitive variable and should not be committed to the code repo.
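One way to handle this is to look the key up at deploy time instead of committing it, using the variables from the state-storage step; Terraform picks it up through the `TF_VAR_` convention:
```bash
# Fetch the storage account access key at deploy time and expose it to Terraform
# as var.access_key, so it never needs to be written into terraform.tfvars.
export TF_VAR_access_key=$(az storage account keys list \
  -g $TFSTATE_RG -n $STORAGEACCOUNTNAME --query "[0].value" -o tsv)
```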
Once again, a sample terraform.tfvars.sample file is included. Update the required variables, save it and rename it to **terraform.tfvars**.
Once the files are updated, deploy using Terraform Init, Plan and Apply.
```bash
terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
```
```bash
terraform plan
```
```bash
terraform apply
```
If you get an error about changes to the backend configuration, run init again with the `-reconfigure` flag.
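For example:
```bash
terraform init -reconfigure \
  -backend-config="resource_group_name=$TFSTATE_RG" \
  -backend-config="storage_account_name=$STORAGEACCOUNTNAME" \
  -backend-config="container_name=$CONTAINERNAME"
```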
## The Key Vault Add-On
We start by creating some environment variables. The AKS cluster name can be found in the portal or in the variables file. The value is aks-<prefix value>.
```bash
AKSCLUSTERNAME=<AKS cluster name>
AKSRESOURCEGROUP=<AKS RG name>
KV_NAME=<Key vault name>
KV_RESOURCEGROUP=<KV RG name>
```
## Enable aks-preview Azure CLI extension and add AKS-AzureKeyVaultSecretsProvider feature
You also need the *aks-preview* Azure CLI extension version 0.5.9 or later. If you don't have it already, enter the following at your command line:
```bash
# Install the aks-preview extension
az extension add --name aks-preview
# Update the extension to make sure you have the latest version installed
az extension update --name aks-preview
```
You also need to register the AKS-AzureKeyVaultSecretsProvider preview feature in your subscription. Check to see if it has already been enabled:
```bash
az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-AzureKeyVaultSecretsProvider')].{Name:name,State:properties.state}"
```
If not, enter the command below to enable it:
```bash
az feature register --namespace "Microsoft.ContainerService" --name "AKS-AzureKeyVaultSecretsProvider"
```
It takes a few minutes for the status to show *Registered*. Verify the registration status by using the [az feature list](https://learn.microsoft.com/cli/azure/feature#az_feature_list) command:
```bash
az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-AzureKeyVaultSecretsProvider')].{Name:name,State:properties.state}"
```
When ready, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register](https://learn.microsoft.com/cli/azure/provider#az_provider_register) command:
```bash
az provider register --namespace Microsoft.ContainerService
```
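You can confirm the refresh has completed by checking the provider's registration state:
```bash
az provider show --namespace Microsoft.ContainerService --query "registrationState" -o tsv
```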
Update the permissions on the Key Vault to allow access from the newly created identity. The object type can be certificate, key, or secret; in this case, all three are needed. Run the command below three times, once for each option.
```bash
az keyvault set-policy -n $KV_NAME -g $KV_RESOURCEGROUP --<object type>-permissions get --spn <client-id>
```
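Spelled out, and assuming the add-on identity's client ID has been placed in a CLIENT_ID variable, the three invocations look like this:
```bash
CLIENT_ID=<client id of the azurekeyvaultsecrets identity>
az keyvault set-policy -n $KV_NAME -g $KV_RESOURCEGROUP --certificate-permissions get --spn $CLIENT_ID
az keyvault set-policy -n $KV_NAME -g $KV_RESOURCEGROUP --key-permissions get --spn $CLIENT_ID
az keyvault set-policy -n $KV_NAME -g $KV_RESOURCEGROUP --secret-permissions get --spn $CLIENT_ID
```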
## Grant access from hub network to private link created for keyvault
For the jumpbox you just created in the hub network to have access to the Key Vault's private link, you need to give that network access. To do this:
1. Find the Private DNS zone created for Key Vault. This should be in the landing zone resource group (escs-lz01-rg for example)
![Location of private link for keyvault](../media/keyvault-privatelink-location.png)
2. Click on **Virtual network links** in the left blade under **Settings**
3. Click on **+ Add** in the top left of the next screen
4. Enter a name for the link, e.g. *hub_to_kv*
5. Select the hub virtual network for the **Virtual network** field
6. Click on **OK** at the bottom
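If you prefer the CLI, a rough equivalent of the portal steps above is (the resource group, zone name and hub VNet ID are placeholders for your environment):
```bash
az network private-dns link vnet create -g <landing zone resource group> \
  -z <key vault private dns zone name> \
  -n hub_to_kv \
  -v <hub vnet resource id> \
  -e false  # registration disabled; the link is for name resolution only
```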
> :warning: Stop here if you are deploying the blue-green scenario and return to the next step there. Do not deploy the basic workload in the link below.
:arrow_forward: [Deploy a Basic Workload](./08-workload.md)


@ -1,354 +0,0 @@
# Deploy a Basic Workload using the Fruit Smoothie Ratings Application
The application consists of a web frontend, an API service and a MongoDB database.
Because the infrastructure has been deployed in a private AKS cluster setup with private endpoints for the container registry and other components, you will need to build the application containers and publish them to the Container Registry from the Dev Jumpbox in the Hub VNET, connecting via the Bastion Host service. If your computer is connected to the hub network, you may be able to just use that as well. The rest of the steps can be performed on your local machine by using AKS Run commands, which allow access into private clusters using RBAC. This improves security and provides a more user-friendly way of editing YAML files.
## Connecting to the Bastion Host
1. Use Bastion Host to connect to the jumpbox.
2. Enter the username and password. If you have used a public key, then select upload private key (corresponding to the public key) to connect.
3. Once you connect, ensure you permit the site to read the contents of your clipboard
* Clone this repo on the jumpbox.
```bash
git clone https://github.com/Azure/AKS-Landing-Zone-Accelerator
```
* Run the script below to install the required tools (Azure CLI, Docker, kubectl, Helm, etc.). First, navigate to the "AKS-Landing-Zone-Accelerator/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub" folder.
```bash
cd AKS-Landing-Zone-Accelerator/Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub
chmod +x script.sh
sudo ./script.sh
```
* Login to Azure
```bash
TENANTID=<tenant id>
az login -t $TENANTID --debug
```
* Ensure you are connected to the correct subscription
```bash
az account set --subscription <subscription id>
```
## Connect the Container Registry Private link to the Hub network
Since the Container Registry can only be accessed via private link, we need to connect it to the network where the jumpbox (or whichever computer you are using to create the container images) resides. We already added the container registry to the spoke network where the cluster resides using Terraform.
1. Go to Azure portal
2. Find the Private DNS zone created for ACR. This should be in the landing zone resource group (ESLZ-SPOKE for example)
![Location of private link for acr](../media/acr-privatelink-location.png)
3. Click on **Virtual network links** in the left blade under **Settings**
4. Click on **+ Add** in the top left of the next screen
5. Enter a name for the link, e.g. *hub_to_acr*
6. Select the hub virtual network for the **Virtual network** field
7. Click on **OK** at the bottom
## Provide yourself Access to Create Secrets in your Key vault
1. Go to the Azure portal and find your Key Vault. This should be in the landing zone resource group (ESLZ-SPOKE for example)
2. You should see your pod-identity-example managed identity as well as the azurekeyvaultsecrets identity. The pod identity will provide pods access to pull secrets from the Key Vault. The azurekeyvaultsecrets identity will be used by the Key Vault driver. If either of these is missing, perhaps you skipped a step.
3. Click on **Access policies** under **Settings** in the left blade ![add access policy](../media/add-access-policy-acr.png)
4. Select the required access policies ![add access policy](../media/add-access-policy-acr2.png)
5. Under **Select principal** click on the **None selected** link and select the user group(s) you created for this to provide you and everyone in the group access to the Key vault
6. Click **Select** at the bottom of the screen
7. **Important**: Click **Save** at the top of the next screen to save the changes ![add access policy](../media/add-access-policy-acr2.png)
## Build Container Images
Clone the required repos to the Dev Jumpbox:
1. The Ratings API repo
```bash
cd ..
git clone https://github.com/MicrosoftDocs/mslearn-aks-workshop-ratings-api.git
```
2. The Ratings Web repo
```bash
git clone https://github.com/MicrosoftDocs/mslearn-aks-workshop-ratings-web.git
```
Navigate to each of the application code directories, build and tag the containers with the name of your Azure Container Registry, and push the images to ACR. Make sure you reference the correct ACR.
```bash
# enter the name of your ACR below
SPOKERG=<resource group name for spoke>
ACRNAME=<ACR NAME>
cd mslearn-aks-workshop-ratings-api
sudo docker build . -t $ACRNAME.azurecr.io/ratings-api:v1
cd ../mslearn-aks-workshop-ratings-web
sudo docker build . -t $ACRNAME.azurecr.io/ratings-web:v1
```
Log into ACR
```bash
sudo az acr login -n $ACRNAME
```
Push the images into the container registry. Ensure you are logged into the Azure Container Registry; the command above should have shown a successful login.
```bash
sudo docker push $ACRNAME.azurecr.io/ratings-api:v1
sudo docker push $ACRNAME.azurecr.io/ratings-web:v1
```
To verify they have been pushed run the following commands:
```bash
az acr repository show -n $ACRNAME --image ratings-api:v1
az acr repository show -n $ACRNAME --image ratings-web:v1
```
Create the secret in Key Vault. You may use anything you'd like for the username and password of the MongoDB database, but this needs to match what you will use when you install the Helm chart in the next steps.
**Note:** Passwords with special characters in a connection string might break the connection because of wrong encoding.
**Note:** Ensure you have access to create passwords in keyvault by going to the Key vault in Azure Portal, clicking on Access Policies and Add Access Policy. **Don't forget to hit "Save" after adding yourself or user group to Key vault access**
```bash
# update keyvault name, username and password before running the command below
KEYVAULTNAME=<key vault name>
PGUSERNAME=<mongodb user name>
PGPASSWORD=<mongodb password>
az keyvault secret set --name mongodburi --vault-name $KEYVAULTNAME --value "mongodb://$PGUSERNAME:$PGPASSWORD@ratings-mongodb.ratingsapp:27017/ratingsdb"
```
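If your chosen password does contain special characters, one workaround (a sketch, assuming python3 is available where you run this) is to percent-encode the password for the connection string:
```bash
# Percent-encode the password so special characters survive in the URI;
# the raw $PGPASSWORD is still what you pass to the Helm install later.
ENCODED_PGPASSWORD=$(python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=''))" "$PGPASSWORD")
az keyvault secret set --name mongodburi --vault-name $KEYVAULTNAME --value "mongodb://$PGUSERNAME:$ENCODED_PGPASSWORD@ratings-mongodb.ratingsapp:27017/ratingsdb"
```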
## Deploy the database into the cluster
The following steps can be performed using AKS Run Commands from your local machine, provided you have the correct permissions.
Ensure the AKS run commands are working as expected.
```bash
# create environment variable for cluster and its resource group name
ClusterRGName=<cluster resource group name>
ClusterName=<AKS cluster name>
```
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get nodes"
```
On the Kubernetes cluster, create a namespace for the Ratings Application.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl create namespace ratingsapp"
```
The MongoDB backend application is installed using Helm. Your username and password must be the same username and password used in the connection string secret that was created in Key Vault in the previous step.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "helm repo add bitnami https://charts.bitnami.com/bitnami && helm install ratings bitnami/mongodb --namespace ratingsapp --set auth.username=$PGUSERNAME,auth.password=$PGPASSWORD,auth.database=ratingsdb"
```
## Deploy the workload into the cluster
> **Note:** The Key Vault secrets provider add-on must already be in place before this step; it was enabled as part of the AKS cluster deployment in the previous section.
In this section, you will be manipulating some of the deployment YAML files, replacing some entries related to Azure Key Vault, Azure Container Registry and Microsoft Entra ID references like client ID, tenant ID, etc.
All files will be under the following folder: "Scenarios/AKS-Secure-Baseline-PrivateCluster/Apps/RatingsApp"
You will have to carefully update the following files:
- [api-secret-provider-class.yaml](../Apps/RatingsApp/api-secret-provider-class.yaml)
- [1-ratings-api-deployment.yaml](../Apps/RatingsApp/1-ratings-api-deployment.yaml)
- [3a-ratings-web-deployment.yaml](../Apps/RatingsApp/3a-ratings-web-deployment.yaml)
- [4-ratings-web-service.yaml](../Apps/RatingsApp/4-ratings-web-service.yaml)
### Deploy workload
Navigate to "Scenarios/AKS-Secure-Baseline-PrivateCluster/Apps/RatingsApp" folder.
1. Updating **api-secret-provider-class.yaml**
Update the **"api-secret-provider-class.yaml"** file to reflect the correct value for the following items:
- Key Vault name
- Client ID for the AKS Key Vault Add-on
- Tenant ID for the subscription.
> If you don't have the Client ID, you can find it by going to the Key vault and clicking on **Access Policies** in the left blade. Find the identity that starts with "azurekeyvaultsecrets-name of your aks cluster", then look for the resource by searching for the name in the search bar at the top. When you click on the resource, you will find the Client ID on the right side of the screen.
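Alternatively, assuming the Key Vault secrets provider add-on is enabled on the cluster, you can query the add-on identity's client ID directly from the CLI:
```bash
az aks show -g $ClusterRGName -n $ClusterName \
  --query "addonProfiles.azureKeyvaultSecretsProvider.identity.clientId" -o tsv
```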
Deploy the edited yaml file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f api-secret-provider-class.yaml -n ratingsapp" --file api-secret-provider-class.yaml
```
1. Updating **1-ratings-api-deployment.yaml**
Update the **"1-ratings-api-deployment.yaml"** file to reflect the correct name for the Azure Container Registry. Deploy the file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 1-ratings-api-deployment.yaml -n ratingsapp" --file 1-ratings-api-deployment.yaml
```
1. Ensure the ratings-api deployment was successful.
If you don't get a running state, it is likely that the pod was unable to get the secret from Key Vault. This may be because the database username and password don't match the connection string that was created in Key Vault, or because the azurekeyvaultsecrets identity wasn't granted the proper access to the Key Vault.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get pods -n ratingsapp"
```
You can troubleshoot container creation issues by running
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl describe pod <pod name> -n ratingsapp"
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl logs <pod name> -n ratingsapp"
```
1. Deploying **2-ratings-api-service.yaml**
Deploy the "2-ratings-api-service.yaml" file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 2-ratings-api-service.yaml -n ratingsapp" --file 2-ratings-api-service.yaml
```
1. Updating **3a-ratings-web-deployment.yaml**
Update the **"3a-ratings-web-deployment.yaml"** file to reflect the correct name for the Azure Container Registry. Deploy the file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 3a-ratings-web-deployment.yaml -n ratingsapp" --file 3a-ratings-web-deployment.yaml
```
1. Deploy the "4-ratings-web-service.yaml" file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 4-ratings-web-service.yaml -n ratingsapp" --file 4-ratings-web-service.yaml
```
## **(Optional)** Deploy the Ingress without support for HTTPS
This step is optional. If you would like to go straight to using HTTPS, which is the secure option, skip this section and go straight to the **Deploy the Ingress with HTTPS support** section.
It is important to first configure the NSG for the Application Gateway to accept traffic on port 80 if using the HTTP option. Run the following command to allow HTTP.
```bash
APPGWSUBNSG=<Name of NSG for AppGwy>
az network nsg rule create -g $SPOKERG --nsg-name $APPGWSUBNSG -n AllowHTTPInbound --priority 1000 \
--source-address-prefixes '*' --source-port-ranges '*' \
--destination-address-prefixes '*' --destination-port-ranges 80 --access Allow \
--protocol Tcp --description "Allow Inbound traffic through the Application Gateway on port 80"
```
1. Deploy the **"5-ratings-web-ingress.yaml"** file.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 5-http-ratings-web-ingress.yaml -n ratingsapp" --file 5-http-ratings-web-ingress.yaml
```
1. Get the IP address of your ingress controller
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get ingress -n ratingsapp"
```
### Check your deployed workload
1. Copy the IP address displayed, open a browser, navigate to that address, and explore your website
![deployed workload](../media/deployed-workload.png)
Now that the test is complete, it is important to delete the rule that allows HTTP traffic, to keep the cluster safe.
```bash
az network nsg rule delete -g $SPOKERG --nsg-name $APPGWSUBNSG -n AllowHTTPInbound
```
**The optional steps end here.**
## Deploy the Ingress with HTTPS support
**Please note: This section is still in development**
A fully qualified DNS name and a certificate are needed to configure HTTPS support on the front end of the web application. You are welcome to bring your own certificate and DNS if you have them available; however, a simple way to demonstrate this is to use a self-signed certificate with an FQDN configured on the IP address used by the Application Gateway.
**Objectives**
1. Configure the Public IP address of your Application Gateway to have a DNS name. It will be in the format of customPrefix.region.cloudapp.azure.com
2. Create a certificate using the FQDN and store it in Key Vault.
### Creating Public IP address for your Application Gateway
1. Find your application gateway in your landing zone resource group and click on it. By default it should be in the spoke resource group.
2. Click on the *Frontend public IP address*
![front end public ip address](../media/front-end-pip-link.png)
3. Click on configuration in the left blade of the resulting page.
4. Enter a unique DNS name in the field provided and click **Save**.
![creating dns](../media/dns-created.png)
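The same can be done from the CLI (the public IP name is a placeholder; look it up in the spoke resource group):
```bash
az network public-ip update -g $SPOKERG -n <appgw public ip name> --dns-name <unique dns prefix>
```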
### Create the certificate using Let's Encrypt
We are going to use Let's Encrypt and cert-manager to provide easy-to-use certificate management for the application within AKS. cert-manager will also handle future certificate renewals, removing any manual processes.
1. First of all, you will need to install cert-manager into your cluster.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.8.0/cert-manager.yaml"
```
This will first create a new namespace called cert-manager, which is where all of the resources for cert-manager will be kept. It will then download some CRDs (CustomResourceDefinitions), which provide extra functionality in the cluster for the creation of certificates.
We will then proceed to test this certificate process with a staging certificate.
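Before continuing, you can confirm the cert-manager components are running, using the same run-command pattern as above:
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get pods -n cert-manager"
```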
2. Edit the 'certificateIssuer.yaml' file and include your email address. This will be used for certificate renewal notifications.
Deploy certificateIssuer.yaml
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f certificateIssuer.yaml -n ratingsapp" --file certificateIssuer.yaml
```
3. Edit the '5-https-ratings-web-ingress.yaml' file with the FQDN of your host that you created earlier on the public IP of the Application Gateway.
Deploy 5-https-ratings-web-ingress.yaml
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl apply -f 5-https-ratings-web-ingress.yaml -n ratingsapp" --file 5-https-ratings-web-ingress.yaml
```
After updating the ingress, a request will be sent to Let's Encrypt for a 'staging' certificate. This can take a few minutes. You can check on the progress by running the command below. When the status shows Ready = True, you should be able to browse to the same URL you configured on the PIP of the Application Gateway earlier.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get certificate -n ratingsapp"
```
If you notice the status is not changing after a few minutes, there could be a problem with your certificate request. You can gather more information by running a describe on the request using the commands below.
```bash
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl get certificaterequest -n ratingsapp"
az aks command invoke --resource-group $ClusterRGName --name $ClusterName --command "kubectl describe certificaterequest <certificaterequestname> -n ratingsapp"
```
Upon navigating to your new FQDN, you will receive a certificate warning because it is not a production certificate.
![deployed workload https](../media/deployed-workload-https.png)
**Note: For production clusters, it is better to use a paid-for SSL certificate, because it can offer better liability protection than a free SSL certificate.**
## Next Step
:arrow_forward: [Cleanup](./09-cleanup.md)


@ -1,85 +0,0 @@
# Cleanup
Remember to destroy resources that are not in use. The instructions below assume your terminal is in the "Scenarios/AKS-Secure-Baseline-PrivateCluster/Apps/RatingsApp" folder. If you are not there, navigate there first.
1. Delete the AKS cluster
```bash
cd ../../Terraform/07-AKS-cluster
```
```bash
terraform init
```
```bash
terraform destroy
```
2. Delete the supporting services
```bash
cd ../06-AKS-supporting
```
```bash
terraform init
```
```bash
terraform destroy
```
3. Delete the spoke network
```bash
cd ../05-Network-LZ
```
```bash
terraform init
```
```bash
terraform destroy
```
If you get an error, move on to the next step
4. Delete the hub network
```bash
cd ../04-Network-Hub
```
```bash
terraform init
```
```bash
terraform destroy
```
If you get an error stating that some resources weren't destroyed, run terraform destroy again
```bash
terraform destroy
```
5. Delete the user groups you created
```bash
cd ../03-EID
```
```bash
terraform init
```
```bash
terraform destroy
```


@ -1,36 +0,0 @@
## Steps of Implementation for AKS Construction Set
A deployment of AKS-hosted workloads typically experiences a separation of duties and lifecycle management in the area of prerequisites, the host network, the cluster infrastructure, and finally the workload itself. This reference implementation is similar. Also, be aware that our primary purpose is to illustrate the topology and decisions of a baseline cluster. We feel a "step-by-step" flow will help you learn the pieces of the solution and give you insight into the relationship between them. Ultimately, lifecycle/SDLC management of your cluster and its dependencies will depend on your situation (team roles, organizational standards, tooling, etc.), and must be implemented as appropriate for your needs.
## Accounting for Separation of Duties
While the code here is located in one folder in a single repo, the steps are designed to mimic how an organization may break up the deployment of various Azure components across teams, into different code repos or have them run by different pipelines with specific credentials.
## Keeping It As Simple As Possible
The code here is purposely written to avoid loops, complex variables and logic. In most cases, it is resource blocks, small modules and limited variables, with the goal of making it easier to determine what is being deployed and how they are connected. Resources are broken into separate files for future modularization or adjustments as needed by your organization.
## Terraform State Management
In this example, state is stored in an Azure Storage account that was created out-of-band. All deployments reference this storage account to either store state or reference variables from other parts of the deployment; however, you may choose to use other tools for state management, like Terraform Cloud, after making the necessary code changes.
## Getting Started
This section is organized using folders that match the steps outlined below. Make any necessary adjustments to the variables and settings within that folder to match the needs of your deployment.
1. Prerequisites: Clone this repo, install Azure CLI, install Terraform
2. [Creation of Azure Storage Account for State Management](./02-state-storage.md)
3. [Create or import Microsoft Entra groups for AKS cluster admins and AKS cluster users](./03-eid.md)
4. [Creation of Hub Network & its respective Components](./04-network-hub.md)
5. [Creation of Spoke Network & its respective Components](./05-network-lz.md)
6. [Creation of Supporting Components for AKS](./06-aks-supporting.md)
7. [Creation of AKS & enabling Addons](./07-aks-cluster.md)
8. [Deploy a Basic Workload](./08-workload.md)
## Deploying AKS into Existing Infrastructure
The steps above assume that you will be creating the Hub and Spoke (Landing Zone) Network and supporting components using the code provided, where each step refers to state file information from the previous steps.
To deploy AKS into an existing network, use the [AKS cluster for existing infrastructure](./07-AKS-cluster-existing-infra) folder. Update the "existing-infra.variables.tf" file to reference the names and resource IDs of the pre-existing infrastructure.


@ -1,52 +0,0 @@
name: 'Terraform'
on:
push:
branches:
- main
pull_request:
jobs:
terraform:
name: 'Terraform'
env:
ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
# The backend settings referenced by the init steps below are assumed to be
# defined as repository secrets; adjust the names to match your setup.
TFSTATE_RG: ${{ secrets.TFSTATE_RG }}
STORAGEACCOUNTNAME: ${{ secrets.STORAGEACCOUNTNAME }}
CONTAINERNAME: ${{ secrets.CONTAINERNAME }}
runs-on: ubuntu-latest
environment: production
# Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
defaults:
run:
shell: bash
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout
uses: actions/checkout@v2
# Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token
- name: Setup Terraform
uses: hashicorp/setup-terraform@v1
with:
cli_config_credentials_token: ${{ secrets.DEPLOYMENT_SP }}
# Initialize the TF working directory.
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 03-EID
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/03-EID-create
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
# Generates an execution plan for Terraform
- name: Terraform Plan 03-EID
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/03-EID-create
run: terraform plan
# On push to main, build or change infrastructure according to Terraform configuration files
# Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
- name: Terraform Apply 03-EID
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/03-EID-create
run: terraform apply -auto-approve


@ -1,86 +0,0 @@
name: 'Terraform'
on:
push:
branches:
- main
pull_request:
jobs:
terraform:
name: 'Terraform'
env:
ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
TF_VAR_admin_password: ${{ secrets.ADMIN_PASSWORD }}
TF_VAR_access_key: ${{ secrets.ACCESS_KEY }}
# The backend settings referenced by the init steps below are assumed to be
# defined as repository secrets; adjust the names to match your setup.
TFSTATE_RG: ${{ secrets.TFSTATE_RG }}
STORAGEACCOUNTNAME: ${{ secrets.STORAGEACCOUNTNAME }}
CONTAINERNAME: ${{ secrets.CONTAINERNAME }}
runs-on: ubuntu-latest
environment: production
# Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
defaults:
run:
shell: bash
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout
uses: actions/checkout@v2
# Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token
- name: Setup Terraform
uses: hashicorp/setup-terraform@v1
with:
cli_config_credentials_token: ${{ secrets.DEPLOYMENT_SP }}
# Initialize the TF working directory.
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 04-Network-Hub
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
# Generates an execution plan for Terraform
- name: Terraform Plan 04-Network-Hub
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub
run: terraform plan
# On push to main, build or change infrastructure according to Terraform configuration files
# Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
- name: Terraform Apply 04-Network-Hub
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/04-Network-Hub
run: terraform apply -auto-approve
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 05-Network-LZ
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/05-Network-LZ
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
- name: Terraform Plan 05-Network-LZ
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/05-Network-LZ
run: terraform plan
- name: Terraform Apply 05-Network-LZ
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/05-Network-LZ
run: terraform apply -auto-approve
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 06-AKS-supporting
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/06-AKS-supporting
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
- name: Terraform Plan 06-AKS-supporting
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/06-AKS-supporting
run: terraform plan
- name: Terraform Apply 06-AKS-supporting
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/06-AKS-supporting
run: terraform apply -auto-approve


@@ -1,52 +0,0 @@
name: 'Terraform'
on:
push:
branches:
- main
pull_request:
jobs:
terraform:
name: 'Terraform'
env:
ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
runs-on: ubuntu-latest
environment: production
# Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
defaults:
run:
shell: bash
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout
uses: actions/checkout@v2
# Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token
- name: Setup Terraform
uses: hashicorp/setup-terraform@v1
with:
cli_config_credentials_token: ${{ secrets.DEPLOYMENT_SP }}
# Initialize the TF working directory.
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 07-AKS-cluster
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
# Generates an execution plan for Terraform
- name: Terraform Plan 07-AKS-cluster
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster
run: terraform plan
# On push to main, build or change infrastructure according to Terraform configuration files
# Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
- name: Terraform Apply 07-AKS-cluster
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster
run: terraform apply -auto-approve
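One caveat with the plan/apply pairing above: `terraform plan` and `terraform apply -auto-approve` each compute their own plan, so what gets applied on the push to main is not guaranteed to match what was reviewed. A hedged sketch of a common variant, not what this workflow ships: write the plan to a file and apply exactly that file (`tfplan` is an arbitrary filename chosen for this example):

```yaml
# Sketch: save the execution plan and apply that exact plan in the same job.
# "tfplan" is an arbitrary local filename, not something this repo defines.
- name: Terraform Plan 07-AKS-cluster
  working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster
  run: terraform plan -out=tfplan

- name: Terraform Apply 07-AKS-cluster
  if: github.ref == 'refs/heads/main' && github.event_name == 'push'
  working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/07-AKS-cluster
  run: terraform apply tfplan   # a saved plan applies without an interactive prompt
```

On a push to main, the apply step then consumes the plan produced by the immediately preceding step, closing the re-plan gap within the job.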

View file

@@ -1,53 +0,0 @@
# This pipeline is required only for the blue-green deployment.
name: 'Terraform'
on:
push:
branches:
- main
pull_request:
jobs:
terraform:
name: 'Terraform'
env:
ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
ARM_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUB_ID }}
ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
runs-on: ubuntu-latest
environment: production
# Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
defaults:
run:
shell: bash
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout
uses: actions/checkout@v2
# Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token
- name: Setup Terraform
uses: hashicorp/setup-terraform@v1
with:
cli_config_credentials_token: ${{ secrets.DEPLOYMENT_SP }}
# Initialize the TF working directory.
# Change the backend-config variables to match the storage account used for your deployments.
- name: Terraform Init 08-DNS-Records
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/08-DNS-Records
run: terraform init -backend-config="resource_group_name=$TFSTATE_RG" -backend-config="storage_account_name=$STORAGEACCOUNTNAME" -backend-config="container_name=$CONTAINERNAME"
# Generates an execution plan for Terraform
- name: Terraform Plan 08-DNS-Records
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/08-DNS-Records
run: terraform plan
# On push to main, build or change infrastructure according to Terraform configuration files
# Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
- name: Terraform Apply 08-DNS-Records
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
working-directory: ./Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/08-DNS-Records
run: terraform apply -auto-approve
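Because this pipeline exists only for the blue-green deployment, one option is to scope its triggers so it runs only when the 08-DNS-Records configuration changes. A sketch under that assumption; the `paths` filter is an illustrative addition, not part of the original trigger:

```yaml
# Sketch: narrow the trigger so the blue-green DNS workflow runs only when
# its own Terraform files change. The paths filter is an assumed addition
# for illustration, not in the original workflow.
on:
  push:
    branches:
      - main
    paths:
      - 'Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/08-DNS-Records/**'
  pull_request:
    paths:
      - 'Scenarios/AKS-Secure-Baseline-PrivateCluster/Terraform/08-DNS-Records/**'
```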