Merge branch '2112.int' into patch.5.5.0
Commit 7b90a92959
@@ -6,7 +6,7 @@
 version: '3.7'
 services:
   rover:
-    image: aztfmod/rover:1.0.9-2111.0103
+    image: aztfmod/rover:1.0.11-2112.0723
     user: vscode

     labels:
@@ -23,6 +23,8 @@ env:
   ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
   ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }}
   ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
+  TF_REGISTRY_DISCOVERY_RETRY: 5
+  TF_REGISTRY_CLIENT_TIMEOUT: 15
   ROVER_RUNNER: true

 jobs:
@@ -37,7 +39,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:1.0.9-2111.0103
+      image: aztfmod/rover:1.0.11-2112.0723
       options: --user 0

     steps:
@@ -64,7 +66,7 @@ jobs:

       - name: foundations
         run: |
-          sleep 90
+          sleep 120
           /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a apply \
             -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/100-passthrough \
             -tfstate caf_foundations.tfstate \
@@ -90,7 +92,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:1.0.9-2111.0103
+      image: aztfmod/rover:1.0.11-2112.0723
       options: --user 0

     steps:
@@ -133,7 +135,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:1.0.9-2111.0103
+      image: aztfmod/rover:1.0.11-2112.0723
       options: --user 0

     steps:
@@ -184,7 +186,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:1.0.9-2111.0103
+      image: aztfmod/rover:1.0.11-2112.0723
       options: --user 0

     steps:
@@ -226,7 +228,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:1.0.9-2111.0103
+      image: aztfmod/rover:1.0.11-2112.0723
       options: --user 0

     steps:
@@ -17,6 +17,8 @@ env:
   ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
   ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }}
   ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
+  TF_REGISTRY_DISCOVERY_RETRY: 5
+  TF_REGISTRY_CLIENT_TIMEOUT: 15
   ROVER_RUNNER: true

 jobs:
@@ -31,7 +33,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.14.11-2111.0103
+      image: aztfmod/rover:0.14.11-2112.0723
       options: --user 0

     steps:
@@ -83,7 +85,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:0.14.11-2111.0103
+      image: aztfmod/rover:0.14.11-2112.0723
       options: --user 0

     steps:
@@ -111,7 +113,8 @@ jobs:
             -parallelism=30 \
             -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \
             --environment ${{ github.run_id }} \
-            -refresh=false
+            -refresh=false \
+            -auto-approve

   foundations200:
     name: foundations-200
@@ -126,7 +129,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.14.11-2111.0103
+      image: aztfmod/rover:0.14.11-2112.0723
       options: --user 0

     steps:
@@ -177,7 +180,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:0.14.11-2111.0103
+      image: aztfmod/rover:0.14.11-2112.0723
       options: --user 0

     steps:
@@ -205,7 +208,8 @@ jobs:
             -parallelism=30 \
             -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \
             --environment ${{ github.run_id }} \
-            -refresh=false
+            -refresh=false \
+            -auto-approve

   foundations_destroy:
     name: foundations_destroy
@@ -219,7 +223,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.14.11-2111.0103
+      image: aztfmod/rover:0.14.11-2112.0723
       options: --user 0

     steps:
@@ -240,7 +244,7 @@ jobs:
             -level level1 \
             -parallelism=30 \
             --environment ${{ github.run_id }} \
-            '-var tags={testing_job_id="${{ github.run_id }}"}'
+            -auto-approve

       - name: Remove launchpad
         run: |
@@ -250,9 +254,7 @@ jobs:
             -launchpad \
             -parallelism=30 \
             --environment ${{ github.run_id }} \
-            '-var random_length=${{ matrix.random_length }}' \
-            '-var prefix=g${{ github.run_id }}' \
-            '-var tags={testing_job_id="${{ github.run_id }}"}'
+            -auto-approve


       - name: Complete purge
@@ -17,6 +17,8 @@ env:
   ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
   ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }}
   ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
+  TF_REGISTRY_DISCOVERY_RETRY: 5
+  TF_REGISTRY_CLIENT_TIMEOUT: 15
   ROVER_RUNNER: true

 jobs:
@@ -31,7 +33,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.15.5-2111.0103
+      image: aztfmod/rover:0.15.5-2112.0723
       options: --user 0

     steps:
@@ -83,7 +85,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:0.15.5-2111.0103
+      image: aztfmod/rover:0.15.5-2112.0723
       options: --user 0

     steps:
@@ -126,7 +128,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.15.5-2111.0103
+      image: aztfmod/rover:0.15.5-2112.0723
       options: --user 0

     steps:
@@ -177,7 +179,7 @@ jobs:
         ]

     container:
-      image: aztfmod/rover:0.15.5-2111.0103
+      image: aztfmod/rover:0.15.5-2112.0723
       options: --user 0

     steps:
@@ -219,7 +221,7 @@ jobs:
         random_length: ['5']

     container:
-      image: aztfmod/rover:0.15.5-2111.0103
+      image: aztfmod/rover:0.15.5-2112.0723
       options: --user 0

     steps:
@@ -1,9 +1,9 @@

 module "dynamic_keyvault_secrets" {
-  source  = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets"
-  version = "~>5.4.2"
+  # source  = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets"
+  # version = "~>5.4.2"

-  # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master"

   for_each = try(var.dynamic_keyvault_secrets, {})

@@ -3,7 +3,7 @@ module "launchpad" {
   # version = "~>5.4.2"


-  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=patch.5.5.0"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=master"
   # source = "../../aztfmod"

   providers = {
@@ -23,9 +23,6 @@ variable "tags" {

 variable "aks_cluster_key" {
   description = "AKS cluster key to deploy the Gitlab Helm charts. The key must be defined in the variable aks_clusters"
 }
-variable "aks_cluster_vnet_key" {
-
-}
 variable "aks_clusters" {}
 variable "vnets" {
@@ -35,4 +32,4 @@ variable "managed_identities" {
   description = "Map of the user managed identities."
 }

-variable "aad_pod_identity" {}
+variable "aad_pod_identity" {}
@@ -22,6 +22,7 @@ resource "helm_release" "charts" {
   skip_crds        = try(each.value.skip_crds, false)
   create_namespace = try(each.value.create_namespace, false)
   values           = try(each.value.values, null)
+  version          = try(each.value.version, null)

   dynamic "set" {
     for_each = try(each.value.sets, {})
@@ -44,4 +45,4 @@ resource "helm_release" "charts" {
   # values = [
   #   "${file("values.yaml")}"
   # ]
-}
+}
@@ -2,10 +2,16 @@

 module "enterprise_scale" {
   source  = "Azure/caf-enterprise-scale/azurerm"
-  version = "~> 0.3.0"
+  version = "~> 1.1.0"

   # source = "../../../../eslz"

+  providers = {
+    azurerm              = azurerm
+    azurerm.connectivity = azurerm
+    azurerm.management   = azurerm
+  }
+
   root_parent_id   = data.azurerm_client_config.current.tenant_id
   default_location = local.global_settings.regions[local.global_settings.default_region]

@@ -3,7 +3,7 @@ terraform {
   required_providers {
     azurerm = {
       source  = "hashicorp/azurerm"
-      version = "~> 2.65.0"
+      version = "~> 2.80.0"
     }
   }
   required_version = ">= 0.14"
@@ -1,4 +0,0 @@
-terraform {
-  backend "azurerm" {
-  }
-}
@@ -1,21 +0,0 @@
-locals {
-  azure_workspace_resource_id = local.remote.databricks_workspaces[var.databricks.lz_key][var.databricks.workspace_key].id
-}
-
-provider "databricks" {
-  azure_workspace_resource_id = local.azure_workspace_resource_id
-  # azure_client_id            = var.client_id
-  # azure_client_secret        = var.client_secret
-  # azure_tenant_id            = var.tenant_id
-}
-
-module "databricks" {
-  source = "../../modules/databricks"
-
-  settings = var.databricks
-}
-
-output "databricks" {
-  value     = module.databricks
-  sensitive = false
-}
@@ -1,43 +0,0 @@
-locals {
-  landingzone = {
-    current = {
-      storage_account_name = var.tfstate_storage_account_name
-      container_name       = var.tfstate_container_name
-      resource_group_name  = var.tfstate_resource_group_name
-    }
-    lower = {
-      storage_account_name = var.lower_storage_account_name
-      container_name       = var.lower_container_name
-      resource_group_name  = var.lower_resource_group_name
-    }
-  }
-}
-
-data "terraform_remote_state" "remote" {
-  for_each = try(var.landingzone.tfstates, {})
-
-  backend = var.landingzone.backend_type
-  config = {
-    storage_account_name = local.landingzone[try(each.value.level, "current")].storage_account_name
-    container_name       = local.landingzone[try(each.value.level, "current")].container_name
-    resource_group_name  = local.landingzone[try(each.value.level, "current")].resource_group_name
-    key                  = each.value.tfstate
-  }
-}
-
-locals {
-  landingzone_tag = {
-    "landingzone" = var.landingzone.key
-  }
-
-  tags = merge(local.global_settings.tags, local.landingzone_tag, { "level" = var.landingzone.level }, { "environment" = local.global_settings.environment }, { "rover_version" = var.rover_version }, var.tags)
-
-  global_settings = data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings
-
-  remote = {
-    databricks_workspaces = {
-      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.databricks_workspaces[key], {}))
-    }
-  }
-
-}
@@ -1,73 +0,0 @@
-terraform {
-  required_providers {
-    azurerm = {
-      source  = "hashicorp/azurerm"
-      version = "~> 2.43"
-    }
-    azurecaf = {
-      source  = "aztfmod/azurecaf"
-      version = "1.0.0"
-    }
-    databricks = {
-      source  = "databrickslabs/databricks"
-      version = "~> 0.2.5"
-    }
-  }
-  required_version = ">= 0.13"
-}
-
-provider "azurerm" {
-  features {
-    key_vault {
-      purge_soft_delete_on_destroy = true
-    }
-  }
-}
-
-data "azurerm_client_config" "current" {}
-
-data "terraform_remote_state" "landingzone" {
-  backend = "azurerm"
-  config = {
-    storage_account_name = var.tfstate_storage_account_name
-    container_name       = var.tfstate_container_name
-    key                  = var.tfstate_key
-    resource_group_name  = var.tfstate_resource_group_name
-  }
-}
-
-locals {
-  diagnostics = {
-    diagnostics_definition   = merge(data.terraform_remote_state.landingzone.outputs.diagnostics.diagnostics_definition, var.diagnostics_definition)
-    diagnostics_destinations = data.terraform_remote_state.landingzone.outputs.diagnostics.diagnostics_destinations
-    storage_accounts         = data.terraform_remote_state.landingzone.outputs.diagnostics.storage_accounts
-    log_analytics            = data.terraform_remote_state.landingzone.outputs.diagnostics.log_analytics
-  }
-
-
-
-  # Update the tfstates map
-  tfstates = merge(
-    tomap(
-      {
-        (var.landingzone.key) = local.backend[var.landingzone.backend_type]
-      }
-    )
-    ,
-    data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.tfstates
-  )
-
-
-  backend = {
-    azurerm = {
-      storage_account_name = var.tfstate_storage_account_name
-      container_name       = var.tfstate_container_name
-      resource_group_name  = var.tfstate_resource_group_name
-      key                  = var.tfstate_key
-      level                = var.landingzone.level
-      tenant_id            = var.tenant_id
-      subscription_id      = data.azurerm_client_config.current.subscription_id
-    }
-  }
-
-}
@@ -1,157 +0,0 @@
-# Map of the remote data state for lower level
-variable "lower_storage_account_name" {}
-variable "lower_container_name" {}
-variable "lower_resource_group_name" {}
-
-variable "tfstate_storage_account_name" {}
-variable "tfstate_container_name" {}
-variable "tfstate_key" {}
-variable "tfstate_resource_group_name" {}
-
-variable "global_settings" {
-  default = {}
-}
-
-variable "landingzone" {
-  default = ""
-}
-
-variable "environment" {
-  default = "sandpit"
-}
-variable "rover_version" {
-  default = null
-}
-variable "max_length" {
-  default = 40
-}
-variable "logged_user_objectId" {
-  default = null
-}
-variable "logged_aad_app_objectId" {
-  default = null
-}
-variable "tags" {
-  default = null
-  type    = map(any)
-}
-variable "diagnostic_log_analytics" {
-  default = {}
-}
-variable "app_service_environments" {
-  default = {}
-}
-variable "app_service_plans" {
-  default = {}
-}
-variable "app_services" {
-  default = {}
-}
-variable "diagnostics_definition" {
-  default = null
-}
-variable "resource_groups" {
-  default = null
-}
-variable "network_security_group_definition" {
-  default = {}
-}
-variable "vnets" {
-  default = {}
-}
-variable "azurerm_redis_caches" {
-  default = {}
-}
-variable "mssql_servers" {
-  default = {}
-}
-variable "mssql_databases" {
-  default = {}
-}
-variable "mssql_elastic_pools" {
-  default = {}
-}
-variable "storage_accounts" {
-  default = {}
-}
-variable "azuread_groups" {
-  default = {}
-}
-variable "keyvaults" {
-  default = {}
-}
-variable "keyvault_access_policies" {
-  default = {}
-}
-variable "virtual_machines" {
-  default = {}
-}
-variable "azure_container_registries" {
-  default = {}
-}
-variable "bastion_hosts" {
-  default = {}
-}
-variable "public_ip_addresses" {
-  default = {}
-}
-variable "diagnostic_storage_accounts" {
-  default = {}
-}
-variable "managed_identities" {
-  default = {}
-}
-variable "private_dns" {
-  default = {}
-}
-variable "synapse_workspaces" {
-  default = {}
-}
-variable "azurerm_application_insights" {
-  default = {}
-}
-variable "role_mapping" {
-  default = {}
-}
-variable "aks_clusters" {
-  default = {}
-}
-variable "databricks_workspaces" {
-  default = {}
-}
-variable "machine_learning_workspaces" {
-  default = {}
-}
-variable "monitoring" {
-  default = {}
-}
-variable "virtual_wans" {
-  default = {}
-}
-variable "event_hub_namespaces" {
-  default = {}
-}
-variable "application_gateways" {
-  default = {}
-}
-variable "application_gateway_applications" {
-  default = {}
-}
-variable "application_gateway_waf_policies" {
-  default = {}
-}
-variable "dynamic_keyvault_secrets" {
-  default = {}
-}
-variable "disk_encryption_sets" {
-  default = {}
-}
-variable "keyvault_keys" {
-  default = {}
-}
-variable "databricks" {
-  default = {}
-}
-variable "var_folder_path" {
-  default = {}
-}
@@ -0,0 +1,111 @@
+resource "databricks_cluster" "cluster" {
+  for_each = var.databricks_clusters
+
+  #
+  # Required
+  #
+
+  spark_version = data.databricks_spark_version.runtime[each.key].id
+
+  #
+  # Required - Optional if ....
+  #
+
+  # Required - optional if instance_pool_id is given
+  node_type_id = can(each.value.instance_pool) ? null : data.databricks_node_type.node_type[each.key].id
+
+  #
+  # Optional
+  #
+
+  autotermination_minutes      = try(each.value.autotermination_minutes, null)
+  cluster_name                 = try(each.value.name, null)
+  custom_tags                  = try(each.value.custom_tags, null)
+  driver_node_type_id          = can(each.value.driver_node_type) ? data.databricks_node_type.driver_node_type[each.key].id : data.databricks_node_type.node_type[each.key].id
+  enable_local_disk_encryption = try(each.value.enable_local_disk_encryption, null)
+  idempotency_token            = try(each.value.idempotency_token, null)
+  is_pinned                    = try(each.value.is_pinned, false)
+  single_user_name             = try(each.value.single_user_name, null)
+  spark_conf                   = try(each.value.spark_conf, null)
+  spark_env_vars               = try(each.value.spark_env_vars, null)
+  ssh_public_keys              = try(each.value.ssh_public_keys, null)
+
+  dynamic "autoscale" {
+    for_each = try(each.value.autoscale, null) == null ? [] : [1]
+
+    content {
+      min_workers = try(each.value.autoscale.min_workers, null)
+      max_workers = try(each.value.autoscale.max_workers, null)
+    }
+  }
+
+  # Add library block - Doc not super clear - https://registry.terraform.io/providers/databrickslabs/databricks/latest/docs/resources/cluster#library-configuration-block
+
+  dynamic "cluster_log_conf" {
+    for_each = try(each.value.cluster_log_conf, {})
+
+    content {
+      dynamic "dbfs" {
+        for_each = each.value.cluster_log_conf.dbfs
+
+        content {
+          destination = dbfs.value
+        }
+      }
+    }
+  }
+
+}
+
+data "databricks_node_type" "driver_node_type" {
+  for_each = {
+    for key, value in var.databricks_clusters : key => value
+    if can(value.driver_node_type)
+  }
+
+  category                = try(each.value.driver_node_type.min_memory_gb, "General Purpose (HDD)")
+  gb_per_core             = try(each.value.driver_node_type.min_memory_gb, 0)
+  is_io_cache_enabled     = try(each.value.driver_node_type.is_io_cache_enabled, false)
+  local_disk              = try(each.value.driver_node_type.local_disk, false)
+  min_cores               = try(each.value.driver_node_type.min_cores, 0)
+  min_gpus                = try(each.value.driver_node_type.min_gpus, 0)
+  min_memory_gb           = try(each.value.driver_node_type.min_memory_gb, 0)
+  photon_driver_capable   = try(each.value.driver_node_type.photon_driver_capable, false)
+  photon_worker_capable   = try(each.value.driver_node_type.photon_worker_capable, false)
+  support_port_forwarding = try(each.value.driver_node_type.support_port_forwarding, false)
+
+}
+
+data "databricks_node_type" "node_type" {
+  for_each = {
+    for key, value in var.databricks_clusters : key => value
+    if can(value.node_type)
+  }
+
+  category                = try(each.value.node_type.min_memory_gb, "General Purpose (HDD)")
+  gb_per_core             = try(each.value.node_type.min_memory_gb, 0)
+  is_io_cache_enabled     = try(each.value.node_type.is_io_cache_enabled, false)
+  local_disk              = try(each.value.node_type.local_disk, false)
+  min_cores               = try(each.value.node_type.min_cores, 0)
+  min_gpus                = try(each.value.node_type.min_gpus, 0)
+  min_memory_gb           = try(each.value.node_type.min_memory_gb, 0)
+  photon_driver_capable   = try(each.value.node_type.photon_driver_capable, false)
+  photon_worker_capable   = try(each.value.node_type.photon_worker_capable, false)
+  support_port_forwarding = try(each.value.node_type.support_port_forwarding, false)
+
+}
+
+data "databricks_spark_version" "runtime" {
+  for_each = var.databricks_clusters
+
+  beta              = try(each.value.spark_version.beta, false)
+  genomics          = try(each.value.spark_version.genomics, false)
+  gpu               = try(each.value.spark_version.gpu, false)
+  latest            = try(each.value.spark_version.latest, true)
+  long_term_support = try(each.value.spark_version.long_term_support, false)
+  ml                = try(each.value.spark_version.ml, false)
+  photon            = try(each.value.spark_version.photon, false)
+  scala             = try(each.value.spark_version.scala, "2.12")
+  spark_version     = try(each.value.spark_version.spark_version, "3.0.1")
+
+}
@@ -0,0 +1,46 @@
+locals {
+  landingzone = {
+    current = {
+      storage_account_name = var.tfstate_storage_account_name
+      container_name       = var.tfstate_container_name
+      resource_group_name  = var.tfstate_resource_group_name
+    }
+    lower = {
+      storage_account_name = var.lower_storage_account_name
+      container_name       = var.lower_container_name
+      resource_group_name  = var.lower_resource_group_name
+    }
+  }
+}
+
+data "terraform_remote_state" "remote" {
+  for_each = try(var.landingzone.tfstates, {})
+
+  backend = var.landingzone.backend_type
+  config  = local.remote_state[try(each.value.backend_type, var.landingzone.backend_type, "azurerm")][each.key]
+}
+
+locals {
+
+  remote_state = {
+    azurerm = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => {
+        container_name       = try(value.workspace, local.landingzone[try(value.level, "current")].container_name)
+        key                  = value.tfstate
+        resource_group_name  = try(value.resource_group_name, local.landingzone[try(value.level, "current")].resource_group_name)
+        storage_account_name = try(value.storage_account_name, local.landingzone[try(value.level, "current")].storage_account_name)
+        subscription_id      = try(value.subscription_id, data.azurerm_client_config.current.subscription_id)
+        tenant_id            = try(value.tenant_id, data.azurerm_client_config.current.tenant_id)
+      }
+    }
+  }
+
+  global_settings = data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings
+
+  remote = {
+    databricks_workspaces = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].databricks_workspaces, {}))
+    }
+  }
+
+}
@@ -0,0 +1,31 @@
+terraform {
+  required_providers {
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = "~> 2.82.0"
+    }
+    azurecaf = {
+      source  = "aztfmod/azurecaf"
+      version = "~> 1.2.0"
+    }
+    databricks = {
+      source  = "databrickslabs/databricks"
+      version = "~> 0.3.9"
+    }
+  }
+  required_version = ">= 0.13"
+}
+
+provider "azurerm" {
+  features {}
+}
+
+data "azurerm_client_config" "current" {}
+
+locals {
+  azure_workspace_resource = local.remote.databricks_workspaces[var.databricks_workspace.lz_key][var.databricks_workspace.workspace_key]
+}
+
+provider "databricks" {
+  host = local.azure_workspace_resource.workspace_url
+}
@@ -0,0 +1,3 @@
+output "cluster" {
+  value = databricks_cluster.cluster
+}
@@ -0,0 +1,24 @@
+# Map of the remote data state for lower level
+variable "lower_storage_account_name" {}
+variable "lower_container_name" {}
+variable "lower_resource_group_name" {}
+
+variable "tfstate_storage_account_name" {}
+variable "tfstate_container_name" {}
+variable "tfstate_key" {}
+variable "tfstate_resource_group_name" {}
+
+variable "global_settings" {
+  default = {}
+}
+
+variable "landingzone" {
+  default = ""
+}
+variable "databricks_clusters" {
+  description = "This resource allows you to create, update, and delete clusters."
+  default     = {}
+}
+variable "databricks_workspace" {
+  description = "Azure Databricks workspace where the resources will be created"
+}
@@ -1,8 +1,8 @@
 module "dynamic_keyvault_secrets" {
-  source  = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets"
-  version = "~>5.4.2"
+  # source  = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets"
+  # version = "~>5.4.2"

-  # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master"

   for_each = {
     for keyvault_key, secrets in try(var.dynamic_keyvault_secrets, {}) : keyvault_key => {
@@ -2,7 +2,7 @@ module "solution" {
   # source  = "aztfmod/caf/azurerm"
   # version = "~>5.4.2"

-  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=patch.5.5.0"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=master"
   # source = "../../aztfmod"

   providers = {
@@ -26,6 +26,7 @@ module "solution" {
   event_hub_namespaces                  = var.event_hub_namespaces
   event_hubs                            = var.event_hubs
   global_settings                       = local.global_settings
+  identity                              = local.identity
   keyvault_access_policies              = var.keyvault_access_policies
   keyvault_access_policies_azuread_apps = var.keyvault_access_policies_azuread_apps
   keyvault_certificate_issuers          = var.keyvault_certificate_issuers
@@ -9,14 +9,15 @@ locals {
     container_groups                    = var.container_groups
     dedicated_host_groups               = var.dedicated_host_groups
     dedicated_hosts                     = var.dedicated_hosts
+    machine_learning_compute_instance   = var.machine_learning_compute_instance
     proximity_placement_groups          = var.proximity_placement_groups
-    virtual_machines                    = var.virtual_machines
     virtual_machine_scale_sets          = var.virtual_machine_scale_sets
-    vmware_private_clouds               = var.vmware_private_clouds
+    virtual_machines                    = var.virtual_machines
     vmware_clusters                     = var.vmware_clusters
     vmware_express_route_authorizations = var.vmware_express_route_authorizations
-    wvd_applications                    = var.wvd_applications
+    vmware_private_clouds               = var.vmware_private_clouds
     wvd_application_groups              = var.wvd_application_groups
+    wvd_applications                    = var.wvd_applications
     wvd_host_pools                      = var.wvd_host_pools
     wvd_workspaces                      = var.wvd_workspaces
   }
@@ -1,20 +1,28 @@
 locals {
   data_factory = {
-    data_factory                  = var.data_factory
-    data_factory_pipeline         = var.data_factory_pipeline
-    data_factory_trigger_schedule = var.data_factory_trigger_schedule
-    datasets = {
-      azure_blob       = try(var.datasets.azure_blob, {})
-      cosmosdb_sqlapi  = try(var.datasets.cosmosdb_sqlapi, {})
-      delimited_text   = try(var.datasets.delimited_text, {})
-      http             = try(var.datasets.http, {})
-      json             = try(var.datasets.json, {})
-      mysql            = try(var.datasets.mysql, {})
-      postgresql       = try(var.datasets.postgresql, {})
-      sql_server_table = try(var.datasets.sql_server_table, {})
-    }
-    linked_services = {
-      azure_blob_storage = try(var.linked_services.azure_blob_storage, {})
-    }
+    data_factory                                 = var.data_factory
+    data_factory_pipeline                        = var.data_factory_pipeline
+    data_factory_trigger_schedule                = var.data_factory_trigger_schedule
+    data_factory_integration_runtime_self_hosted = var.data_factory_integration_runtime_self_hosted
+    datasets = {
+      azure_blob       = merge(try(var.datasets.azure_blob, {}), try(var.data_factory_datasets.azure_blob, {}))
+      cosmosdb_sqlapi  = merge(try(var.datasets.cosmosdb_sqlapi, {}), try(var.data_factory_datasets.cosmosdb_sqlapi, {}))
+      delimited_text   = merge(try(var.datasets.delimited_text, {}), try(var.data_factory_datasets.delimited_text, {}))
+      http             = merge(try(var.datasets.http, {}), try(var.data_factory_datasets.http, {}))
+      json             = merge(try(var.datasets.json, {}), try(var.data_factory_datasets.json, {}))
+      mysql            = merge(try(var.datasets.mysql, {}), try(var.data_factory_datasets.mysql, {}))
+      postgresql       = merge(try(var.datasets.postgresql, {}), try(var.data_factory_datasets.postgresql, {}))
+      sql_server_table = merge(try(var.datasets.sql_server_table, {}), try(var.data_factory_datasets.sql_server_table, {}))
+    }
+    linked_services = {
+      azure_blob_storage = merge(try(var.linked_services.azure_blob_storage, {}), try(var.data_factory_linked_services.azure_blob_storage, {}), var.data_factory_linked_services_azure_blob_storages)
+      azure_databricks   = merge(try(var.data_factory_linked_services.azure_databricks, {}), try(var.data_factory_linked_services.azure_databricks, var.data_factory_linked_service_azure_databricks))
+      cosmosdb           = merge(try(var.data_factory_linked_services.cosmosdb, {}), try(var.data_factory_linked_services.cosmosdb, {}))
+      key_vault          = var.data_factory_linked_service_key_vaults
+      mysql              = merge(try(var.data_factory_linked_services.mysql, {}), try(var.data_factory_linked_services.mysql, {}))
+      postgresql         = merge(try(var.data_factory_linked_services.postgresql, {}), try(var.data_factory_linked_services.postgresql, {}))
+      sql_server         = merge(try(var.data_factory_linked_services.sql_server, {}), try(var.data_factory_linked_services.sql_server, {}))
+      web                = merge(try(var.data_factory_linked_services.web, {}), try(var.data_factory_linked_services.web, {}))
+    }
   }
 }
@@ -5,6 +5,7 @@ locals {
     app_config                  = var.app_config
     azurerm_redis_caches        = var.azurerm_redis_caches
     cosmos_dbs                  = var.cosmos_dbs
+    cosmosdb_sql_databases      = var.cosmosdb_sql_databases
     databricks_workspaces       = var.databricks_workspaces
     machine_learning_workspaces = var.machine_learning_workspaces
     mariadb_databases           = var.mariadb_databases
@@ -0,0 +1,6 @@
+locals {
+  identity = {
+    active_directory_domain_service             = var.active_directory_domain_service
+    active_directory_domain_service_replica_set = var.active_directory_domain_service_replica_set
+  }
+}
@@ -68,6 +68,12 @@ locals {
     container_registry = {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].azure_container_registries, {}))
     }
+    databricks_workspaces = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].databricks_workspaces, {}))
+    }
+    cosmos_dbs = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].cosmos_dbs, {}))
+    }
     disk_encryption_sets = {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].disk_encryption_sets, {}))
     }
@@ -98,6 +104,9 @@ locals {
     integration_service_environment = {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].integration_service_environment, {}))
     }
+    keyvault_certificates = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].keyvault_certificates, {}))
+    }
     keyvault_certificate_requests = {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].keyvault_certificate_requests, {}))
     }
@@ -172,6 +181,9 @@ locals {
     storage_accounts = {
      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].storage_accounts, {}))
     }
+    storage_containers = {
+      for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].storage_containers, {}))
+    }
     subscriptions = {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].subscriptions, {}))
     }
@@ -218,4 +230,4 @@ locals {
       for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].wvd_workspaces, {}))
     }
   }
-}
+}
@@ -16,7 +16,7 @@ locals {
 data "terraform_remote_state" "remote" {
   for_each = try(var.landingzone.tfstates, {})

-  backend = var.landingzone.backend_type
+  backend = try(each.value.backend_type, var.landingzone.backend_type, "azurerm")
   config  = local.remote_state[try(each.value.backend_type, var.landingzone.backend_type, "azurerm")][each.key]
 }

@@ -32,6 +32,7 @@ locals {
        subscription_id      = try(value.subscription_id, var.tfstate_subscription_id)
        tenant_id            = try(value.tenant_id, data.azurerm_client_config.current.tenant_id)
        sas_token            = try(value.sas_token, null) != null ? var.sas_token : null
+        use_azuread_auth     = try(value.use_azuread_auth, true)
      }
    }
  }
@@ -1,17 +0,0 @@
-resource "databricks_cluster" "cluster" {
-  cluster_name            = var.settings.name
-  spark_version           = var.settings.spark_version
-  node_type_id            = var.settings.node_type_id
-  autotermination_minutes = var.settings.autotermination_minutes
-
-  dynamic "autoscale" {
-    for_each = try(var.settings.autoscale, null) == null ? [] : [1]
-
-    content {
-      min_workers = try(var.settings.autoscale.min_workers, null)
-      max_workers = try(var.settings.autoscale.max_workers, null)
-    }
-  }
-
-
-}
@@ -1,9 +0,0 @@
-terraform {
-  required_providers {
-    databricks = {
-      source = "databrickslabs/databricks"
-    }
-  }
-  required_version = ">= 0.13"
-}
-
@@ -1,7 +0,0 @@
-output "cluster" {
-  value = {
-    id           = databricks_cluster.cluster.id
-    default_tags = databricks_cluster.cluster.default_tags
-    state        = databricks_cluster.cluster.state
-  }
-}
@@ -1,4 +0,0 @@
-variable "azure_workspace_resource_id" {
-  default = {}
-}
-variable "settings" {}
@@ -27,6 +27,9 @@ variable "dedicated_host_groups" {
 variable "dedicated_hosts" {
   default = {}
 }
+variable "machine_learning_compute_instance" {
+  default = {}
+}
 variable "proximity_placement_groups" {
   default = {}
 }
@@ -23,4 +23,33 @@ variable "linked_services" {
   default = {
     # azure_blob_storage
   }
 }
+variable "data_factory_datasets" {
+  default = {
+    # azure_blob
+    # cosmosdb_sqlapi
+    # delimited_text
+    # http
+    # json
+    # mysql
+    # postgresql
+    # sql_server_table
+  }
+}
+variable "data_factory_linked_services" {
+  default = {
+    # azure_blob_storage
+  }
+}
+variable "data_factory_linked_service_key_vaults" {
+  default = {}
+}
+variable "data_factory_linked_services_azure_blob_storages" {
+  default = {}
+}
+variable "data_factory_linked_service_azure_databricks" {
+  default = {}
+}
+variable "data_factory_integration_runtime_self_hosted" {
+  default = {}
+}
@@ -7,6 +7,9 @@ variable "azurerm_redis_caches" {
 variable "cosmos_dbs" {
   default = {}
 }
+variable "cosmosdb_sql_databases" {
+  default = {}
+}
 variable "database" {
   description = "Database configuration objects"
   default     = {}
@@ -0,0 +1,9 @@
+variable "active_directory_domain_service" {
+  description = "Manages an Active Directory Domain Service."
+  default     = {}
+}
+
+variable "active_directory_domain_service_replica_set" {
+  description = "Manages a Replica Set for an Active Directory Domain Service."
+  default     = {}
+}
@@ -3,10 +3,10 @@
 #

 module "vm_extension_monitoring_agent" {
-  source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
-  version = "~>5.4.0"
+  # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
+  # version = "~>5.4.0"

   # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"

   depends_on = [module.solution]
-
@@ -25,10 +25,10 @@ module "vm_extension_monitoring_agent" {
 }

 module "vm_extension_diagnostics" {
-  source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
-  version = "~>5.4.0"
+  # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
+  # version = "~>5.4.0"

   # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"

   depends_on = [module.solution]
-
@@ -50,12 +50,12 @@ module "vm_extension_diagnostics" {
 }

 module "vm_extension_microsoft_azure_domainjoin" {
-  source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
-  version = "~>5.4.0"
+  # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
+  # version = "~>5.4.0"

   # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions"

-  # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"

   depends_on = [module.solution]

@@ -72,12 +72,12 @@ module "vm_extension_microsoft_azure_domainjoin" {
 }

 module "vm_extension_session_host_dscextension" {
-  source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
-  version = "~>5.4.0"
+  # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
+  # version = "~>5.4.0"

   # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions"

-  # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"

   depends_on = [module.vm_extension_microsoft_azure_domainjoin]

@@ -92,4 +92,26 @@ module "vm_extension_session_host_dscextension" {
   extension_name = "session_host_dscextension"
   keyvaults      = merge(tomap({ (var.landingzone.key) = module.solution.keyvaults }), try(local.remote.keyvaults, {}))
   wvd_host_pools = merge(tomap({ (var.landingzone.key) = module.solution.wvd_host_pools }), try(local.remote.wvd_host_pools, {}))
 }
+
+
+module "vm_extension_custom_scriptextension" {
+  # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions"
+  # version = "~>5.4.0"
+
+  source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master"
+
+  depends_on = [module.solution, module.vm_extension_microsoft_azure_domainjoin]
+
+  for_each = {
+    for key, value in try(var.virtual_machines, {}) : key => value
+    if try(value.virtual_machine_extensions.custom_script, null) != null
+  }
+
+  client_config      = module.solution.client_config
+  virtual_machine_id = module.solution.virtual_machines[each.key].id
+  extension          = each.value.virtual_machine_extensions.custom_script
+  extension_name     = "custom_script"
+  managed_identities = merge(tomap({ (var.landingzone.key) = module.solution.managed_identities }), try(local.remote.managed_identities, {}))
+  storage_accounts   = merge(tomap({ (var.landingzone.key) = module.solution.storage_accounts }), try(local.remote.storage_accounts, {}))
+}
@@ -0,0 +1,41 @@
+# module "vmss_extension_microsoft_azure_domainjoin" {
+#   # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_scale_set_extensions"
+#   # version = "~>5.4.0"
+
+#   source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_scale_set_extensions?ref=master"
+
+#   depends_on = [module.solution]
+
+#   for_each = {
+#     for key, value in try(var.virtual_machine_scale_sets, {}) : key => value
+#     if try(value.virtual_machine_scale_set_extensions.microsoft_azure_domainjoin, null) != null
+#   }
+
+#   client_config                = module.solution.client_config
+#   virtual_machine_scale_set_id = module.solution.virtual_machine_scale_sets[each.key].id
+#   extension                    = each.value.virtual_machine_scale_set_extensions.microsoft_azure_domainjoin
+#   extension_name               = "microsoft_azure_domainJoin"
+#   keyvaults                    = merge(tomap({ (var.landingzone.key) = module.solution.keyvaults }), try(local.remote.keyvaults, {}))
+# }
+
+
+# module "vmss_extension_custom_scriptextension" {
+#   # source  = "aztfmod/caf/azurerm//modules/compute/virtual_machine_scale_set_extensions"
+#   # version = "~>5.4.0"
+
+#   source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_scale_set_extensions?ref=master"
+
+#   depends_on = [module.solution]
+
+#   for_each = {
+#     for key, value in try(var.virtual_machine_scale_sets, {}) : key => value
+#     if try(value.virtual_machine_scale_set_extensions.custom_script, null) != null
+#   }
+
+#   client_config                = module.solution.client_config
+#   virtual_machine_scale_set_id = module.solution.virtual_machine_scale_sets[each.key].id
+#   extension                    = each.value.virtual_machine_scale_set_extensions.custom_script
+#   extension_name               = "custom_script"
+#   managed_identities           = merge(tomap({ (var.landingzone.key) = module.solution.managed_identities }), try(local.remote.managed_identities, {}))
+#   storage_accounts             = merge(tomap({ (var.landingzone.key) = module.solution.storage_accounts }), try(local.remote.storage_accounts, {}))
+# }