updates for HPC Cache terraform 0.15.* examples (#1175)

Rick Shahid 2021-04-26 15:11:58 -07:00 committed by GitHub
Parent 1d573a7ca7
Commit c361e259b0
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
16 changed files: 352 additions and 756 deletions

View file

@@ -1,66 +1,67 @@
// customize the HPC Cache by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
// network details
network_resource_group_name = "network_resource_group"
// hpc cache details
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// nfs filer related variables
filer_resource_group_name = "filer_resource_group"
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
vm_admin_password = "ReplacePassword$"
// if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600
// populated where you are running terraform
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
}
terraform {
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = "~>2.12.0"
- }
- }
+ required_version = ">= 0.14.0,< 0.16.0"
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~>2.56.0"
+ }
+ }
}
provider "azurerm" {
features {}
}
// the render network
module "network" {
source = "github.com/Azure/Avere/src/terraform/modules/render_network"
resource_group_name = local.network_resource_group_name
location = local.location
}
resource "azurerm_resource_group" "hpc_cache_rg" {
@@ -84,19 +85,23 @@ resource "azurerm_resource_group" "nfsfiler" {
// the ephemeral filer
module "nasfiler1" {
source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
vm_size = "Standard_D2s_v3"
unique_name = "nasfiler1"
// network details
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets" {
@@ -118,4 +123,4 @@ output "mount_addresses" {
output "export_namespace" {
value = tolist(azurerm_hpc_cache_nfs_target.nfs_targets.namespace_junction)[0].namespace_path
}

View file

@@ -1,67 +1,67 @@
// customize the HPC Cache by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
// network details
network_resource_group_name = "network_resource_group"
// hpc cache details
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// nfs filer related variables
filer_resource_group_name = "filer_resource_group"
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
vm_admin_password = "ReplacePassword$"
// if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600
// populated where you are running terraform
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
}
terraform {
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = "~>2.12.0"
- }
- }
+ required_version = ">= 0.14.0,< 0.16.0"
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~>2.56.0"
+ }
+ }
}
provider "azurerm" {
features {}
}
// the render network
module "network" {
source = "github.com/Azure/Avere/src/terraform/modules/render_network"
resource_group_name = local.network_resource_group_name
location = local.location
}
resource "azurerm_resource_group" "hpc_cache_rg" {
@@ -85,19 +85,23 @@ resource "azurerm_resource_group" "nfsfiler" {
// the ephemeral filer
module "nasfiler1" {
source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
vm_size = "Standard_D2s_v3"
unique_name = "nasfiler1"
// network details
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets1" {
@@ -115,19 +119,23 @@ resource "azurerm_hpc_cache_nfs_target" "nfs_targets1" {
// the ephemeral filer
module "nasfiler2" {
source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
vm_size = "Standard_D2s_v3"
unique_name = "nasfiler2"
// network details
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets2" {
@@ -145,19 +153,23 @@ resource "azurerm_hpc_cache_nfs_target" "nfs_targets2" {
// the ephemeral filer
module "nasfiler3" {
source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
vm_size = "Standard_D2s_v3"
unique_name = "nasfiler3"
// network details
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets3" {
@@ -187,4 +199,4 @@ output "export_namespace_2" {
output "export_namespace_3" {
value = tolist(azurerm_hpc_cache_nfs_target.nfs_targets3.namespace_junction)[0].namespace_path
}

View file

@@ -10,26 +10,26 @@ locals {
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// storage details
@@ -49,10 +49,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}
@@ -120,9 +121,9 @@ resource "azurerm_storage_account_network_rules" "storage_acls" {
storage_account_name = azurerm_storage_account.storage.name
virtual_network_subnet_ids = [
module.network.cloud_cache_subnet_id,
// need for the controller to create the container
module.network.jumpbox_subnet_id,
]
default_action = "Deny"

View file

@@ -1,41 +1,42 @@
// customize the simple VM by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
network_resource_group_name = "network_resource_group"
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
vm_admin_password = "ReplacePassword$"
// if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600
// populated where you are running terraform
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
// nfs filer details
filer_location = "westus2"
filer_resource_group_name = "filer_resource_group"
// more filer sizes listed at https://github.com/Azure/Avere/tree/main/src/terraform/modules/nfs_filer
filer_size = "Standard_D2s_v3"
}
terraform {
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = "~>2.12.0"
- }
- }
+ required_version = ">= 0.14.0,< 0.16.0"
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~>2.56.0"
+ }
+ }
}
provider "azurerm" {
features {}
}
// the render network
module "network" {
source = "github.com/Azure/Avere/src/terraform/modules/render_network"
resource_group_name = local.network_resource_group_name
location = local.location
}
resource "azurerm_resource_group" "nfsfiler" {
@@ -44,16 +45,16 @@ resource "azurerm_resource_group" "nfsfiler" {
}
resource "azurerm_virtual_network" "filervnet" {
name = "filervnet"
address_space = ["192.168.254.240/29"]
location = azurerm_resource_group.nfsfiler.location
resource_group_name = azurerm_resource_group.nfsfiler.name
// this subnet holds the cloud cache, there should be one cloud cache per subnet
subnet {
name = "filersubnet"
address_prefix = "192.168.254.240/29"
}
}
resource "azurerm_virtual_network_peering" "peer-to-filer" {
@@ -72,19 +73,23 @@ resource "azurerm_virtual_network_peering" "peer-from-filer" {
// the ephemeral filer
module "nasfiler1" {
source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
vm_size = local.filer_size
unique_name = "nasfiler1"
// network details
virtual_network_resource_group = azurerm_virtual_network.filervnet.resource_group_name
virtual_network_name = azurerm_virtual_network.filervnet.name
virtual_network_subnet_name = tolist(azurerm_virtual_network.filervnet.subnet)[0].name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
output "filer_username" {
@@ -125,4 +130,4 @@ output "hpccache_cache_subnet_id" {
output "hpccache_render_subnet_name" {
value = module.network.render_clients1_subnet_name
}

View file

@@ -3,7 +3,7 @@ locals {
// the region of the deployment
location = "eastus"
hpccache_resource_group_name = "hpccache_resource_group"
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
@@ -14,7 +14,7 @@ locals {
// controller details
controller_add_public_ip = true
// for ease paste all the values (even unused) from the output of setting up network and filer
hpccache_jumpbox_subnet_name = ""
hpccache_network_name = ""
@@ -22,34 +22,35 @@ locals {
}
terraform {
- required_providers {
- azurerm = {
- source = "hashicorp/azurerm"
- version = "~>2.12.0"
- }
- }
+ required_version = ">= 0.14.0,< 0.16.0"
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~>2.56.0"
+ }
+ }
}
provider "azurerm" {
features {}
}
module "jumpbox" {
source = "github.com/Azure/Avere/src/terraform/modules/jumpbox"
resource_group_name = local.hpccache_resource_group_name
location = local.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
add_public_ip = local.controller_add_public_ip
build_vfxt_terraform_provider = false
// needed for the cachewarmer so it can create virtual machine scalesets
add_role_assignments = true
// network details
virtual_network_resource_group = local.hpccache_network_resource_group_name
virtual_network_name = local.hpccache_network_name
virtual_network_subnet_name = local.hpccache_jumpbox_subnet_name
source = "github.com/Azure/Avere/src/terraform/modules/jumpbox"
resource_group_name = local.hpccache_resource_group_name
location = local.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
add_public_ip = local.controller_add_public_ip
build_vfxt_terraform_provider = false
// needed for the cachewarmer so it can create virtual machine scalesets
add_role_assignments = true
// network details
virtual_network_resource_group = local.hpccache_network_resource_group_name
virtual_network_name = local.hpccache_network_name
virtual_network_subnet_name = local.hpccache_jumpbox_subnet_name
}
output "hpccache_resource_group_name" {

View file

@@ -9,26 +9,26 @@ locals {
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "hpccache"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// storage account hosting the queue
@@ -50,10 +50,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}

View file

@@ -1,47 +0,0 @@
# HPC Cache + DNS Spoofing using Unbound DNS server with Split Horizon
This deploys the 1-filer vFXT example along with an Azure virtual machine that installs [Unbound](https://nlnetlabs.nl/projects/unbound/about/) and configures it to override the address of an on-premises filer, so that the render nodes mount the Avere and hide the on-premises latency. All other DNS requests are forwarded to the pre-configured on-premises DNS servers.
![The architecture](../../../../docs/images/terraform/1filerdns.png)
## Deployment Instructions
To run the example, execute the following instructions. This assumes use of Azure Cloud Shell. If you are installing into your own environment, you will need to follow the [instructions to set up Terraform for the Azure environment](https://docs.microsoft.com/en-us/azure/terraform/terraform-install-configure).
1. browse to https://shell.azure.com
2. Specify your subscription by running this command with your subscription ID: ```az account set --subscription YOUR_SUBSCRIPTION_ID```. You will need to run this every time after restarting your shell, otherwise it may default you to the wrong subscription, and you will see an error similar to `azurerm_public_ip.vm is empty tuple`.
3. get the terraform examples
```bash
mkdir tf
cd tf
git init
git remote add origin -f https://github.com/Azure/Avere.git
git config core.sparsecheckout true
echo "src/terraform/*" >> .git/info/sparse-checkout
git pull origin main
```
4. `cd src/terraform/examples/dnsserver`
5. `code main.tf` to edit the local variables section at the top of the file, to customize to your preferences
6. execute `terraform init` in the directory of `main.tf`.
7. execute `terraform apply -auto-approve` to deploy the dns server and cluster
8. use the output DNS IP address to populate the DNS servers on your VNet, as sketched below.
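One way to apply the last step, assuming the render network from this example (resource group `network_resource_group`, VNet `rendervnet`) and the default Unbound static address `10.0.1.250` from the local variables, is the Azure CLI; adjust the names to your deployment:

```bash
# point the VNet's DNS at the Unbound server deployed by this example
az network vnet update \
  --resource-group network_resource_group \
  --name rendervnet \
  --dns-servers 10.0.1.250
# note: VMs already running must be restarted to pick up the new DNS setting
```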
Here are some dig commands to test your records:
```bash
# to lookup the A record for nfs1.rendering.com to unbound server 10.0.3.253
dig A @10.0.3.253 nfs1.rendering.com
# to do a reverse lookup to one of the vfxt addresses to unbound server 10.0.3.253
dig @10.0.3.253 -x 10.0.1.200
```
Once installed, you can point all the cloud nodes that use the Avere at the DNS server.
When you are done, you can destroy all resources by running `terraform destroy -auto-approve`.

View file

@@ -1,107 +0,0 @@
// customize the HPC Cache by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
// hpc cache details
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
network_resource_group_name = "network_resource_group"
virtual_network_name = "rendervnet"
virtual_network_subnet_name = "cloud_cache"
// nfs filer related variables
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
vm_admin_password = "ReplacePassword$"
// if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600
// populated where you are running terraform
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
dnsserver_static_ip = "10.0.1.250" // the address of the dns server or leave blank to dynamically assign
onprem_dns_servers = "10.0.3.254 169.254.169.254 " // space separated list
onprem_filer_fqdn = "nfs1.rendering.com" // the name of the filer to spoof
}
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.12.0"
}
}
}
provider "azurerm" {
features {}
}
data "azurerm_subnet" "cachesubnet" {
name = local.virtual_network_subnet_name
virtual_network_name = local.virtual_network_name
resource_group_name = local.network_resource_group_name
}
resource "azurerm_resource_group" "hpc_cache_rg" {
name = local.hpc_cache_resource_group_name
location = local.location
}
resource "azurerm_hpc_cache" "hpc_cache" {
name = local.cache_name
resource_group_name = azurerm_resource_group.hpc_cache_rg.name
location = azurerm_resource_group.hpc_cache_rg.location
cache_size_in_gb = local.cache_size
subnet_id = data.azurerm_subnet.cachesubnet.id
sku_name = local.cache_throughput
}
module "dnsserver" {
source = "github.com/Azure/Avere/src/terraform/modules/dnsserver"
resource_group_name = local.network_resource_group_name
location = local.location
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_key_data = local.vm_ssh_key_data
// network details
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = local.virtual_network_name
virtual_network_subnet_name = local.virtual_network_subnet_name
// this is the address of the unbound dns server
private_ip_address = local.dnsserver_static_ip
dns_server = local.onprem_dns_servers
avere_address_list = azurerm_hpc_cache.hpc_cache.mount_addresses
avere_filer_fqdn = local.onprem_filer_fqdn
excluded_subnet_cidrs = data.azurerm_subnet.cachesubnet.address_prefixes
// set the TTL
dns_max_ttl_seconds = 300
}
output "mount_addresses" {
value = azurerm_hpc_cache.hpc_cache.mount_addresses
}
output "unbound_dns_server_ip" {
value = module.dnsserver.dnsserver_address
}

View file

@@ -1,46 +0,0 @@
# HPC Cache Deployment with a Hammerspace Filer
This example shows how to deploy a render network, controller, and HPC Cache with a Hammerspace filer as shown in the diagram below:
![The architecture](../../../../../docs/images/terraform/hammerspace-hpcc.png)
# Hammerspace Licensing
To use this example, please contact a [Hammerspace representative](https://hammerspace.com/contact/) to get access to the Hammerspace Azure Image.
Once you have the Hammerspace Image ID, use the [Hammerspace Image copy instructions](../../hammerspace/HammerspaceCopyImage.md) to copy the image; you will then be ready to deploy and can proceed to the deployment instructions.
## Deployment Instructions
***Important Note*** HPC Cache needs to release a feature to disable NLM for it to mount a Hammerspace filer. In the meantime, please consider the [Avere vFXT Hammerspace example](../../vfxt/hammerspace).
To run the example, execute the following instructions. This assumes use of Azure Cloud Shell. If you are installing into your own environment, you will need to follow the [instructions to set up Terraform for the Azure environment](https://docs.microsoft.com/en-us/azure/terraform/terraform-install-configure).
1. browse to https://shell.azure.com
2. Specify your subscription by running this command with your subscription ID: ```az account set --subscription YOUR_SUBSCRIPTION_ID```. You will need to run this every time after restarting your shell, otherwise it may default you to the wrong subscription, and you will see an error similar to `azurerm_public_ip.vm is empty tuple`.
3. double check your [HPC Cache prerequisites](https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-prereqs)
4. get the terraform examples
```bash
mkdir tf
cd tf
git init
git remote add origin -f https://github.com/Azure/Avere.git
git config core.sparsecheckout true
echo "src/terraform/*" >> .git/info/sparse-checkout
git pull origin main
```
5. `cd src/terraform/examples/HPC\ Cache/1-filer`
6. `code main.tf` to edit the local variables section at the top of the file, to customize to your preferences
7. execute `terraform init` in the directory of `main.tf`.
8. execute `terraform apply -auto-approve` to build the HPC Cache cluster
Once installed, you will be able to mount the HPC Cache cluster using the `mount_addresses` output and following the [documentation](https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-mount); a sketch of a typical client mount follows.
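As a rough sketch, assuming one of the `mount_addresses` values is `10.0.1.11` (a placeholder) and the `/nfs1data` namespace path used by this example, a client mount with the documented HPC Cache NFS options might look like:

```bash
# mount the HPC Cache namespace junction on a client (example placeholder values)
sudo mkdir -p /mnt/hpccache
sudo mount -t nfs -o hard,proto=tcp,mountproto=tcp,retry=30 10.0.1.11:/nfs1data /mnt/hpccache
```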
When you are done using the cluster, you can destroy it by running `terraform destroy -auto-approve` or just delete the three resource groups created.
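Alternatively, deleting the resource groups directly should work; assuming the resource group names from the example locals:

```bash
# remove the three resource groups created by this example
az group delete --name hpc_cache_resource_group --yes --no-wait
az group delete --name filer_resource_group --yes --no-wait
az group delete --name network_resource_group --yes --no-wait
```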

View file

@@ -1,242 +0,0 @@
// customize the HPC Cache by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
// network details
network_resource_group_name = "network_resource_group"
// hpc cache details
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// nfs filer related variables
filer_resource_group_name = "filer_resource_group"
vm_admin_username = "azureuser"
// use either SSH Key data or admin password, if ssh_key_data is specified
// then admin_password is ignored
vm_admin_password = "ReplacePassword$"
unique_name = "hammerspace1"
hammerspace_image_id = ""
use_highly_available = false
anvil_configuration = local.use_highly_available ? "High Availability" : "Standalone"
data_subnet_mask_bits = 25
anvil_data_cluster_ip = "10.0.2.110" // leave blank to be dynamic
dsx_instance_count = 1
// More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes
// vm_size = "Standard_F16s_v2"
// vm_size = "Standard_F32s_v2"
// vm_size = "Standard_F48s_v2"
anvil_instance_type = "Standard_F16s_v2"
// More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes
// vm_size = "Standard_F16s_v2"
// vm_size = "Standard_F32s_v2"
// vm_size = "Standard_F48s_v2"
dsx_instance_type = "Standard_F16s_v2"
// storage_account_type = "Standard_LRS"
// storage_account_type = "StandardSSD_LRS"
storage_account_type = "Premium_LRS"
// more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/
// disk_size_gb = 127 // P10, E10, S10
metadata_disk_size_gb = 255 // P15, E15, S15
// disk_size_gb = 511 // P20, E20, S20
// disk_size_gb = 1023 // P30, E30, S30
// disk_size_gb = 2047 // P40, E40, S40
// disk_size_gb = 4095 // P50, E50, S50
// disk_size_gb = 8191 // P60, E60, S60
// disk_size_gb = 16383 // P70, E70, S70
// metadata_disk_size_gb = 32767 // P80, E80, S80
// more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/
// disk_size_gb = 127 // P10, E10, S10
// disk_size_gb = 255 // P15, E15, S15
// disk_size_gb = 511 // P20, E20, S20
// disk_size_gb = 1023 // P30, E30, S30
// disk_size_gb = 2047 // P40, E40, S40
datadisk_size_gb = 4095 // P50, E50, S50
// disk_size_gb = 8191 // P60, E60, S60
// disk_size_gb = 16383 // P70, E70, S70
// data_disk_size_gb = 32767 // P80, E80, S80
hammerspace_filer_nfs_export_path = "/data"
}
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.12.0"
}
}
}
provider "azurerm" {
features {}
}
// the render network
module "network" {
source = "github.com/Azure/Avere/src/terraform/modules/render_network"
resource_group_name = local.network_resource_group_name
location = local.location
}
resource "azurerm_resource_group" "hpc_cache_rg" {
name = local.hpc_cache_resource_group_name
location = local.location
}
resource "azurerm_hpc_cache" "hpc_cache" {
name = local.cache_name
resource_group_name = azurerm_resource_group.hpc_cache_rg.name
location = azurerm_resource_group.hpc_cache_rg.location
cache_size_in_gb = local.cache_size
subnet_id = module.network.cloud_cache_subnet_id
sku_name = local.cache_throughput
}
resource "azurerm_resource_group" "nfsfiler" {
name = local.filer_resource_group_name
location = local.location
}
// the ephemeral filer
module "anvil" {
source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
hammerspace_image_id = local.hammerspace_image_id
unique_name = local.unique_name
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
anvil_configuration = local.anvil_configuration
anvil_instance_type = local.anvil_instance_type
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_ha_subnet_name = module.network.cloud_filers_ha_subnet_name
virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name
virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits
anvil_data_cluster_ip = local.anvil_data_cluster_ip
anvil_metadata_disk_storage_type = local.storage_account_type
anvil_metadata_disk_size = local.metadata_disk_size_gb
depends_on = [
module.network,
azurerm_resource_group.nfsfiler,
]
}
// the ephemeral filer
module "dsx" {
source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/dsx"
resource_group_name = azurerm_resource_group.nfsfiler.name
location = azurerm_resource_group.nfsfiler.location
hammerspace_image_id = local.hammerspace_image_id
unique_name = local.unique_name
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
dsx_instance_count = local.dsx_instance_count
dsx_instance_type = local.dsx_instance_type
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name
virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits
anvil_password = module.anvil.web_ui_password
anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip
anvil_domain = module.anvil.anvil_domain
dsx_data_disk_storage_type = local.storage_account_type
dsx_data_disk_size = local.datadisk_size_gb
depends_on = [
module.network,
azurerm_resource_group.nfsfiler,
]
}
module "anvil_configure" {
source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil-run-once-configure"
anvil_arm_virtual_machine_id = length(module.anvil.arm_virtual_machine_ids) == 0 ? "" : module.anvil.arm_virtual_machine_ids[0]
anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip
web_ui_password = module.anvil.web_ui_password
dsx_count = local.dsx_instance_count
nfs_export_path = local.hammerspace_filer_nfs_export_path
anvil_hostname = length(module.anvil.anvil_host_names) == 0 ? "" : module.anvil.anvil_host_names[0]
depends_on = [
module.anvil,
]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets" {
name = "nfs_targets"
resource_group_name = azurerm_resource_group.hpc_cache_rg.name
cache_name = azurerm_hpc_cache.hpc_cache.name
target_host_name = module.dsx.dsx_ip_addresses[0]
usage_model = local.usage_model
namespace_junction {
namespace_path = "/nfs1data"
nfs_export = local.hammerspace_filer_nfs_export_path
target_path = ""
}
depends_on = [
module.anvil_configure,
]
}
output "hammerspace_filer_addresses" {
value = module.dsx.dsx_ip_addresses
}
output "hammerspace_webui_address" {
value = module.anvil.anvil_data_cluster_ip
}
output "hammerspace_webui_address" {
value = module.anvil.anvil_data_cluster_ip
}
output "hammerspace_filer_export" {
value = local.hammerspace_filer_nfs_export_path
}
output "hammerspace_webui_username" {
value = module.anvil.web_ui_username
}
output "hammerspace_webui_password" {
value = module.anvil.web_ui_password
}
output "mount_addresses" {
value = azurerm_hpc_cache.hpc_cache.mount_addresses
}
output "export_namespace" {
value = tolist(azurerm_hpc_cache_nfs_target.nfs_targets.namespace_junction)[0].namespace_path
}

View file

@@ -23,34 +23,35 @@ locals {
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "WRITE_AROUND"
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}

View file

@@ -10,26 +10,26 @@ locals {
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// netapp filer details
@@ -42,10 +42,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}

View file

@@ -10,17 +10,17 @@ locals {
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
@@ -28,7 +28,7 @@ locals {
}
terraform {
- required_version = ">= 0.14.0"
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"

View file

@@ -8,26 +8,26 @@ locals {
vmss_resource_group_name = "vdbench_vmss_rg"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "hpccache"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "WRITE_WORKLOAD_15"
// create a globally unique name for the storage account
@@ -70,10 +70,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}
@@ -135,11 +136,11 @@ resource "azurerm_storage_container" "blob_container" {
resource "azurerm_storage_account_network_rules" "storage_acls" {
resource_group_name = azurerm_resource_group.storage.name
storage_account_name = azurerm_storage_account.storage.name
virtual_network_subnet_ids = [
module.network.cloud_cache_subnet_id,
// need for the controller to create the container
module.network.jumpbox_subnet_id,
]
default_action = "Deny"

View file

@@ -8,26 +8,26 @@ locals {
vmss_resource_group_name = "vdbench_vmss_rg"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "hpccache"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "WRITE_WORKLOAD_15"
// nfs filer related variables
@@ -58,10 +58,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}
@@ -113,6 +114,10 @@ module "nasfiler1" {
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets" {

View file

@@ -10,26 +10,26 @@ locals {
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
usage_model = "READ_HEAVY_INFREQ"
// nfs filer related variables
@@ -62,10 +62,11 @@ locals {
}
terraform {
+ required_version = ">= 0.14.0,< 0.16.0"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~>2.12.0"
+ version = "~>2.56.0"
}
}
}
@@ -125,6 +126,10 @@ module "nasfiler1" {
virtual_network_resource_group = local.network_resource_group_name
virtual_network_name = module.network.vnet_name
virtual_network_subnet_name = module.network.cloud_filers_subnet_name
+ depends_on = [
+ azurerm_resource_group.nfsfiler
+ ]
}
resource "azurerm_hpc_cache_nfs_target" "nfs_targets" {