update CacheWarmer documentation

Anthony Howe 2020-05-16 10:01:09 -04:00
Parent 65ccf0083e
Commit 76aef7d3bf
7 changed files: 5 additions and 158 deletions

Binary data
docs/images/terraform/cachewarmer-hpcc.png

Binary file not shown. Size before: 16 KiB → after: 15 KiB

Binary data
docs/images/terraform/cachewarmer.png

Binary file not shown. Size before: 13 KiB → after: 15 KiB

Binary data
docs/images/terraform/cachewarmerpipeline-hpcc.png (new file)

Binary file not shown. Size: 5.3 KiB

View file

@@ -37,6 +37,7 @@ locals {
  hpccache_cache_subnet_name = ""
  hpccache_network_name = ""
  hpccache_network_resource_group_name = ""
  hpccache_render_subnet_name = ""
  // paste the values from the jumpbox creation
  hpccache_resource_group_name = ""
@@ -117,6 +118,7 @@ module "cachewarmer_manager_install" {
  vmss_user_name = local.jumpbox_username
  vmss_password = local.vm_admin_password
  vmss_ssh_public_key = local.vm_ssh_key_data
  vmss_subnet_name = local.hpccache_render_subnet_name
  module_depends_on = [module.cachewarmer_build.module_depends_on_id]
}
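
For context, the new `vmss_subnet_name` argument pins the CacheWarmer worker VM scale set to the render subnet named by the `hpccache_render_subnet_name` local added above. A minimal sketch of the wiring, using only the arguments visible in this diff (any other arguments `cachewarmer_manager_install` takes are omitted here):

module "cachewarmer_manager_install" {
  source = "github.com/Azure/Avere/src/terraform/modules/cachewarmer_manager_install"

  // credentials the manager uses when it scales out worker VMSS nodes
  vmss_user_name      = local.jumpbox_username
  vmss_password       = local.vm_admin_password
  vmss_ssh_public_key = local.vm_ssh_key_data

  // place the worker VMSS in the render subnet introduced in this commit
  vmss_subnet_name = local.hpccache_render_subnet_name

  // chain this module after the CacheWarmer binaries are built
  module_depends_on = [module.cachewarmer_build.module_depends_on_id]
}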

View file

@@ -26,7 +26,7 @@ To simulate latency, the NFS filer will live in a different vnet, resource group
An NFS filer will be used to hold the bootstrap directory and the warm job directories. The example is broken into three phases. The third phase demonstrates how to chain the Terraform modules together, including deployment of the HPC Cache, mounting of all junctions, building and installation of the CacheWarmer, and finally the job submission. Once the third phase has completed, the cache is warmed with the desired content.
![The architecture](../../../../../docs/images/terraform/cachewarmerpipeline.png)
![The architecture](../../../../../docs/images/terraform/cachewarmerpipeline-hpcc.png)
## Deploy the Virtual Networks and Filer
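
The phase-3 chaining described above ends with a warm-job submission. A rough sketch of that final link, assuming a `cachewarmer_submitjob` module alongside the other CacheWarmer modules referenced in this commit; the module name and every argument below are assumptions for illustration, not taken from this diff:

// hypothetical tail of phase 3: submit the warm job once the manager is running
module "cachewarmer_submitjob" {
  source = "github.com/Azure/Avere/src/terraform/modules/cachewarmer_submitjob"

  // jumpbox connection details (assumed argument names)
  node_address   = module.jumpbox.jumpbox_address
  admin_username = module.jumpbox.jumpbox_username
  ssh_key_data   = local.vm_ssh_key_data

  // the junction to warm (assumed argument names; paths are placeholders)
  warm_target_export_path = "/nfs1data"
  warm_target_path        = "/"

  // run only after the manager install completes
  module_depends_on = [module.cachewarmer_manager_install.module_depends_on_id]
}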

View file

@@ -1,157 +0,0 @@
// customize the HPC Cache by editing the following local variables
locals {
  // the region of the deployment
  location = "eastus"

  // network details
  network_resource_group_name = "network_resource_group"

  // hpc cache details
  hpc_cache_resource_group_name = "hpc_cache_resource_group"

  // HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
  //    Standard_2G
  //    Standard_4G
  //    Standard_8G
  cache_throughput = "Standard_2G"

  // HPC Cache Size - 5 allowed sizes (GB) for the cache
  //    3072
  //    6144
  //    12288
  //    24576
  //    49152
  cache_size = 12288

  // unique name for cache
  cache_name = "uniquename"

  // usage model
  //    WRITE_AROUND
  //    READ_HEAVY_INFREQ
  //    WRITE_WORKLOAD_15
  usage_model = "READ_HEAVY_INFREQ"

  // nfs filer related variables
  filer_resource_group_name = "filer_resource_group"
  nfs_export_path           = "/nfs1data"
  vm_admin_username         = "azureuser"
  // use either SSH key data or an admin password; if ssh_key_data is
  // specified, then admin_password is ignored
  vm_admin_password = "ReplacePassword$"
  // if you use an SSH key, ensure ~/.ssh/id_rsa is populated with
  // permission 600 on the machine where you are running terraform
  vm_ssh_key_data = null //"ssh-rsa AAAAB3...."

  // jumpbox variable
  jumpbox_add_public_ip = true
}

provider "azurerm" {
  version = "~>2.8.0"
  features {}
}

// the render network
module "network" {
  source              = "github.com/Azure/Avere/src/terraform/modules/render_network"
  resource_group_name = local.network_resource_group_name
  location            = local.location
}

resource "azurerm_resource_group" "hpc_cache_rg" {
  name     = local.hpc_cache_resource_group_name
  location = local.location
  // the depends_on is necessary for destroy: due to the
  // limitation of the template deployment, the only
  // way to destroy template resources is to destroy
  // the resource group
  depends_on = [module.network]
}

resource "azurerm_hpc_cache" "hpc_cache" {
  name                = local.cache_name
  resource_group_name = azurerm_resource_group.hpc_cache_rg.name
  location            = azurerm_resource_group.hpc_cache_rg.location
  cache_size_in_gb    = local.cache_size
  subnet_id           = module.network.cloud_filers_subnet_id
  sku_name            = local.cache_throughput
}

resource "azurerm_resource_group" "nfsfiler" {
  name     = local.filer_resource_group_name
  location = local.location
}

// the ephemeral filer
module "nasfiler1" {
  source              = "github.com/Azure/Avere/src/terraform/modules/nfs_filer"
  resource_group_name = azurerm_resource_group.nfsfiler.name
  location            = azurerm_resource_group.nfsfiler.location
  admin_username      = local.vm_admin_username
  admin_password      = local.vm_admin_password
  ssh_key_data        = local.vm_ssh_key_data
  vm_size             = "Standard_D2s_v3"
  unique_name         = "nasfiler1"

  // network details
  virtual_network_resource_group = local.network_resource_group_name
  virtual_network_name           = module.network.vnet_name
  virtual_network_subnet_name    = module.network.cloud_filers_subnet_name
}

resource "azurerm_hpc_cache_nfs_target" "nfs_targets" {
  name                = "nfs_targets"
  resource_group_name = azurerm_resource_group.hpc_cache_rg.name
  cache_name          = azurerm_hpc_cache.hpc_cache.name
  target_host_name    = module.nasfiler1.primary_ip
  usage_model         = local.usage_model

  namespace_junction {
    namespace_path = local.nfs_export_path
    nfs_export     = module.nasfiler1.core_filer_export
    target_path    = ""
  }
}

module "jumpbox" {
  source              = "github.com/Azure/Avere/src/terraform/modules/jumpbox"
  resource_group_name = azurerm_resource_group.hpc_cache_rg.name
  location            = local.location
  admin_username      = local.vm_admin_username
  admin_password      = local.vm_admin_password
  ssh_key_data        = local.vm_ssh_key_data
  add_public_ip       = local.jumpbox_add_public_ip

  // network details
  virtual_network_resource_group = local.network_resource_group_name
  virtual_network_name           = module.network.vnet_name
  virtual_network_subnet_name    = module.network.jumpbox_subnet_name
}

// the cachewarmer configure module installs the cachewarmer service on the jumpbox
module "cachewarmer_configure" {
  source = "github.com/Azure/Avere/src/terraform/modules/cachewarmer_config"

  node_address   = module.jumpbox.jumpbox_address
  admin_username = module.jumpbox.jumpbox_username
  // the SSH key takes precedence: send the password only when no key is set
  admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password
  ssh_key_data   = local.vm_ssh_key_data

  nfs_address     = azurerm_hpc_cache.hpc_cache.mount_addresses[0]
  nfs_export_path = tolist(azurerm_hpc_cache_nfs_target.nfs_targets.namespace_junction)[0].namespace_path
}

output "jumpbox_username" {
  value = module.jumpbox.jumpbox_username
}

output "jumpbox_address" {
  value = module.jumpbox.jumpbox_address
}

output "mount_addresses" {
  value = azurerm_hpc_cache.hpc_cache.mount_addresses
}

output "export_namespace" {
  value = tolist(azurerm_hpc_cache_nfs_target.nfs_targets.namespace_junction)[0].namespace_path
}
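
The outputs above are what the later phases paste into their empty locals, the pattern visible in the hunks earlier in this commit ("paste the values from the jumpbox creation"). A sketch of a follow-on phase consuming them; the local names and every literal value are placeholders for illustration:

// hypothetical locals for the next phase, filled by hand from the outputs above
locals {
  // paste the values from the jumpbox creation
  jumpbox_username = "azureuser"  // from: terraform output jumpbox_username
  jumpbox_address  = "10.0.2.4"   // from: terraform output jumpbox_address

  // paste the cache mount details
  bootstrap_mount_address = "10.0.1.11"  // first of: terraform output mount_addresses
  bootstrap_export_path   = "/nfs1data"  // from: terraform output export_namespace
}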

View file

@@ -24,6 +24,7 @@ locals {
  filer_address = ""
  filer_export = ""
  vfxt_cache_subnet_name = ""
  vfxt_render_subnet_name = ""
  // paste the values from the controller creation
  controller_address = ""
@@ -111,6 +112,7 @@ module "cachewarmer_manager_install" {
  vmss_user_name = local.controller_username
  vmss_password = local.vm_admin_password
  vmss_ssh_public_key = local.vm_ssh_key_data
  vmss_subnet_name = local.vfxt_render_subnet_name
  module_depends_on = [module.cachewarmer_build.module_depends_on_id]
}