add HPC Cache Azure Blob Storage Target

This commit is contained in:
Anthony Howe 2020-03-27 21:18:11 -04:00
Родитель ebd9b03f22
Коммит 6b4dac4f0a
5 изменённых файлов: 211 добавлений и 5 удалений

Двоичные данные
docs/images/terraform/cloudfiler-hpcc.png Normal file

Двоичный файл не отображается.

После

Ширина:  |  Высота:  |  Размер: 10 KiB

Просмотреть файл

@ -8,10 +8,11 @@ The examples show how to deploy HPC Cache, Avere vFXT, and an NFS Filer from min
1. [HPC Cache](examples/HPC%20Cache)
1. [no-filer example](examples/HPC%20Cache/no-filers)
2. [HPC Cache mounting 1 IaaS NAS filer example](examples/HPC%20Cache/1-filer)
3. [HPC Cache mounting 3 IaaS NAS filers example](examples/HPC%20Cache/3-filers)
4. [HPC Cache and VDBench example](examples/HPC%20Cache/vdbench)
5. [HPC Cache and VMSS example](examples/HPC%20Cache/vmss)
2. [HPC Cache mounting Azure Blob Storage cloud core filer example](examples/HPC%20Cache/azureblobfiler)
3. [HPC Cache mounting 1 IaaS NAS filer example](examples/HPC%20Cache/1-filer)
4. [HPC Cache mounting 3 IaaS NAS filers example](examples/HPC%20Cache/3-filers)
5. [HPC Cache and VDBench example](examples/HPC%20Cache/vdbench)
6. [HPC Cache and VMSS example](examples/HPC%20Cache/vmss)
2. [Avere vFXT](examples/vfxt)
1. [no-filer example](examples/vfxt/no-filers)
2. [Avere vFXT mounting Azure Blob Storage cloud core filer example](examples/vfxt/azureblobfiler)

Просмотреть файл

@ -1,8 +1,9 @@
# HPC Cache
The examples in this folder build various configurations of the Avere vFXT with IaaS based filers:
The examples in this folder build various configurations of the HPC Cache with Azure Storage Account and IaaS based filers:
1. [no-filer example](no-filers/)
2. [HPC Cache mounting Azure Blob Storage cloud core filer example](azureblobfiler/)
2. [HPC Cache mounting 1 IaaS NAS filer example](1-filer/)
3. [HPC Cache mounting 3 IaaS NAS filers example](3-filers/)
4. [HPC Cache and VDBench example](vdbench/)

Просмотреть файл

@ -0,0 +1,38 @@
# HPC Cache mounting Azure Blob Storage cloud core filer example
This example configures a render network, controller, and HPC Cache with an Azure Blob Storage cloud core filer as shown in the diagram below:
![The architecture](../../../../../docs/images/terraform/cloudfiler-hpcc.png)
## Deployment Instructions
To run the example, execute the following instructions. This assumes use of Azure Cloud Shell. If you are using your own environment instead, ensure you install the vfxt provider as described in the [build provider instructions](../../../providers/terraform-provider-avere#build-the-terraform-provider-binary), and follow the [instructions to setup terraform for the Azure environment](https://docs.microsoft.com/en-us/azure/terraform/terraform-install-configure).
1. browse to https://shell.azure.com
2. Specify your subscription by running this command with your subscription ID: ```az account set --subscription YOUR_SUBSCRIPTION_ID```. You will need to run this every time after restarting your shell, otherwise it may default you to the wrong subscription, and you will see an error similar to `azurerm_public_ip.vm is empty tuple`.
3. double check your [HPC Cache prerequisites](https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-prereqs)
4. get the terraform examples
```bash
mkdir tf
cd tf
git init
git remote add origin -f https://github.com/Azure/Avere.git
git config core.sparsecheckout true
echo "src/terraform/*" >> .git/info/sparse-checkout
git pull origin master
```
5. `cd src/terraform/examples/HPC\ Cache/azureblobfiler`
6. `code main.tf` to edit the local variables section at the top of the file, to customize to your preferences. If you are using an [ssh key](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys), ensure that ~/.ssh/id_rsa is populated.
7. execute `terraform init` in the directory of `main.tf`.
8. execute `terraform apply -auto-approve` to build the HPC Cache cluster
Once installed you will be able to mount the HPC Cache cluster using the `mount_addresses` output and following the [documentation](https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-mount).
When you are done using the cluster, you can destroy it by running `terraform destroy -auto-approve` or just delete the three resource groups created.

Просмотреть файл

@ -0,0 +1,166 @@
// customize the HPC Cache by editing the following local variables
locals {
// the region of the deployment
location = "eastus"
// network details
network_resource_group_name = "network_resource_group"
// hpc cache details
hpc_cache_resource_group_name = "hpc_cache_resource_group"
// HPC Cache Throughput SKU - 3 allowed values for throughput (GB/s) of the cache
// Standard_2G
// Standard_4G
// Standard_8G
cache_throughput = "Standard_2G"
// HPC Cache Size - 5 allowed sizes (GBs) for the cache
// 3072
// 6144
// 12288
// 24576
// 49152
cache_size = 12288
// unique name for cache
cache_name = "uniquename"
// usage model
// WRITE_AROUND
// READ_HEAVY_INFREQ
// WRITE_WORKLOAD_15
// NOTE(review): usage_model is not referenced anywhere else in this file —
// confirm whether the blob target was meant to consume it
usage_model = "READ_HEAVY_INFREQ"
// storage details
storage_resource_group_name = "storage_resource_group"
// create a globally unique name for the storage account
// NOTE(review): left empty — terraform apply will fail until a globally
// unique storage account name is supplied here
storage_account_name = ""
avere_storage_container_name = "hpccache"
// per the hpc cache documentation: https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-add-storage
// customers who joined during the preview (before GA), will need to
// swap the display names below. This will manifest as the following
// error:
// Error: A Service Principal with the Display Name "HPC Cache Resource Provider" was not found
//
//hpc_cache_principal_name = "StorageCache Resource Provider"
hpc_cache_principal_name = "HPC Cache Resource Provider"
}
// pin the azurerm provider to the 2.3.x series so the deployment is
// reproducible across terraform runs
provider "azurerm" {
version = "~>2.3.0"
features {}
}
// the render network: a shared module that creates the virtual network,
// subnets (cloud cache, cloud filers, jumpbox), and NSGs used below
module "network" {
source = "github.com/Azure/Avere/src/terraform/modules/render_network"
resource_group_name = local.network_resource_group_name
location = local.location
}
// resource group that holds the HPC Cache instance
resource "azurerm_resource_group" "hpc_cache_rg" {
name = local.hpc_cache_resource_group_name
location = local.location
// the depends on is necessary for destroy. Due to the
// limitation of the template deployment, the only
// way to destroy template resources is to destroy
// the resource group
depends_on = [module.network]
}
// the HPC Cache instance, deployed into the cloud filers subnet of the
// render network. Size and throughput are driven by the locals above.
resource "azurerm_hpc_cache" "hpc_cache" {
  name                = local.cache_name
  resource_group_name = azurerm_resource_group.hpc_cache_rg.name
  location            = azurerm_resource_group.hpc_cache_rg.location
  cache_size_in_gb    = local.cache_size
  subnet_id           = module.network.cloud_filers_subnet_id
  // use the configurable throughput SKU; this was previously hard-coded
  // to "Standard_2G", which silently ignored local.cache_throughput
  sku_name            = local.cache_throughput
}
// resource group that holds the storage account backing the blob target
resource "azurerm_resource_group" "storage" {
name = local.storage_resource_group_name
location = local.location
}
// storage account that backs the HPC Cache blob target; locally-redundant
// standard storage is sufficient for the cache's cloud core filer
resource "azurerm_storage_account" "storage" {
name = local.storage_account_name
resource_group_name = azurerm_resource_group.storage.name
location = azurerm_resource_group.storage.location
account_tier = "Standard"
account_replication_type = "LRS"
// if the nsg associations do not complete before the storage account
// create is started, it will fail with "subnet updating"
depends_on = [module.network]
}
// blob container mounted by the HPC Cache as the cloud core filer
// NOTE(review): no container_access_type is set — presumably the provider's
// private default is intended; confirm
resource "azurerm_storage_container" "blob_container" {
name = local.avere_storage_container_name
storage_account_name = azurerm_storage_account.storage.name
}
/*
// Azure Storage ACLs on the subnet are not compatible with the azurerm_storage_container blob_container
resource "azurerm_storage_account_network_rules" "storage_acls" {
resource_group_name = azurerm_resource_group.storage.name
storage_account_name = azurerm_storage_account.storage.name
virtual_network_subnet_ids = [
module.network.cloud_cache_subnet_id,
// need for the controller to create the container
module.network.jumpbox_subnet_id,
]
default_action = "Deny"
depends_on = [azurerm_storage_container.blob_container]
}*/
// look up the HPC Cache first-party service principal by display name so it
// can be granted access to the storage account (see locals for the
// preview-vs-GA display-name caveat)
data "azuread_service_principal" "hpc_cache_sp" {
display_name = local.hpc_cache_principal_name
}
// grant the HPC Cache service principal management rights on the storage
// account, required before the cache can attach the blob target
resource "azurerm_role_assignment" "storage_account_contrib" {
scope = azurerm_storage_account.storage.id
role_definition_name = "Storage Account Contributor"
principal_id = data.azuread_service_principal.hpc_cache_sp.object_id
}
// grant the HPC Cache service principal read/write access to the blob data
// itself (management rights alone are not sufficient)
resource "azurerm_role_assignment" "storage_blob_data_contrib" {
scope = azurerm_storage_account.storage.id
role_definition_name = "Storage Blob Data Contributor"
principal_id = data.azuread_service_principal.hpc_cache_sp.object_id
}
// delay 180s on linux or windows for the role assignments to propagate.
// there is similar guidance in the hpc cache documentation: https://docs.microsoft.com/en-us/azure/hpc-cache/hpc-cache-add-storage
resource "null_resource" "delay" {
depends_on = [
azurerm_role_assignment.storage_account_contrib,
azurerm_role_assignment.storage_blob_data_contrib,
]
// `sleep` covers linux shells; when it is unavailable (windows cmd) the
// fallback pings localhost 180 times (~180s) with output discarded to nul
provisioner "local-exec" {
command = "sleep 180 || ping -n 180 127.0.0.1 > nul"
on_failure = continue
}
}
// attach the blob container to the cache as a storage target, exposed to
// clients under the /blob_storage namespace path
resource "azurerm_hpc_cache_blob_target" "blob_target1" {
name = "azureblobtarget"
resource_group_name = azurerm_resource_group.hpc_cache_rg.name
cache_name = azurerm_hpc_cache.hpc_cache.name
storage_container_id = azurerm_storage_container.blob_container.resource_manager_id
namespace_path = "/blob_storage"
// wait for the role-assignment propagation delay above; attaching the
// target too early fails per the hpc cache documentation
depends_on = [null_resource.delay]
}
// client-facing IP addresses to use when NFS-mounting the cache
output "mount_addresses" {
value = azurerm_hpc_cache.hpc_cache.mount_addresses
}
// namespace path under which the blob target is exported by the cache
output "export_namespace" {
value = azurerm_hpc_cache_blob_target.blob_target1.namespace_path
}