Merge branch 'master' into vdbench_32

This commit is contained in:
rebecca-makar 2019-02-19 15:27:28 -05:00
Родитель 910ee3e9e5 11db367599
Коммит e80241e24d
11 изменённых файлов: 499 добавлений и 69 удалений

Просмотреть файл

@ -37,7 +37,7 @@ jobs:
pytest --disable-pytest-warnings test/test_vfxt_template_deploy.py \
-k test_deploy_template \
--location $VFXT_DEPLOY_LOCATION \
--doctest-modules --junitxml=junit/test-results01.xml
--doctest-modules --junitxml=junit/test-results01a.xml
displayName: 'Test template-based deployment of Avere vFXT'
condition: and(succeeded(), eq(variables['RUN_DEPLOY'], 'true'))
env:
@ -63,6 +63,21 @@ jobs:
AZURE_CLIENT_SECRET: $(AZURE-CLIENT-SECRET)
AZURE_SUBSCRIPTION_ID: $(AZURE-SUBSCRIPTION-ID)
- script: |
pytest --disable-pytest-warnings test/test_vfxt_template_deploy.py \
-k test_byovnet_deploy \
--location $VFXT_DEPLOY_LOCATION \
--doctest-modules --junitxml=junit/test-results01c.xml
displayName: 'Test deploy of vFXT with a VNET in a different resource group'
condition: and(succeeded(), eq(variables['RUN_BYOVNET'], 'true'))
env:
AVERE_ADMIN_PW: $(AVERE-ADMIN-PW)
AVERE_CONTROLLER_PW: $(AVERE-CONTROLLER-PW)
AZURE_TENANT_ID: $(AZURE-TENANT-ID)
AZURE_CLIENT_ID: $(AZURE-CLIENT-ID)
AZURE_CLIENT_SECRET: $(AZURE-CLIENT-SECRET)
AZURE_SUBSCRIPTION_ID: $(AZURE-SUBSCRIPTION-ID)
- script: |
if [ -f $VFXT_TEST_VARS_FILE ]; then
cat $VFXT_TEST_VARS_FILE
@ -85,24 +100,7 @@ jobs:
pytest --disable-pytest-warnings test/test_vfxt_cluster_status.py \
-k TestVfxtSupport \
--doctest-modules --junitxml=junit/test-results03.xml
CONTROLLER_IP=$(jq -r .controller_ip $VFXT_TEST_VARS_FILE)
CONTROLLER_NAME=$(jq -r .controller_name $VFXT_TEST_VARS_FILE)
CONTROLLER_USER=$(jq -r .controller_user $VFXT_TEST_VARS_FILE)
echo "CONTROLLER_IP : $CONTROLLER_IP"
echo "CONTROLLER_NAME: $CONTROLLER_NAME"
echo "CONTROLLER_USER: $CONTROLLER_USER"
ARTIFACTS_DIR="$BUILD_SOURCESDIRECTORY/test_artifacts"
mkdir -p $ARTIFACTS_DIR
tar -zcvf ${ARTIFACTS_DIR}/vfxt_artifacts_${CONTROLLER_NAME}.tar.gz vfxt_artifacts_*
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -r $CONTROLLER_USER@$CONTROLLER_IP:~/*.log $ARTIFACTS_DIR/.
scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ~/.ssh/* $CONTROLLER_USER@$CONTROLLER_IP:~/.ssh/.
echo "vfxt.log from $CONTROLLER_NAME:"
cat $ARTIFACTS_DIR/vfxt.log
displayName: 'Collect vFXT deployment artifacts and dump vfxt.log'
displayName: 'Collect vFXT deployment artifacts'
condition: always()
env:
AVERE_ADMIN_PW: $(AVERE-ADMIN-PW)
@ -112,6 +110,24 @@ jobs:
AZURE_CLIENT_SECRET: $(AZURE-CLIENT-SECRET)
AZURE_SUBSCRIPTION_ID: $(AZURE-SUBSCRIPTION-ID)
- script: |
DEPLOY_ID=$(jq -r .deploy_id $VFXT_TEST_VARS_FILE)
CONTROLLER_NAME=$(jq -r .controller_name $VFXT_TEST_VARS_FILE)
T_ARTIFACTS_DIR="vfxt_artifacts_${DEPLOY_ID}"
echo "DEPLOY_ID: $DEPLOY_ID"
echo "CONTROLLER_NAME: $CONTROLLER_NAME"
echo "T_ARTIFACTS_DIR: $T_ARTIFACTS_DIR"
P_ARTIFACTS_DIR="$BUILD_SOURCESDIRECTORY/test_artifacts"
mkdir -p $P_ARTIFACTS_DIR
tar -zcvf ${P_ARTIFACTS_DIR}/vfxt_artifacts_${DEPLOY_ID}.tar.gz ${T_ARTIFACTS_DIR}
echo "vfxt.log from ${T_ARTIFACTS_DIR}:"
cat ${T_ARTIFACTS_DIR}/vfxt.log
displayName: 'Archive deployment artifacts and dump vfxt.log'
condition: always()
- script: |
grep -i -C 5 -e vfxt:ERROR -e exception $BUILD_SOURCESDIRECTORY/test_artifacts/vfxt.log
displayName: 'Grep errors from vfxt.log (+/- 5 lines)'
@ -151,6 +167,10 @@ jobs:
RESOURCE_GROUP=$(jq -r .resource_group $VFXT_TEST_VARS_FILE)
echo "RESOURCE_GROUP: $RESOURCE_GROUP"
az group delete --yes -n $RESOURCE_GROUP
if [ "true" = "$RUN_BYOVNET" ]; then
echo "RESOURCE_GROUP (vnet): ${RESOURCE_GROUP}-vnet"
az group delete --yes -n "${RESOURCE_GROUP}-vnet"
fi
displayName: 'Clean up resource group'
condition: and(always(), ne(variables['SKIP_RG_CLEANUP'], 'true'))
env:

Просмотреть файл

@ -24,19 +24,19 @@
"type": "string",
"defaultValue": "[resourceGroup().name]",
"metadata": {
"description": "The resource group name for the VNET. If createVirtualNetwork is set to true, the current resource group must be specified, otherwise the value should be blank."
"description": "The resource group name for the VNET. If createVirtualNetwork is set to true, this field should be blank. Otherwise, provide the name of the resource group containing an existing VNET."
}
},
"virtualNetworkName": {
"type": "string",
"metadata": {
"description": "The name used for the virtual network. If createVirtualNetwork is set to true, you may reuse the unique name above."
"description": "The unique name used for the virtual network. If createVirtualNetwork is set to true, you may reuse the unique name above."
}
},
"virtualNetworkSubnetName": {
"type": "string",
"metadata": {
"description": "The unique name used for the virtual network subnet. If createVirtualNetwork is set to true, you may reuse the unique name above."
"description": "The unique name used for the virtual network subnet. If createVirtualNetwork is set to true, you may reuse the unique name above."
}
},
"vnetAddressSpacePrefix":{
@ -189,7 +189,7 @@
"virtualNetworkSubnetName": "[parameters('virtualNetworkSubnetName')]",
"addressPrefix": "[parameters('vnetAddressSpacePrefix')]",
"subnetPrefix": "[parameters('subnetAddressRangePrefix')]",
"useAvereBackedStorageAccount": "[parameters('useAvereBackedStorageAccount')]",
"useAvereBackedStorageAccount": "[parameters('useAvereBackedStorageAccount')]",
"avereBackedStorageAccountName": "[parameters('avereBackedStorageAccountName')]",
"controllerName": "[parameters('controllerName')]",
"controllerAdminUsername": "[parameters('controllerAdminUsername')]",

Просмотреть файл

@ -0,0 +1,230 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"uniqueName": {
"type": "string",
"metadata": {
"description": "The unique name used as a basis for resource names."
}
},
"virtualNetworkName": {
"type": "string",
"defaultValue": "[concat(parameters('uniqueName'), '-vnet')]",
"metadata": {
"description": "The name of the virtual network (VNET)."
}
},
"virtualNetworkSubnetName": {
"type": "string",
"defaultValue": "[concat(parameters('uniqueName'), '-subnet')]",
"metadata": {
"description": "The name of the subnet in the VNET."
}
},
"vnetAddressSpacePrefix": {
"type": "string",
"defaultValue": "10.0.0.0/16",
"metadata": {
"description": "The IP address prefix of the virtual network (VNET)."
}
},
"subnetAddressRangePrefix": {
"type": "string",
"defaultValue": "10.0.0.0/24",
"metadata": {
"description": "The IP address range prefix of the subnet in the VNET."
}
},
"jumpboxAdminUsername": {
"type": "string",
"defaultValue": "azureuser",
"metadata": {
"description": "The administrative username for the jumpbox."
}
},
"jumpboxSSHKeyData": {
"type": "string",
"metadata": {
"description": "The SSH public key used to connect to the jumpbox."
}
}
},
"variables": {
"vmSku": "Standard_A1",
"uniqueName": "[parameters('uniqueName')]",
"virtualNetworkName": "[parameters('virtualNetworkName')]",
"subnetName": "[parameters('virtualNetworkSubnetName')]",
"addressPrefix": "[parameters('vnetAddressSpacePrefix')]",
"subnetPrefix": "[parameters('subnetAddressRangePrefix')]",
"subnetRef": "[resourceId('Microsoft.Network/virtualNetworks/subnets', variables('virtualNetworkName'), variables('subnetName'))]",
"publicIPAddressName": "[concat(variables('uniqueName'), '-publicip')]",
"storageAccountType": "Standard_LRS",
"jumpboxName": "[concat('jbox-', variables('uniqueName'))]",
"jumpboxSAName": "[concat(variables('uniqueName'), 'jbsa')]",
"jumpboxOSDiskName": "[concat(variables('jumpboxName'), '-osdisk')]",
"jumpboxIPConfigName": "[concat(variables('jumpboxName'), '-ipconfig')]",
"jumpboxNicName": "[concat(variables('jumpboxName'), '-nic')]",
"jumpboxSSHKeyPath": "[concat('/home/',parameters('jumpboxAdminUsername'),'/.ssh/authorized_keys')]",
"osType": {
"publisher": "Canonical",
"offer": "UbuntuServer",
"sku": "16.04-LTS",
"version": "latest"
},
"imageReference": "[variables('osType')]"
},
"resources": [
{
"apiVersion": "2017-10-01",
"type": "Microsoft.Network/virtualNetworks",
"name": "[variables('virtualNetworkName')]",
"location": "[resourceGroup().location]",
"properties": {
"addressSpace": {
"addressPrefixes": [
"[variables('addressPrefix')]"
]
},
"subnets": [
{
"name": "[variables('subnetName')]",
"properties": {
"addressPrefix": "[variables('subnetPrefix')]",
"serviceEndpoints": [
{
"service": "Microsoft.Storage"
}
]
}
}
]
}
},
{
"type": "Microsoft.Network/publicIPAddresses",
"name": "[variables('publicIPAddressName')]",
"location": "[resourceGroup().location]",
"apiVersion": "2017-10-01",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{
"type": "Microsoft.Network/networkInterfaces",
"name": "[variables('jumpboxNicName')]",
"location": "[resourceGroup().location]",
"apiVersion": "2017-10-01",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]",
"[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
],
"properties": {
"ipConfigurations": [
{
"name": "[variables('jumpboxIPConfigName')]",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
},
"subnet": {
"id": "[variables('subnetRef')]"
}
}
}
]
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"name": "[variables('jumpboxSAName')]",
"location": "[resourceGroup().location]",
"apiVersion": "2015-06-15",
"properties": {
"accountType": "[variables('storageAccountType')]"
}
},
{
"type": "Microsoft.Compute/virtualMachines",
"name": "[variables('jumpboxName')]",
"location": "[resourceGroup().location]",
"apiVersion": "2017-03-30",
"dependsOn": [
"[concat('Microsoft.Storage/storageAccounts/', variables('jumpboxSAName'))]",
"[concat('Microsoft.Network/networkInterfaces/', variables('jumpboxNicName'))]"
],
"properties": {
"hardwareProfile": {
"vmSize": "[variables('vmSku')]"
},
"osProfile": {
"computerName": "[variables('jumpboxName')]",
"adminUsername": "[parameters('jumpboxAdminUsername')]",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"path": "[variables('jumpboxSSHKeyPath')]",
"keyData": "[parameters('jumpboxSSHKeyData')]"
}
]
}
}
},
"storageProfile": {
"imageReference": "[variables('imageReference')]",
"osDisk": {
"name": "[variables('jumpboxOSDiskName')]",
"caching": "ReadWrite",
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces',variables('jumpboxNicName'))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri": "[concat('http://',variables('jumpboxSAName'),'.blob.core.windows.net')]"
}
}
}
}
],
"outputs": {
"location": {
"type": "string",
"value": "[resourceGroup().location]"
},
"public_host": {
"type": "string",
"value": "[variables('jumpboxName')]"
},
"public_ip_address": {
"type": "string",
"value": "[reference(resourceId('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))).ipAddress]"
},
"resource_group": {
"type": "string",
"value": "[resourceGroup().name]"
},
"subnet_id": {
"type": "string",
"value": "[concat(resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName')),'/subnets/',variables('subnetName'))]"
},
"subnet_name": {
"type": "string",
"value": "[variables('subnetName')]"
},
"virtual_network_name": {
"type": "string",
"value": "[variables('virtualNetworkName')]"
}
}
}

Просмотреть файл

@ -24,19 +24,19 @@
"type": "string",
"defaultValue": "[resourceGroup().name]",
"metadata": {
"description": "The resource group name for the VNET. If createVirtualNetwork is set to true, the current resource group must be specified, otherwise the value should be blank."
"description": "The resource group name for the VNET. If createVirtualNetwork is set to true, this field should be blank. Otherwise, provide the name of the resource group containing an existing VNET."
}
},
"virtualNetworkName": {
"type": "string",
"metadata": {
"description": "The name used for the virtual network. If createVirtualNetwork is set to true, you may reuse the unique name above."
"description": "The unique name used for the virtual network. If createVirtualNetwork is set to true, you may reuse the unique name above."
}
},
"virtualNetworkSubnetName": {
"type": "string",
"metadata": {
"description": "The unique name used for the virtual network subnet. If createVirtualNetwork is set to true, you may reuse the unique name above."
"description": "The unique name used for the virtual network subnet. If createVirtualNetwork is set to true, you may reuse the unique name above."
}
},
"vnetAddressSpacePrefix":{
@ -189,7 +189,7 @@
"virtualNetworkSubnetName": "[parameters('virtualNetworkSubnetName')]",
"addressPrefix": "[parameters('vnetAddressSpacePrefix')]",
"subnetPrefix": "[parameters('subnetAddressRangePrefix')]",
"useAvereBackedStorageAccount": "[parameters('useAvereBackedStorageAccount')]",
"useAvereBackedStorageAccount": "[parameters('useAvereBackedStorageAccount')]",
"avereBackedStorageAccountName": "[parameters('avereBackedStorageAccountName')]",
"controllerName": "[parameters('controllerName')]",
"controllerAdminUsername": "[parameters('controllerAdminUsername')]",

Двоичные данные
src/vfxt/src/marketplace.zip

Двоичный файл не отображается.

Просмотреть файл

@ -2,14 +2,17 @@
import json
import logging
import os
from time import sleep
# from requirements.txt
import pytest
from arm_template_deploy import ArmTemplateDeploy
from scp import SCPClient
from sshtunnel import SSHTunnelForwarder
# local libraries
from lib.helpers import (create_ssh_client, run_ssh_command, run_ssh_commands)
from arm_template_deploy import ArmTemplateDeploy
from lib.helpers import (create_ssh_client, run_ssh_command, run_ssh_commands,
wait_for_op)
# COMMAND-LINE OPTIONS ########################################################
@ -113,7 +116,8 @@ def storage_account(test_vars):
@pytest.fixture()
def scp_cli(ssh_con):
def scp_con(ssh_con):
"""Create an SCP client based on an SSH connection to the controller."""
client = SCPClient(ssh_con.get_transport())
yield client
client.close()
@ -121,12 +125,47 @@ def scp_cli(ssh_con):
@pytest.fixture()
def ssh_con(test_vars):
client = create_ssh_client(test_vars["controller_user"],
test_vars["controller_ip"],
key_filename=test_vars["ssh_priv_key"])
"""Create an SSH connection to the controller."""
log = logging.getLogger("ssh_con")
ssh_params = { # common parameters for SSH tunnel, connection
"username": test_vars["controller_user"],
"hostname": test_vars["public_ip"],
"key_filename": test_vars["ssh_priv_key"]
}
ssh_tunnel = None
# If the controller's IP is not the same as the public IP, then we are
# using a jumpbox to get into the VNET containing the controller. In that
# case, create an SSH tunnel before connecting to the controller.
if test_vars["public_ip"] != test_vars["controller_ip"]:
log.debug("Creating an SSH tunnel to the jumpbox.")
ssh_tunnel = SSHTunnelForwarder(
ssh_params["hostname"],
ssh_username=ssh_params["username"],
ssh_pkey=ssh_params["key_filename"],
remote_bind_address=(test_vars["controller_ip"], 22),
)
ssh_tunnel.start()
sleep(5)
log.debug("SSH tunnel connected: {}".format(ssh_params))
log.debug("Local bind port: {}".format(ssh_tunnel.local_bind_port))
# When SSH'ing to the controller below, we'll instead connect to
# localhost through the local bind port connected to the SSH tunnel.
ssh_params["hostname"] = "127.0.0.1"
ssh_params["port"] = ssh_tunnel.local_bind_port
log.debug("Creating SSH client connection: {}".format(ssh_params))
client = create_ssh_client(**ssh_params)
yield client
log.debug("Closing SSH client connection.")
client.close()
if ssh_tunnel:
log.debug("Closing SSH tunnel.")
ssh_tunnel.stop()
@pytest.fixture(scope="module")
def test_vars(request):
@ -191,3 +230,35 @@ def test_vars(request):
log.debug("Saving vars to {} (test_vars_file)".format(test_vars_file))
with open(test_vars_file, "w") as vtvf:
json.dump(vars, vtvf, **cja)
@pytest.fixture()
def ext_vnet(test_vars):
"""
Creates a resource group containing a new VNET, subnet, public IP, and
jumpbox for use in other tests.
"""
log = logging.getLogger("ext_vnet")
vnet_atd = ArmTemplateDeploy(
location=test_vars["location"],
resource_group=test_vars["atd_obj"].deploy_id + "-rg-vnet"
)
rg = vnet_atd.create_resource_group()
log.info("Resource Group: {}".format(rg))
vnet_atd.deploy_name = "ext_vnet"
with open("{}/src/vfxt/azuredeploy.vnet.json".format(
test_vars["build_root"])) as tfile:
vnet_atd.template = json.load(tfile)
with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
ssh_pub_key = ssh_pub_f.read()
vnet_atd.deploy_params = {
"uniqueName": test_vars["atd_obj"].deploy_id,
"jumpboxAdminUsername": "azureuser",
"jumpboxSSHKeyData": ssh_pub_key
}
test_vars["ext_vnet"] = wait_for_op(vnet_atd.deploy()).properties.outputs
log.debug(test_vars["ext_vnet"])
return test_vars["ext_vnet"]

Просмотреть файл

@ -19,6 +19,27 @@ def create_ssh_client(username, hostname, port=22, password=None, key_filename=N
return ssh_client
def get_vm_ips(nm_client, resource_group, vm_name):
"""
Get the private and public IP addresses for a given virtual machine.
If a virtual machine has more than one IP address of each type, then
only the first one (as determined by the Azure SDK) is returned.
This function returns the following tuple: (private IP, public IP)
If a given VM does not have a private or public IP address, its tuple
entry will be None.
"""
for nif in nm_client.network_interfaces.list(resource_group):
if vm_name in nif.name:
ipc = nif.ip_configurations[0]
pub_ip = ipc.public_ip_address
if pub_ip:
pub_ip = pub_ip.ip_address
return (ipc.private_ip_address, pub_ip)
return (None, None) # (private IP, public IP)
def run_averecmd(ssh_client, node_ip, password, method, user='admin', args='',
timeout=60):
"""Run averecmd on the vFXT controller connected via ssh_client."""

Просмотреть файл

@ -126,7 +126,7 @@ class TestEdasim:
log = logging.getLogger("test_edasim_run")
node_ip = test_vars["deploy_edasim_outputs"]["jobsubmitter_0_ip_address"]["value"]
with SSHTunnelForwarder(
test_vars["controller_ip"],
test_vars["public_ip"],
ssh_username=test_vars["controller_user"],
ssh_pkey=test_vars["ssh_priv_key"],
remote_bind_address=(node_ip, 22),

Просмотреть файл

@ -60,7 +60,7 @@ class TestVDBench:
log = logging.getLogger("test_vdbench_run")
node_ip = test_vars["deploy_vd_outputs"]["node_0_ip_address"]["value"]
with SSHTunnelForwarder(
test_vars["controller_ip"],
test_vars["public_ip"],
ssh_username=test_vars["controller_user"],
ssh_pkey=test_vars["ssh_priv_key"],
remote_bind_address=(node_ip, 22),

Просмотреть файл

@ -21,7 +21,7 @@ from lib.helpers import (create_ssh_client, run_averecmd, run_ssh_commands,
class TestVfxtClusterStatus:
"""Basic vFXT cluster health tests."""
def test_basic_fileops(self, mnt_nodes, scp_cli, ssh_con, test_vars): # noqa: E501, F811
def test_basic_fileops(self, mnt_nodes, scp_con, ssh_con, test_vars): # noqa: E501, F811
"""
Quick check of file operations.
See check_node_basic_fileops.sh for more information.
@ -30,7 +30,7 @@ class TestVfxtClusterStatus:
pytest.skip("no storage account")
script_name = "check_node_basic_fileops.sh"
scp_cli.put(
scp_con.put(
"{0}/test/{1}".format(test_vars["build_root"], script_name),
r"~/.",
)
@ -86,13 +86,23 @@ class TestVfxtSupport:
assert(not cores_found)
def test_artifacts_collect(self, averecmd_params, scp_cli, test_vars): # noqa: F811, E501
def test_artifacts_collect(self, averecmd_params, scp_con, test_vars): # noqa: F811, E501
"""
Collect test artifacts (node logs, rolling trace) from each node.
Artifacts are stored to local directories.
"""
log = logging.getLogger("test_collect_artifacts")
artifacts_dir = "vfxt_artifacts_" + test_vars["atd_obj"].deploy_id
os.makedirs(artifacts_dir, exist_ok=True)
log.debug("Copying logs from controller to {}".format(artifacts_dir))
for lf in ["vfxt.log", "enablecloudtrace.log", "create_cluster_command.log"]:
scp_con.get("~/" + lf, artifacts_dir)
log.debug("Copying SSH keys to the controller")
scp_con.put(test_vars["ssh_priv_key"], "~/.ssh/.")
scp_con.put(test_vars["ssh_pub_key"], "~/.ssh/.")
nodes = run_averecmd(**averecmd_params, method="node.list")
log.debug("nodes found: {}".format(nodes))
for node in nodes:
@ -112,7 +122,7 @@ class TestVfxtSupport:
args=node)[node]["primaryClusterIP"]["IP"]
log.debug("tunneling to node {} using IP {}".format(node, node_ip))
with SSHTunnelForwarder(
test_vars["controller_ip"],
test_vars["public_ip"],
ssh_username=test_vars["controller_user"],
ssh_pkey=test_vars["ssh_priv_key"],
remote_bind_address=(node_ip, 22),

Просмотреть файл

@ -16,11 +16,17 @@ from uuid import uuid4
import pytest
# local libraries
from lib.helpers import split_ip_range, wait_for_op
from lib.helpers import get_vm_ips, split_ip_range, wait_for_op
class TestVfxtTemplateDeploy:
# TODO: modularize common code
def test_deploy_template(self, resource_group, test_vars): # noqa: F811
"""
Deploy a vFXT cluster.
- create a new VNET
- use an Avere-backed storage account
"""
log = logging.getLogger("test_deploy_template")
atd = test_vars["atd_obj"]
with open("{}/src/vfxt/azuredeploy-auto.json".format(
@ -29,38 +35,51 @@ class TestVfxtTemplateDeploy:
with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
ssh_pub_key = ssh_pub_f.read()
atd.deploy_params = {
"avereInstanceType": "Standard_E32s_v3",
"avereClusterName": atd.deploy_id + "-cluster",
"virtualNetworkResourceGroup": atd.resource_group,
"virtualNetworkName": atd.deploy_id + "-vnet",
"virtualNetworkSubnetName": atd.deploy_id + "-subnet",
"adminPassword": os.environ["AVERE_ADMIN_PW"],
"avereBackedStorageAccountName": atd.deploy_id + "sa",
"controllerName": atd.deploy_id + "-con",
"avereClusterName": atd.deploy_id + "-cluster",
"avereInstanceType": "Standard_E32s_v3",
"avereNodeCount": 3,
"controllerAdminUsername": "azureuser",
"controllerAuthenticationType": "sshPublicKey",
"controllerSSHKeyData": ssh_pub_key,
"controllerName": atd.deploy_id + "-con",
"controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
"avereNodeCount": 3,
"adminPassword": os.environ["AVERE_ADMIN_PW"],
"rbacRoleAssignmentUniqueId": str(uuid4()),
"controllerSSHKeyData": ssh_pub_key,
"enableCloudTraceDebugging": True,
"rbacRoleAssignmentUniqueId": str(uuid4()),
"createVirtualNetwork": True,
"virtualNetworkName": atd.deploy_id + "-vnet",
"virtualNetworkResourceGroup": atd.resource_group,
"virtualNetworkSubnetName": atd.deploy_id + "-subnet",
}
test_vars["controller_name"] = atd.deploy_params["controllerName"]
test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
test_vars["controller_name"] = atd.deploy_params["controllerName"]
test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
log.debug("Generated deploy parameters: \n{}".format(
json.dumps(atd.deploy_params, indent=4)))
atd.deploy_name = "test_deploy_template"
try:
deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
finally:
test_vars["controller_ip"] = atd.nm_client.public_ip_addresses.get(
# (c_priv_ip, c_pub_ip) = get_vm_ips(
# atd.nm_client, atd.resource_group, test_vars["controller_name"])
# test_vars["controller_ip"] = c_pub_ip or c_priv_ip
test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
atd.resource_group, "publicip-" + test_vars["controller_name"]
).ip_address
test_vars["controller_ip"] = test_vars["public_ip"]
def test_no_storage_account_deploy(self, resource_group, test_vars): # noqa: F811
def test_no_storage_account_deploy(self, resource_group, test_vars): # noqa: E501, F811
"""
Deploy a vFXT cluster.
- create a new VNET
- do NOT use an Avere-backed storage account
"""
log = logging.getLogger("test_deploy_template")
atd = test_vars["atd_obj"]
with open("{}/src/vfxt/azuredeploy-auto.json".format(
@ -69,38 +88,97 @@ class TestVfxtTemplateDeploy:
with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
ssh_pub_key = ssh_pub_f.read()
atd.deploy_params = {
"avereInstanceType": "Standard_E32s_v3",
"adminPassword": os.environ["AVERE_ADMIN_PW"],
"avereClusterName": atd.deploy_id + "-cluster",
"virtualNetworkResourceGroup": atd.resource_group,
"virtualNetworkName": atd.deploy_id + "-vnet",
"virtualNetworkSubnetName": atd.deploy_id + "-subnet",
"avereBackedStorageAccountName": atd.deploy_id + "sa",
"controllerName": atd.deploy_id + "-con",
"avereInstanceType": "Standard_E32s_v3",
"avereNodeCount": 3,
"controllerAdminUsername": "azureuser",
"controllerAuthenticationType": "sshPublicKey",
"controllerSSHKeyData": ssh_pub_key,
"controllerName": atd.deploy_id + "-con",
"controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
"avereNodeCount": 3,
"adminPassword": os.environ["AVERE_ADMIN_PW"],
"rbacRoleAssignmentUniqueId": str(uuid4()),
"controllerSSHKeyData": ssh_pub_key,
"enableCloudTraceDebugging": True,
"rbacRoleAssignmentUniqueId": str(uuid4()),
"createVirtualNetwork": True,
"virtualNetworkName": atd.deploy_id + "-vnet",
"virtualNetworkResourceGroup": atd.resource_group,
"virtualNetworkSubnetName": atd.deploy_id + "-subnet",
"useAvereBackedStorageAccount": False,
"avereBackedStorageAccountName": atd.deploy_id + "sa", # BUG
}
test_vars["controller_name"] = atd.deploy_params["controllerName"]
test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
log.debug("Generated deploy parameters: \n{}".format(
json.dumps(atd.deploy_params, indent=4)))
atd.deploy_name = "test_deploy_template"
atd.deploy_name = "test_no_storage_account_deploy"
try:
deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
time.sleep(60)
finally:
# (c_priv_ip, c_pub_ip) = get_vm_ips(
# atd.nm_client, atd.resource_group, test_vars["controller_name"])
# test_vars["controller_ip"] = c_pub_ip or c_priv_ip
test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
atd.resource_group, "publicip-" + test_vars["controller_name"]
).ip_address
test_vars["controller_ip"] = test_vars["public_ip"]
def test_byovnet_deploy(self, ext_vnet, resource_group, test_vars): # noqa: E501, F811
"""
Deploy a vFXT cluster.
- do NOT create a new VNET
- use an Avere-backed storage account
"""
log = logging.getLogger("test_deploy_template_byovnet")
atd = test_vars["atd_obj"]
with open("{}/src/vfxt/azuredeploy-auto.json".format(
test_vars["build_root"])) as tfile:
atd.template = json.load(tfile)
with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
ssh_pub_key = ssh_pub_f.read()
atd.deploy_params = {
"adminPassword": os.environ["AVERE_ADMIN_PW"],
"avereBackedStorageAccountName": atd.deploy_id + "sa",
"avereClusterName": atd.deploy_id + "-cluster",
"avereInstanceType": "Standard_E32s_v3",
"avereNodeCount": 3,
"controllerAdminUsername": "azureuser",
"controllerAuthenticationType": "sshPublicKey",
"controllerName": atd.deploy_id + "-con",
"controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
"controllerSSHKeyData": ssh_pub_key,
"enableCloudTraceDebugging": True,
"rbacRoleAssignmentUniqueId": str(uuid4()),
"createVirtualNetwork": False,
"virtualNetworkResourceGroup": ext_vnet["resource_group"]["value"],
"virtualNetworkName": ext_vnet["virtual_network_name"]["value"],
"virtualNetworkSubnetName": ext_vnet["subnet_name"]["value"],
}
test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
test_vars["controller_name"] = atd.deploy_params["controllerName"]
test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
log.debug("Generated deploy parameters: \n{}".format(
json.dumps(atd.deploy_params, indent=4)))
atd.deploy_name = "test_deploy_template_byovnet"
try:
deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
finally:
test_vars["controller_ip"] = atd.nm_client.public_ip_addresses.get(
atd.resource_group, "publicip-" + test_vars["controller_name"]
).ip_address
test_vars["controller_ip"] = get_vm_ips(
atd.nm_client, atd.resource_group, test_vars["controller_name"]
)[0]
test_vars["public_ip"] = ext_vnet["public_ip_address"]["value"]
time.sleep(60)
if __name__ == "__main__":
pytest.main(sys.argv)