Updated for new lemur and marketplace lustre

This commit is contained in:
Paul Edwards 2021-09-08 09:45:18 +01:00
Parent b37e05d034
Commit 8e2d92951d
17 changed files with 121 additions and 543 deletions

View file

@@ -48,14 +48,14 @@ These can be read by packer from a JSON file. Use this template to create `opti
"var_client_id": "",
"var_client_secret": "",
"var_resource_group": "",
"var_image": "lustre-7.8-lustre-2.13.5"
"var_image": "azurehpc-lustre-2.12.5"
}
```
Use the following command to build with packer:
```
packer build -var-file=options.json centos-7.8-lustre-2.12.5.json
packer build -var-file=options.json azurehpc-lustre-2.12.5.json
```
Once this successfully completes, the image will be available.
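As a sanity check, something like the following Azure CLI call (a hedged sketch; the resource group placeholder and image name must match the values in `options.json`) confirms the image exists and prints the resource ID that the `imageResourceId` deployment parameter accepts when a custom image is used instead of the marketplace default:
```
az image show \
  --resource-group <var_resource_group> \
  --name azurehpc-lustre-2.12.5 \
  --query id --output tsv
```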
@@ -71,8 +71,7 @@ The "Deploy to Azure" button can be used once the image is available (alternativ
| ossSku | The SKU for the OSS VMs |
| instanceCount | The number of OSS VMs |
| rsaPublicKey | The RSA public key to access the VMs |
| imageResourceGroup | The name of the resource group containing the image |
| imageName | The name of the Lustre image to use |
| imageResourceId | The Lustre image resource ID |
| existingVnetResourceGroupName | The resource group containing the VNET where Lustre is to be deployed |
| existingVnetName | The name of the VNET where Lustre is to be deployed |
| existingSubnetName | The name of the subnet where Lustre is to be deployed |
@@ -91,10 +90,10 @@ The "Deploy to Azure" button can be used once the image is available (alternativ
These additional parameters can be used to enable HSM for the Lustre deployment (a SAS example follows the table).
| Parameter | Description |
|------------------|------------------------------------|
|------------------|-------------------------------------|
| storageAccount | The storage account to use for HSM |
| storageContainer | The container name to use |
| storageKey | The key for the storage account |
| storageSas | The SAS key for the storage account |
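Since the template now takes `storageSas` instead of `storageKey`, only a scoped SAS token is needed at deploy time. One hedged way to mint one with the Azure CLI (account, key, container, and expiry are placeholders):
```
az storage container generate-sas \
  --account-name <storageAccount> \
  --account-key "$STORAGE_KEY" \
  --name <storageContainer> \
  --permissions racwdl \
  --expiry 2022-01-01T00:00:00Z \
  --output tsv
```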
#### Options for Logging with Log Analytics

File diff suppressed because one or more lines are too long

View file

@@ -3,169 +3,169 @@
"contentVersion": "1.0.0.0",
"parameters": {
"name": {
"type": "string",
"maxLength": 20,
"type": "String",
"metadata": {
"description": "The name for the Lustre filesystem."
}
},
"mdsSku": {
"defaultValue": "Standard_D8s_v3",
"type": "string",
"metadata": "The SKU for the MDS"
"type": "String",
"metadata": {
"description": "The SKU for the Lustre MDS."
}
},
"ossSku": {
"defaultValue": "Standard_L8s_v2",
"type": "string",
"type": "String",
"metadata": {
"description": "The VM type for the Lustre nodes."
}
},
"instanceCount": {
"maxValue": 300,
"type": "int",
"type": "Int",
"metadata": {
"description": "Number of additional Lustre nodes."
}
},
"rsaPublicKey": {
"type": "string",
"type": "String",
"metadata": {
"description": "The RSA public key to access the nodes."
}
},
"imageResourceGroup": {
"type": "string",
"imageResourceId": {
"defaultValue": "azhpc:azurehpc-lustre:azurehpc-lustre-2_12:latest",
"type": "String",
"metadata": {
"description": "Name of the the resource group containing the Lustre image"
}
},
"imageName": {
"type": "string",
"metadata": {
"description": "Name of the Lustre image to use"
"description": "The resource ID for the image containing the Lustre image"
}
},
"existingVnetResourceGroupName": {
"type": "string",
"type": "String",
"metadata": {
"description": "Name of the resource group for the existing virtual network to deploy the scale set into."
}
},
"existingVnetName": {
"type": "string",
"type": "String",
"metadata": {
"description": "Name of the existing virtual network to deploy the scale set into."
}
},
"existingSubnetName": {
"type": "string",
"type": "String",
"metadata": {
"description": "Name of the existing subnet to deploy the scale set into."
}
},
"storageAccount": {
"type": "string",
"defaultValue": "",
"type": "String",
"metadata": {
"description": "Optional. The storage account to use (leave blank to disable HSM)"
}
},
"storageContainer": {
"type": "string",
"defaultValue": "",
"type": "String",
"metadata": {
"description": "The storage container to use for archive"
}
},
"storageKey": {
"type": "string",
"storageSas": {
"defaultValue": "",
"type": "String",
"metadata": {
"description": "The storage account key"
"description": "A SAS key for the storage account"
}
},
"logAnalyticsAccount": {
"type": "string",
"defaultValue": "",
"type": "String",
"metadata": {
"description": "Optional. The log analytics account to use (leave blank to disable logging)"
}
},
"logAnalyticsWorkspaceId": {
"type": "string",
"defaultValue": "",
"type": "String",
"metadata": {
"description": " The log analytics workspace id"
}
},
"logAnalyticsKey": {
"type": "string",
"defaultValue": "",
"type": "String",
"metadata": {
"description": "The log analytics account key"
}
},
"mdtStorageSku": {
"type": "string",
"defaultValue": "Premium_LRS",
"type": "String",
"metadata": {
"description": "The size of the MDT disks"
}
},
"mdtCacheOption": {
"type": "string",
"defaultValue": "ReadWrite",
"type": "String",
"metadata": {
"description": "The size of the MDT disks"
}
},
"mdtDiskSize": {
"type": "int",
"defaultValue": 1024,
"type": "Int",
"metadata": {
"description": "The size of the MDT disks"
}
},
"mdtNumDisks": {
"type": "int",
"defaultValue": 2,
"type": "Int",
"metadata": {
"description": "The number of disks in the MDT RAID"
}
},
"ostStorageSku": {
"type": "string",
"defaultValue": "Premium_LRS",
"type": "String",
"metadata": {
"description": "The size of the MDT disks"
}
},
"ostCacheOption": {
"type": "string",
"defaultValue": "None",
"type": "String",
"metadata": {
"description": "The size of the MDT disks"
}
},
"ostDiskSize": {
"type": "int",
"defaultValue": 1024,
"type": "Int",
"metadata": {
"description": "The size of the OSS disks"
}
},
"ostNumDisks": {
"type": "int",
"defaultValue": 6,
"type": "Int",
"metadata": {
"description": "The number of disks on each OSS"
}
},
"ossDiskSetup": {
"type": "string",
"defaultValue": "raid",
"allowedValues": [ "raid", "separate" ],
"allowedValues": [
"raid",
"separate"
],
"type": "String",
"metadata": {
"description": "Create a single RAID or use multiple OSTs"
}
@@ -175,7 +175,7 @@
"tagname": "[concat('LustreFS-', parameters('name'))]",
"subnet": "[resourceId(parameters('existingVnetResourceGroupName'), 'Microsoft.Network/virtualNetworks/subnets', parameters('existingVnetName'), parameters('existingSubNetName'))]",
"imageReference": {
"id": "[resourceId(parameters('imageResourceGroup'), 'Microsoft.Compute/images', parameters('imageName'))]"
"id": "[parameters('imageResourceId')]"
},
"ciScript": "",
"copy": [
@@ -207,11 +207,12 @@
}
]
},
"functions": [],
"resources": [
{
"name": "[concat(parameters('name'), '-NetworkInterface')]",
"type": "Microsoft.Network/networkInterfaces",
"apiVersion": "2018-08-01",
"name": "[concat(parameters('name'), '-NetworkInterface')]",
"location": "[resourceGroup().location]",
"tags": {
"filesystem": "[variables('tagname')]"
@@ -232,9 +233,9 @@
}
},
{
"name": "[parameters('name')]",
"type": "Microsoft.Compute/virtualMachines",
"apiVersion": "2017-03-30",
"apiVersion": "2019-07-01",
"name": "[parameters('name')]",
"location": "[resourceGroup().location]",
"dependsOn": [
"[resourceId('Microsoft.Network/networkInterfaces', concat(parameters('name'), '-NetworkInterface'))]"
@@ -280,8 +281,10 @@
}
},
{
"name": "[concat(parameters('name'), '-vmss')]",
"type": "Microsoft.Compute/virtualMachineScaleSets",
"apiVersion": "2018-10-01",
"name": "[concat(parameters('name'), '-vmss')]",
"location": "[resourceGroup().location]",
"tags": {
"filesystem": "[variables('tagname')]"
},
@@ -290,8 +293,6 @@
"tier": "Standard",
"capacity": "[parameters('instanceCount')]"
},
"apiVersion": "2018-10-01",
"location": "[resourceGroup().location]",
"properties": {
"overprovision": true,
"upgradePolicy": {
@@ -347,8 +348,5 @@
}
}
],
"outputs": {
},
"functions": [
]
"outputs": {}
}
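For reference, a command-line deployment equivalent to the "Deploy to Azure" button might look like the following (a hedged sketch; every value shown is a placeholder):
```
az deployment group create \
  --resource-group <deployment-rg> \
  --template-file azuredeploy.json \
  --parameters name=LustreFS instanceCount=4 \
      rsaPublicKey="$(cat ~/.ssh/id_rsa.pub)" \
      existingVnetResourceGroupName=<vnet-rg> \
      existingVnetName=<vnet-name> \
      existingSubnetName=<subnet-name>
```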

View file

@@ -4,11 +4,12 @@
azuredeploy.json \
azuredeploy_template.json \
ciScript \
packer/lustre-setup-scripts \
scripts \
setup_lustre.sh \
name \
instanceCount \
storageAccount \
storageKey \
storageSas \
storageContainer \
logAnalyticsAccount \
logAnalyticsWorkspaceId \

View file

@@ -38,7 +38,7 @@ echo -n "set --'," >>${script_name}.str
while test $# -gt 0
do
echo -n "' ',parameters('$1')," >>${script_name}.str
echo -n "' \"',parameters('$1'),'\"'," >>${script_name}.str
shift
done
echo "'" >>${script_name}.str

View file

@@ -1,44 +0,0 @@
#!/bin/bash
# arg: $1 = raid_device (e.g. /dev/md10)
# arg: $* = devices to use (can use globbing)
raid_device=$1
shift
devices=
while (( "$#" )); do
devices="$devices $1"
shift
done
echo "devices=$devices"
# print partition information
parted -s --list 2>/dev/null
# creating the partitions
for disk in $devices; do
echo "partitioning $disk"
parted -s $disk "mklabel gpt"
parted -s $disk -a optimal "mkpart primary 1 -1"
parted -s $disk print
parted -s $disk "set 1 raid on"
done
# make sure all the partitions are ready
sleep 10
# get the partition names
partitions=
for disk in $devices; do
partitions="$partitions $(lsblk -no kname -p $disk | tail -n1)"
done
echo "partitions=$partitions"
ndevices=$(echo $partitions | wc -w)
echo "creating raid device"
mdadm --create $raid_device --level 0 --raid-devices $ndevices $partitions || exit 1
sleep 10
mdadm --verbose --detail --scan > /etc/mdadm.conf
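For reference, an invocation of this removed script passed the RAID device first, then the member disks (device paths are illustrative):
```
sudo ./create_raid0.sh /dev/md10 /dev/nvme0n1 /dev/nvme1n1
```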

View file

@@ -1,3 +0,0 @@
#!/bin/bash
ethtool -L eth1 tx 8 rx 8 && ifconfig eth1 down && ifconfig eth1 up

View file

@@ -1,16 +0,0 @@
#!/bin/bash
# arg: $1 = lfsserver
# arg: $2 = mount point (default: /lustre)
# arg: $3 = lustre version (default: 2.10)
master=$1
lfs_mount=${2:-/lustre}
lustre_version=${3:-2.10}
# register the prebuilt 2.10 client modules with the running kernel
if [ "$lustre_version" = "2.10" ]; then
yum install -y kmod-lustre-client
weak-modules --add-kernel $(uname -r)
fi
mkdir -p $lfs_mount
echo "${master}@tcp0:/LustreFS $lfs_mount lustre defaults,_netdev 0 0" >> /etc/fstab
mount -a
chmod 777 $lfs_mount
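A hypothetical invocation, assuming the MDS answers on 10.0.0.4 and the default mount point is wanted:
```
sudo ./lfsclient.sh 10.0.0.4 /lustre
```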

View file

@@ -1,89 +0,0 @@
#!/bin/bash
# arg: $1 = lfsserver
# arg: $2 = storage account
# arg: $3 = storage key
# arg: $4 = storage container
# arg: $5 = lustre version (default 2.10)
master=$1
storage_account=$2
storage_key=$3
storage_container=$4
lustre_version=${5-2.10}
# adding kernel module for lustre client
if [ "$lustre_version" = "2.10" ]; then
yum install -y kmod-lustre-client
weak-modules --add-kernel $(uname -r)
fi
if ! rpm -q lemur-azure-hsm-agent lemur-azure-data-movers; then
yum -y install \
https://azurehpc.azureedge.net/rpms/lemur-azure-hsm-agent-1.0.0-lustre_${lustre_version}.x86_64.rpm \
https://azurehpc.azureedge.net/rpms/lemur-azure-data-movers-1.0.0-lustre_${lustre_version}.x86_64.rpm
fi
mkdir -p /var/run/lhsmd
chmod 755 /var/run/lhsmd
mkdir -p /etc/lhsmd
chmod 755 /etc/lhsmd
cat <<EOF >/etc/lhsmd/agent
# Lustre NID and filesystem name for the front-end filesystem; the agent mounts this
client_device="${master}@tcp:/LustreFS"
# Plugins to enable; here only the Azure blob data mover is used
enabled_plugins=["lhsm-plugin-az"]
# Directory containing the plugin binaries
plugin_dir="/usr/libexec/lhsmd"
# Number of concurrent request handlers
handler_count=16
snapshots {
enabled = false
}
EOF
chmod 600 /etc/lhsmd/agent
cat <<EOF >/etc/lhsmd/lhsm-plugin-az
az_storage_account = "$storage_account"
az_storage_key = "$storage_key"
num_threads = 32
#
# One or more archive definition is required.
#
archive "az-blob" {
id = 1 # Must be unique to this endpoint
container = "$storage_container" # Container used for this archive
prefix = "" # Optional prefix
num_threads = 32
}
EOF
chmod 600 /etc/lhsmd/lhsm-plugin-az
cat <<EOF >/etc/systemd/system/lhsmd.service
[Unit]
Description=The lhsmd server
After=syslog.target network.target remote-fs.target nss-lookup.target
[Service]
Type=simple
PIDFile=/run/lhsmd.pid
ExecStartPre=/bin/mkdir -p /var/run/lhsmd
ExecStart=/sbin/lhsmd -config /etc/lhsmd/agent
Restart=always
[Install]
WantedBy=multi-user.target
EOF
chmod 600 /etc/systemd/system/lhsmd.service
systemctl daemon-reload
systemctl enable lhsmd
systemctl start lhsmd
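A hypothetical invocation of the removed script, matching the argument order in its header (all values are placeholders):
```
sudo ./lfshsm.sh 10.0.0.4 mystorageacct "$STORAGE_KEY" lustre-archive 2.12
```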

View file

@@ -1,25 +0,0 @@
#!/bin/bash
# arg: $1 = storage account
# arg: $2 = storage key
# arg: $3 = storage container
# arg: $4 = lfs mount (default=/lustre)
# arg: $5 = lustre version (default=2.10)
storage_account=$1
storage_key=$2
storage_container=$3
lfs_mount=${4:-/lustre}
lustre_version=${5-2.10}
if ! rpm -q lemur-azure-hsm-agent lemur-azure-data-movers; then
yum -y install \
https://azurehpc.azureedge.net/rpms/lemur-azure-hsm-agent-1.0.0-lustre_${lustre_version}.x86_64.rpm \
https://azurehpc.azureedge.net/rpms/lemur-azure-data-movers-1.0.0-lustre_${lustre_version}.x86_64.rpm
fi
cd $lfs_mount
export STORAGE_ACCOUNT=$storage_account
export STORAGE_KEY=$storage_key
/sbin/azure-import ${storage_container}
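A hypothetical invocation (placeholders throughout), importing an existing container's namespace into the mounted filesystem:
```
sudo ./lfsimport.sh mystorageacct "$STORAGE_KEY" lustre-archive /lustre 2.12
```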

View file

@@ -1,31 +0,0 @@
#!/bin/bash
# arg: $1 = name
# arg: $2 = log analytics workspace id
# arg: $3 = log analytics key
name=$1
log_analytics_workspace_id=$2
log_analytics_key=$3
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
sed "s#__FS_NAME__#${name}#g;s#__LOG_ANALYTICS_WORKSPACE_ID__#${log_analytics_workspace_id}#g;s#__LOG_ANALYTICS_KEY__#${log_analytics_key}#g" $DIR/lfsloganalyticsd.sh.in >/usr/bin/lfsloganalyticsd.sh
chmod +x /usr/bin/lfsloganalyticsd.sh
cat <<EOF >/lib/systemd/system/lfsloganalytics.service
[Unit]
Description=Lustre logging service to Log Analytics.
[Service]
Type=simple
ExecStart=/bin/bash /usr/bin/lfsloganalyticsd.sh
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable lfsloganalytics
systemctl start lfsloganalytics
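A hypothetical invocation of the removed installer (workspace values are placeholders):
```
sudo ./lfsloganalytics.sh LustreFS "<workspace-id>" "<workspace-key>"
```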

View file

@@ -1,65 +0,0 @@
#!/bin/bash
fs_name=__FS_NAME__
workspace_id=__LOG_ANALYTICS_WORKSPACE_ID__
key="__LOG_ANALYTICS_KEY__"
DATE=`date '+%Y-%m-%d %H:%M:%S'`
echo "Lustre Log Analytics service started at ${DATE}" | systemd-cat -p info
me=$(hostname)
node=$(ls /proc/fs/lustre/osd-ldiskfs | grep LustreFS)
eth0=$(grep eth0 /proc/net/dev | sed 's/ */ /g')
bytesrecv_last=$(cut -d' ' -f 3 <<<"$eth0")
bytessend_last=$(cut -d' ' -f 11 <<<"$eth0")
while true
do
sleep 60;
eth0=$(grep eth0 /proc/net/dev | sed 's/ */ /g')
bytesrecv=$(cut -d' ' -f 3 <<<"$eth0")
bytessend=$(cut -d' ' -f 11 <<<"$eth0")
bytesrecv_int=$(($bytesrecv - $bytesrecv_last))
bytessend_int=$(($bytessend - $bytessend_last))
bytesrecv_last=$bytesrecv
bytessend_last=$bytessend
loadavg=$(cut -f1 -d' ' < /proc/loadavg)
kbytesfree=$(</proc/fs/lustre/osd-ldiskfs/${node}/kbytesfree)
content=$(cat <<EOF
{
"fsname":"$fs_name",
"hostname":"$me",
"uuid":"$node",
"loadavg":$loadavg,
"kbytesfree":$kbytesfree,
"bytessend":$bytessend_int,
"bytesrecv":$bytesrecv_int
}
EOF
)
content_len=${#content}
rfc1123date="$(date -u +%a,\ %d\ %b\ %Y\ %H:%M:%S\ GMT)"
string_to_hash="POST\n${content_len}\napplication/json\nx-ms-date:${rfc1123date}\n/api/logs"
utf8_to_hash=$(echo -n "$string_to_hash" | iconv -t utf8)
decoded_hex_key="$(echo "$key" | base64 --decode --wrap=0 | xxd -p -c256)"
signature="$(echo -ne "$utf8_to_hash" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:$decoded_hex_key" -binary | base64)"
auth_token="SharedKey $workspace_id:$signature"
curl -s -S \
-H "Content-Type: application/json" \
-H "Log-Type: $fs_name" \
-H "Authorization: $auth_token" \
-H "x-ms-date: $rfc1123date" \
-X POST \
--data "$content" \
https://$workspace_id.ods.opinsights.azure.com/api/logs?api-version=2016-04-01
done
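The records posted above land in a custom table named after the `Log-Type` header with a `_CL` suffix. A hedged example of pulling them back (assumes the `log-analytics` Azure CLI extension; the `_s`/`_d` field suffixes are the Data Collector API's type markers):
```
az monitor log-analytics query \
  -w "<workspace-id>" \
  --analytics-query "LustreFS_CL | summarize avg(loadavg_d) by hostname_s, bin(TimeGenerated, 5m)"
```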

View file

@@ -1,18 +0,0 @@
#!/bin/bash
# arg: $1 = device (e.g. L=/dev/sdb Lv2=/dev/nvme0n1)
device=$1
mkfs.lustre --fsname=LustreFS --mgs --mdt --mountfsoptions="user_xattr,errors=remount-ro" --backfstype=ldiskfs --index 0 --reformat $device
mkdir -p /mnt/mgsmds
echo "$device /mnt/mgsmds lustre noatime,nodiratime,nobarrier 0 2" >> /etc/fstab
mount -a
# set up hsm
lctl set_param -P mdt.*-MDT0000.hsm_control=enabled
lctl set_param -P mdt.*-MDT0000.hsm.default_archive_id=1
lctl set_param mdt.*-MDT0000.hsm.max_requests=128
# allow any user and group ids to write
lctl set_param mdt.*-MDT0000.identity_upcall=NONE
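A hypothetical invocation, using the example device from the header comment:
```
sudo ./lfsmaster.sh /dev/sdb
```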

View file

@@ -1,31 +0,0 @@
#!/bin/bash
# arg: $1 = lfsmaster
# arg: $2 = device (e.g. L=/dev/sdb Lv2=/dev/nvme0n1)
# arg: $3 = start index
master=$1
devices=$2
index=$3
ndevices=$(wc -w <<<$devices)
for device in $devices; do
mkfs.lustre \
--fsname=LustreFS \
--backfstype=ldiskfs \
--reformat \
--ost \
--mgsnode=$master \
--index=$index \
--mountfsoptions="errors=remount-ro" \
$device
mkdir /mnt/oss${index}
echo "$device /mnt/oss${index} lustre noatime,nodiratime,nobarrier 0 2" >> /etc/fstab
index=$(( $index + 1 ))
done
mount -a
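A hypothetical invocation: two NVMe OSTs starting at index 1, with the MDS at 10.0.0.4 (all values illustrative):
```
sudo ./lfsoss.sh 10.0.0.4 "/dev/nvme0n1 /dev/nvme1n1" 1
```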

View file

@@ -1,6 +1,6 @@
#!/bin/bash
yum -y install lustre kmod-lustre-osd-ldiskfs lustre-osd-ldiskfs-mount lustre-resource-agents e2fsprogs lustre-tests
yum -y install lustre kmod-lustre-osd-ldiskfs lustre-osd-ldiskfs-mount lustre-resource-agents e2fsprogs lustre-tests || exit 1
sed -i 's/ResourceDisk\.Format=y/ResourceDisk.Format=n/g' /etc/waagent.conf
@@ -8,4 +8,12 @@ systemctl restart waagent
weak-modules --add-kernel --no-initramfs
if [ -f "/etc/systemd/system/temp-disk-swapfile.service" ]; then
systemctl stop temp-disk-swapfile.service
systemctl disable temp-disk-swapfile.service
fi
umount /mnt/resource
sed -i '/^ - disk_setup$/d;/^ - mounts$/d' /etc/cloud/cloud.cfg
sed -i '/azure_resource-part1/d' /etc/fstab

View file

@@ -1,10 +1,16 @@
#!/bin/bash
lustre_version=${1-2.12.5}
lustre_version=${1-2.10}
if [ "$lustre_version" = "2.10" -o "$lustre_version" = "2.12" ]; then
lustre_dir=latest-${lustre_version}-release
else
lustre_dir="lustre-$lustre_version"
fi
cat << EOF >/etc/yum.repos.d/LustrePack.repo
[lustreserver]
name=lustreserver
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-${lustre_version}/el7/patchless-ldiskfs-server/
baseurl=https://downloads.whamcloud.com/public/lustre/${lustre_dir}/el7/patchless-ldiskfs-server/
enabled=1
gpgcheck=0
@@ -16,7 +22,7 @@ gpgcheck=0
[lustreclient]
name=lustreclient
baseurl=https://downloads.whamcloud.com/public/lustre/lustre-${lustre_version}/el7/client/
baseurl=https://downloads.whamcloud.com/public/lustre/${lustre_dir}/el7/client/
enabled=1
gpgcheck=0
EOF
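For reference, the new branch logic maps versions to Whamcloud directories like so (hypothetical calls):
```
./lfsrepo.sh 2.12     # baseurl uses latest-2.12-release
./lfsrepo.sh 2.13.5   # baseurl uses lustre-2.13.5
```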

View file

@@ -1,118 +0,0 @@
#!/bin/bash
exec > /var/log/setup_lustre.log
exec 2>&1
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "script_dir = $script_dir"
mds="$1"
storage_account="$2"
storage_key="$3"
storage_container="$4"
log_analytics_name="$5"
log_analytics_workspace_id="$6"
log_analytics_key="$7"
oss_disk_setup="$8"
# vars used in script
if [ -e /dev/nvme0n1 ]; then
devices='/dev/nvme*n1'
n_devices=$(echo $devices | wc -w)
echo "Using $n_devices NVME devices"
elif [ -e /dev/sdc ]; then
devices='/dev/sd[c-m]'
n_devices=$(echo $devices | wc -w)
echo "Using $n_devices NVME devices"
else
echo "ERROR: cannot find devices for storage"
exit 1
fi
lustre_version=2.12.5
if [ "$storage_account" = "" ]; then
use_hsm=false
else
use_hsm=true
fi
if [ "$log_analytics_name" = "" ]; then
use_log_analytics=false
else
use_log_analytics=true
fi
if [[ "$n_devices" -gt "1" && ( "$oss_disk_setup" = "raid" || "$HOSTNAME" = "$mds" ) ]]; then
device=/dev/md10
echo "creating raid ($device) from $n_devices devices : $devices"
$script_dir/create_raid0.sh $device $devices
devices=$device
n_devices=1
fi
echo "using $n_devices device(s) : $devices"
# SETUP LUSTRE YUM REPO
#$script_dir/lfsrepo.sh $lustre_version
# INSTALL LUSTRE PACKAGES
#$script_dir/lfspkgs.sh
ost_index=1
if [ "$HOSTNAME" = "$mds" ]; then
# SETUP MDS
$script_dir/lfsmaster.sh $devices
else
echo "wait for the mds to start"
modprobe lustre
while ! lctl ping $mds@tcp; do
sleep 2
done
idx=0
for c in $(echo ${HOSTNAME##$mds} | grep -o .); do
echo $c
idx=$(($idx * 36))
if [ -z "${c##[0-9]}" ]; then
idx=$(($idx + $c))
else
idx=$(($(printf "$idx + 10 + %d - %d" "'${c^^}" "'A")))
fi
done
ost_index=$(( ( $idx * $n_devices ) + 1 ))
echo "starting ost index=$ost_index"
mds_ip=$(ping -c 1 $mds | head -1 | sed 's/^[^)]*(//g;s/).*$//g')
$script_dir/lfsoss.sh $mds_ip "$devices" $ost_index
fi
if [ "${use_hsm,,}" = "true" ]; then
$script_dir/lfshsm.sh "$mds" "$storage_account" "$storage_key" "$storage_container" "$lustre_version"
if [ "$HOSTNAME" = "$mds" ]; then
# IMPORT CONTAINER
$script_dir/lfsclient.sh $mds /lustre
$script_dir/lfsimport.sh "$storage_account" "$storage_key" "$storage_container" /lustre "$lustre_version"
fi
fi
if [ "${use_log_analytics,,}" = "true" ]; then
$script_dir/lfsloganalytics.sh $log_analytics_name $log_analytics_workspace_id "$log_analytics_key"
fi
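The OST index derivation above parses the scale-set hostname suffix as base-36. A standalone sketch of the same logic (hostname, prefix, and device count are made up):
```
#!/bin/bash
# hypothetical values: the VMSS computer-name prefix and one instance name
prefix=lustre
host=lustre00000a
n_devices=6
idx=0
for c in $(echo ${host##$prefix} | grep -o .); do
  idx=$(($idx * 36))
  if [ -z "${c##[0-9]}" ]; then
    idx=$(($idx + $c))                                  # digit 0-9
  else
    idx=$(($idx + 10 + $(printf '%d' "'${c^^}") - 65))  # letter maps to 10..35
  fi
done
echo "first OST index: $(( $idx * $n_devices + 1 ))"    # suffix 00000a -> 61
```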