зеркало из https://github.com/Azure/AgentBaker.git
cleanup: remove all stail testdata (#5113)
This commit is contained in:
Родитель
0b4bd631b3
Коммит
158c3d2b11
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
|
@ -1,588 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Confidential-computing proxy unit templates staged on the VHD.
CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in
CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in
# CNI config/binary locations and download caches.
CNI_CONFIG_DIR="/etc/cni/net.d"
CNI_BIN_DIR="/opt/cni/bin"
#TODO pull this out of componetns.json too?
CNI_DOWNLOADS_DIR="/opt/cni/downloads"
CRICTL_DOWNLOAD_DIR="/opt/crictl/downloads"
CRICTL_BIN_DIR="/usr/local/bin"
CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads"
RUNC_DOWNLOADS_DIR="/opt/runc/downloads"
K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads"
# Pre-cached private Kubernetes packages (see extractKubeBinaries).
K8S_PRIVATE_PACKAGES_CACHE_DIR="/opt/kubernetes/downloads/private-packages"
# Empty on non-Ubuntu distros; used as the Ubuntu-vs-Mariner discriminator below.
UBUNTU_RELEASE=$(lsb_release -r -s)
SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR="/opt/azure/tlsbootstrap"
SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_VERSION="v0.1.0-alpha.2"
TELEPORTD_PLUGIN_DOWNLOAD_DIR="/opt/teleportd/downloads"
CREDENTIAL_PROVIDER_DOWNLOAD_DIR="/opt/credentialprovider/downloads"
CREDENTIAL_PROVIDER_BIN_DIR="/var/lib/kubelet/credential-provider"
TELEPORTD_PLUGIN_BIN_DIR="/usr/local/bin"
# Space-separated list consumed by downloadContainerdWasmShims.
CONTAINERD_WASM_VERSIONS="v0.3.0 v0.5.1 v0.8.0"
MANIFEST_FILEPATH="/opt/azure/manifest.json"
COMPONENTS_FILEPATH="/opt/azure/components.json"
MAN_DB_AUTO_UPDATE_FLAG_FILEPATH="/var/lib/man-db/auto-update"
CURL_OUTPUT=/tmp/curl_verbose.out
# OS sentinels compared against by the install helpers.
UBUNTU_OS_NAME="UBUNTU"
MARINER_OS_NAME="MARINER"
|
||||
|
||||
# Delete the man-db auto-update flag file, disabling man-db triggers while
# packages are being installed (re-created by createManDbAutoUpdateFlagFile).
removeManDbAutoUpdateFlagFile() {
    rm -f "${MAN_DB_AUTO_UPDATE_FLAG_FILEPATH}"
}
|
||||
|
||||
# Re-create the man-db auto-update flag file, re-enabling man-db triggers.
createManDbAutoUpdateFlagFile() {
    touch "${MAN_DB_AUTO_UPDATE_FLAG_FILEPATH}"
}
|
||||
|
||||
# Drop the containerd download cache directory and everything in it.
cleanupContainerdDlFiles() {
    rm -rf "${CONTAINERD_DOWNLOADS_DIR}"
}
|
||||
|
||||
# Install standalone containerd using the version(s) declared for this
# OS/os_version pair in components.json. Skips installation when the
# resolved version list is empty or the sentinel "<SKIP>".
installContainerdWithComponentsJson() {
    # Resolve {os, os_version}: Ubuntu uses lsb_release output, anything
    # without UBUNTU_RELEASE is treated as Mariner with the rolling "current".
    os=${UBUNTU_OS_NAME}
    if [[ -z "$UBUNTU_RELEASE" ]]; then
        os=${MARINER_OS_NAME}
        os_version="current"
    else
        os_version="${UBUNTU_RELEASE}"
    fi

    containerdPackage=$(jq ".Packages" "$COMPONENTS_FILEPATH" | jq ".[] | select(.name == \"containerd\")") || exit $ERR_CONTAINERD_VERSION_INVALID
    PACKAGE_VERSIONS=()
    # Kata nodes on Mariner use a dedicated package set.
    if [[ "${os}" == "${MARINER_OS_NAME}" && "${IS_KATA}" == "true" ]]; then
        os=${MARINER_KATA_OS_NAME}
    fi
    # updatePackageVersions (defined elsewhere) fills PACKAGE_VERSIONS.
    updatePackageVersions "${containerdPackage}" "${os}" "${os_version}"

    #Containerd's versions array is expected to have only one element.
    #If it has more than one element, we will install the last element in the array.
    if [[ ${#PACKAGE_VERSIONS[@]} -gt 1 ]]; then
        echo "WARNING: containerd package versions array has more than one element. Installing the last element in the array."
    fi
    if [[ ${#PACKAGE_VERSIONS[@]} -eq 0 || ${PACKAGE_VERSIONS[0]} == "<SKIP>" ]]; then
        echo "INFO: containerd package versions array is either empty or the first element is <SKIP>. Skipping containerd installation."
        return 0
    fi
    # Version-sort and pick the highest (last) entry.
    IFS=$'\n' sortedPackageVersions=($(sort -V <<<"${PACKAGE_VERSIONS[*]}"))
    unset IFS
    array_size=${#sortedPackageVersions[@]}
    [[ $((array_size-1)) -lt 0 ]] && last_index=0 || last_index=$((array_size-1))
    packageVersion=${sortedPackageVersions[${last_index}]}
    # Split "<maj.min.patch>-<hotfix>"; cut -s emits nothing when no '-' exists.
    containerdMajorMinorPatchVersion="$(echo "$packageVersion" | cut -d- -f1)"
    containerdHotFixVersion="$(echo "$packageVersion" | cut -d- -s -f2)"
    if [ -z "$containerdMajorMinorPatchVersion" ] || [ "$containerdMajorMinorPatchVersion" == "null" ] || [ "$containerdHotFixVersion" == "null" ]; then
        echo "invalid containerd version: $packageVersion"
        exit $ERR_CONTAINERD_VERSION_INVALID
    fi
    logs_to_events "AKS.CSE.installContainerRuntime.installStandaloneContainerd" "installStandaloneContainerd ${containerdMajorMinorPatchVersion} ${containerdHotFixVersion}"
    echo "in installContainerRuntime - CONTAINERD_VERSION = ${packageVersion}"
}
|
||||
|
||||
# Install standalone containerd using the version pinned in manifest.json
# (legacy path, used when components.json has no containerd entry).
installContainerdWithManifestJson() {
    local containerd_version
    if [ -f "$MANIFEST_FILEPATH" ]; then
        local containerd_version # duplicate declaration kept as-is; harmless
        containerd_version="$(jq -r .containerd.edge "$MANIFEST_FILEPATH")"
        # Ubuntu 18.04 stays on its pinned containerd build.
        if [ "${UBUNTU_RELEASE}" == "18.04" ]; then
            containerd_version="$(jq -r '.containerd.pinned."1804"' "$MANIFEST_FILEPATH")"
        fi
    else
        # NOTE(review): no hardcoded default is actually assigned here, so a
        # missing manifest falls through to the invalid-version exit below.
        echo "WARNING: containerd version not found in manifest, defaulting to hardcoded."
    fi
    # Split "<maj.min.patch>-<revision>".
    containerd_patch_version="$(echo "$containerd_version" | cut -d- -f1)"
    containerd_revision="$(echo "$containerd_version" | cut -d- -f2)"
    if [ -z "$containerd_patch_version" ] || [ "$containerd_patch_version" == "null" ] || [ "$containerd_revision" == "null" ]; then
        echo "invalid container version: $containerd_version"
        # NOTE(review): exits with the INSTALL_TIMEOUT code, not
        # ERR_CONTAINERD_VERSION_INVALID as the components.json path does.
        exit $ERR_CONTAINERD_INSTALL_TIMEOUT
    fi
    logs_to_events "AKS.CSE.installContainerRuntime.installStandaloneContainerd" "installStandaloneContainerd ${containerd_patch_version} ${containerd_revision}"
    echo "in installContainerRuntime - CONTAINERD_VERSION = ${containerd_patch_version}"
}
|
||||
|
||||
# Entry point for container-runtime installation: Moby when containerd is not
# requested, then containerd from components.json when listed there,
# otherwise from manifest.json.
installContainerRuntime() {
    echo "in installContainerRuntime - KUBERNETES_VERSION = ${KUBERNETES_VERSION}"
    if [[ "${NEEDS_CONTAINERD}" != "true" ]]; then
        # NOTE(review): execution continues into the containerd paths below
        # even after installMoby -- confirm this is intended.
        installMoby
    fi
    # Prefer components.json when it has a containerd entry.
    if [ -f "$COMPONENTS_FILEPATH" ] && jq '.Packages[] | select(.name == "containerd")' < $COMPONENTS_FILEPATH > /dev/null; then
        echo "Package \"containerd\" exists in $COMPONENTS_FILEPATH."
        installContainerdWithComponentsJson
        return
    fi
    echo "Package \"containerd\" does not exist in $COMPONENTS_FILEPATH."
    installContainerdWithManifestJson
}
|
||||
|
||||
# Install CNI binaries: Azure VNET CNI only for the "azure" network plugin,
# reference plugins always (kubenet, plus the loopback plugin containerd
# needs until containerd 2).
installNetworkPlugin() {
    if [[ "${NETWORK_PLUGIN}" == "azure" ]]; then
        installAzureCNI
    fi
    installCNI
    # Clear the download cache in the background; completion is not awaited.
    rm -rf $CNI_DOWNLOADS_DIR &
}
|
||||
|
||||
|
||||
# Download the ACR credential-provider tarball into
# CREDENTIAL_PROVIDER_DOWNLOAD_DIR. (Name keeps the historical "Credental"
# spelling; callers depend on it.)
downloadCredentalProvider() {
    mkdir -p $CREDENTIAL_PROVIDER_DOWNLOAD_DIR
    # Tarball filename = final path segment of the URL; consumed later by
    # installCredentalProvider via this global.
    CREDENTIAL_PROVIDER_TGZ_TMP=${CREDENTIAL_PROVIDER_DOWNLOAD_URL##*/} # Use bash builtin #
    retrycmd_get_tarball 120 5 "$CREDENTIAL_PROVIDER_DOWNLOAD_DIR/$CREDENTIAL_PROVIDER_TGZ_TMP" "$CREDENTIAL_PROVIDER_DOWNLOAD_URL" || exit $ERR_CREDENTIAL_PROVIDER_DOWNLOAD_TIMEOUT
}
|
||||
|
||||
# Download, extract, and install the azure-acr-credential-provider binary
# into the kubelet credential-provider directory, then clean the cache.
installCredentalProvider() {
    logs_to_events "AKS.CSE.installCredentalProvider.downloadCredentalProvider" downloadCredentalProvider
    # CREDENTIAL_PROVIDER_TGZ_TMP is set globally by downloadCredentalProvider.
    tar -xzf "$CREDENTIAL_PROVIDER_DOWNLOAD_DIR/${CREDENTIAL_PROVIDER_TGZ_TMP}" -C $CREDENTIAL_PROVIDER_DOWNLOAD_DIR
    mkdir -p "${CREDENTIAL_PROVIDER_BIN_DIR}"
    chown -R root:root "${CREDENTIAL_PROVIDER_BIN_DIR}"
    # kubelet expects the binary under the name "acr-credential-provider".
    mv "${CREDENTIAL_PROVIDER_DOWNLOAD_DIR}/azure-acr-credential-provider" "${CREDENTIAL_PROVIDER_BIN_DIR}/acr-credential-provider"
    chmod 755 "${CREDENTIAL_PROVIDER_BIN_DIR}/acr-credential-provider"
    rm -rf ${CREDENTIAL_PROVIDER_DOWNLOAD_DIR}
}
|
||||
|
||||
# Download the arch-specific secure TLS bootstrap kubelet exec plugin into
# SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR; no-op when already cached.
downloadSecureTLSBootstrapKubeletExecPlugin() {
    local plugin_url="https://k8sreleases.blob.core.windows.net/aks-tls-bootstrap-client/${SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_VERSION}/linux/amd64/tls-bootstrap-client"
    if [[ $(isARM64) == 1 ]]; then
        plugin_url="https://k8sreleases.blob.core.windows.net/aks-tls-bootstrap-client/${SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_VERSION}/linux/arm64/tls-bootstrap-client"
    fi

    mkdir -p $SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR
    plugin_download_path="${SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR}/tls-bootstrap-client"

    # Only download when the binary is not already present on the VHD.
    if [ ! -f "$plugin_download_path" ]; then
        retrycmd_if_failure 30 5 60 curl -fSL -o "$plugin_download_path" "$plugin_url" || exit $ERR_DOWNLOAD_SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_TIMEOUT
        chown -R root:root "$SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR"
        chmod -R 755 "$SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_DOWNLOAD_DIR"
    fi
}
|
||||
|
||||
# Download the spin and slight containerd wasm shims (plus wws for v0.8.0)
# for every version in CONTAINERD_WASM_VERSIONS, in parallel, then mark all
# downloaded shims executable.
downloadContainerdWasmShims() {
    declare -a wasmShimPids=()
    for shim_version in $CONTAINERD_WASM_VERSIONS; do
        # "v0.3.0" -> "v0-3-0"; used in the on-disk binary names.
        binary_version="$(echo "${shim_version}" | tr . -)"
        local containerd_wasm_filepath="/usr/local/bin"
        local containerd_wasm_url="https://acs-mirror.azureedge.net/containerd-wasm-shims/${shim_version}/linux/amd64"
        if [[ $(isARM64) == 1 ]]; then
            containerd_wasm_url="https://acs-mirror.azureedge.net/containerd-wasm-shims/${shim_version}/linux/arm64"
        fi

        # NOTE(review): this guard checks ...-${shim_version} but the files are
        # written as ...-${binary_version}-v1 -- confirm the intended cache key.
        if [ ! -f "$containerd_wasm_filepath/containerd-shim-spin-${shim_version}" ] || [ ! -f "$containerd_wasm_filepath/containerd-shim-slight-${shim_version}" ]; then
            # Each download is backgrounded; the `exit` in the ( ) subshell only
            # terminates that background job, it does not abort this script.
            retrycmd_if_failure 30 5 60 curl -fSLv -o "$containerd_wasm_filepath/containerd-shim-spin-${binary_version}-v1" "$containerd_wasm_url/containerd-shim-spin-v1" 2>&1 | tee $CURL_OUTPUT >/dev/null | grep -E "^(curl:.*)|([eE]rr.*)$" && (cat $CURL_OUTPUT && exit $ERR_KRUSTLET_DOWNLOAD_TIMEOUT) &
            wasmShimPids+=($!)
            retrycmd_if_failure 30 5 60 curl -fSLv -o "$containerd_wasm_filepath/containerd-shim-slight-${binary_version}-v1" "$containerd_wasm_url/containerd-shim-slight-v1" 2>&1 | tee $CURL_OUTPUT >/dev/null | grep -E "^(curl:.*)|([eE]rr.*)$" && (cat $CURL_OUTPUT && exit $ERR_KRUSTLET_DOWNLOAD_TIMEOUT) &
            wasmShimPids+=($!)
            # The wws shim only ships with v0.8.0.
            if [ "$shim_version" == "v0.8.0" ]; then
                retrycmd_if_failure 30 5 60 curl -fSLv -o "$containerd_wasm_filepath/containerd-shim-wws-${binary_version}-v1" "$containerd_wasm_url/containerd-shim-wws-v1" 2>&1 | tee $CURL_OUTPUT >/dev/null | grep -E "^(curl:.*)|([eE]rr.*)$" && (cat $CURL_OUTPUT && exit $ERR_KRUSTLET_DOWNLOAD_TIMEOUT) &
                wasmShimPids+=($!)
            fi
        fi
    done
    # Barrier: wait for all background downloads before chmod-ing.
    wait ${wasmShimPids[@]}
    for shim_version in $CONTAINERD_WASM_VERSIONS; do
        binary_version="$(echo "${shim_version}" | tr . -)"
        chmod 755 "$containerd_wasm_filepath/containerd-shim-spin-${binary_version}-v1"
        chmod 755 "$containerd_wasm_filepath/containerd-shim-slight-${binary_version}-v1"
        if [ "$shim_version" == "v0.8.0" ]; then
            chmod 755 "$containerd_wasm_filepath/containerd-shim-wws-${binary_version}-v1"
        fi
    done
}
|
||||
|
||||
# Download and extract the ORAS CLI tarball.
#   $1 - directory to extract into
#   $2 - tarball download URL
#   $3 - version string (logging only)
installOras() {
    ORAS_DOWNLOAD_DIR="/opt/oras/downloads"
    ORAS_EXTRACTED_DIR=${1}
    ORAS_DOWNLOAD_URL=${2}
    ORAS_VERSION=${3}

    mkdir -p $ORAS_DOWNLOAD_DIR

    echo "Installing Oras version $ORAS_VERSION..."
    # Tarball filename = final URL path segment.
    ORAS_TMP=${ORAS_DOWNLOAD_URL##*/} # Use bash builtin #
    retrycmd_get_tarball 120 5 "$ORAS_DOWNLOAD_DIR/${ORAS_TMP}" ${ORAS_DOWNLOAD_URL} || exit $ERR_ORAS_DOWNLOAD_ERROR

    # Belt-and-braces: verify the tarball actually landed on disk.
    if [ ! -f "$ORAS_DOWNLOAD_DIR/${ORAS_TMP}" ]; then
        echo "File $ORAS_DOWNLOAD_DIR/${ORAS_TMP} does not exist."
        exit $ERR_ORAS_DOWNLOAD_ERROR
    fi

    echo "File $ORAS_DOWNLOAD_DIR/${ORAS_TMP} exists."
    sudo tar -zxf "$ORAS_DOWNLOAD_DIR/${ORAS_TMP}" -C $ORAS_EXTRACTED_DIR/
    rm -r "$ORAS_DOWNLOAD_DIR"
    echo "Oras version $ORAS_VERSION installed successfully."
}
|
||||
|
||||
# Expand shell variable references embedded in a package URL template
# (e.g. ".../v${version}/...") and print the result on stdout.
#   $1 - URL template; may be empty, in which case an empty line is printed.
# SECURITY NOTE: `eval` executes whatever shell syntax the template contains;
# callers must only pass trusted, manifest-controlled URLs here.
evalPackageDownloadURL() {
    local url=${1:-}
    if [[ -n "$url" ]]; then
        local result
        eval "result=${url}"
        # BUGFIX: quoted so the expanded URL is not word-split or glob-expanded.
        echo "$result"
        return
    fi
    echo ""
}
|
||||
|
||||
# Download the Azure VNET CNI tarball.
#   $1 - optional download directory (defaults to CNI_DOWNLOADS_DIR)
#   $2 - optional plugins URL (defaults to existing VNET_CNI_PLUGINS_URL)
downloadAzureCNI() {
    # BUGFIX: was `mkdir -p ${1-$:CNI_DOWNLOADS_DIR}`; with no argument that
    # expanded to the literal string '$:CNI_DOWNLOADS_DIR' and created a junk
    # directory instead of the download cache.
    mkdir -p "${1:-$CNI_DOWNLOADS_DIR}"
    VNET_CNI_PLUGINS_URL=${2:-$VNET_CNI_PLUGINS_URL}
    if [[ -z "$VNET_CNI_PLUGINS_URL" ]]; then
        echo "VNET_CNI_PLUGINS_URL is not set. Exiting..."
        return
    fi
    # Tarball filename = final URL path segment; consumed by installAzureCNI.
    CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin #
    # NOTE: the tarball always lands under CNI_DOWNLOADS_DIR (even when $1 was
    # given) to match the extraction path used by installAzureCNI.
    retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT
}
|
||||
|
||||
# Fetch a crictl tarball.
#   $1 - target directory (defaults to CRICTL_DOWNLOAD_DIR)
#   $2 - download URL (required)
downloadCrictl() {
    # Default the destination when no directory argument was supplied.
    downloadDir=${1:-${CRICTL_DOWNLOAD_DIR}}
    mkdir -p "$downloadDir"
    url=${2}
    # The tarball keeps its upstream filename (final URL path segment).
    crictlTgzTmp=${url##*/}
    retrycmd_curl_file 10 5 60 "$downloadDir/${crictlTgzTmp}" ${url} || exit $ERR_CRICTL_DOWNLOAD_TIMEOUT
}
|
||||
|
||||
# Install crictl from the pre-cached tarball into CRICTL_BIN_DIR.
# Skips when any crictl is already on PATH; returns 1 (best-effort) when no
# cached tarball exists.
installCrictl() {
    CPU_ARCH=$(getCPUArch)
    currentVersion=$(crictl --version 2>/dev/null | sed 's/crictl version //g')
    if [[ "${currentVersion}" != "" ]]; then
        echo "version ${currentVersion} of crictl already installed. skipping installCrictl of target version ${KUBERNETES_VERSION%.*}.0"
    else
        # Expected cached tarball name for this CRICTL_VERSION/arch.
        CRICTL_TGZ_TEMP="crictl-v${CRICTL_VERSION}-linux-${CPU_ARCH}.tar.gz"
        if [[ ! -f "$CRICTL_DOWNLOAD_DIR/${CRICTL_TGZ_TEMP}" ]]; then
            rm -rf ${CRICTL_DOWNLOAD_DIR}
            echo "pre-cached crictl not found: skipping installCrictl"
            return 1
        fi
        echo "Unpacking crictl into ${CRICTL_BIN_DIR}"
        tar zxvf "$CRICTL_DOWNLOAD_DIR/${CRICTL_TGZ_TEMP}" -C ${CRICTL_BIN_DIR}
        chown root:root $CRICTL_BIN_DIR/crictl
        chmod 755 $CRICTL_BIN_DIR/crictl
    fi
}
|
||||
|
||||
# Download the teleportd plugin binary (amd64 only; a no-op on ARM64).
#   $1 - base download URL
#   $2 - teleportd version (also published globally as TELEPORTD_VERSION,
#        which installTeleportdPlugin relies on after calling this)
downloadTeleportdPlugin() {
    DOWNLOAD_URL=$1
    TELEPORTD_VERSION=$2
    if [[ $(isARM64) == 1 ]]; then
        # no arm64 teleportd binaries are published
        return
    fi

    if [[ -z ${DOWNLOAD_URL} ]]; then
        echo "download url parameter for downloadTeleportdPlugin was not given"
        exit $ERR_TELEPORTD_DOWNLOAD_ERR
    fi
    if [[ -z ${TELEPORTD_VERSION} ]]; then
        echo "teleportd version not given"
        exit $ERR_TELEPORTD_DOWNLOAD_ERR
    fi
    mkdir -p $TELEPORTD_PLUGIN_DOWNLOAD_DIR
    retrycmd_curl_file 10 5 60 "${TELEPORTD_PLUGIN_DOWNLOAD_DIR}/teleportd-v${TELEPORTD_VERSION}" "${DOWNLOAD_URL}/v${TELEPORTD_VERSION}/teleportd" || exit ${ERR_TELEPORTD_DOWNLOAD_ERR}
}
|
||||
|
||||
# Install/upgrade the teleportd plugin to TARGET_VERSION (amd64 only),
# skipping when the installed version is already >= the target.
installTeleportdPlugin() {
    if [[ $(isARM64) == 1 ]]; then
        # no arm64 teleportd binaries are published
        return
    fi

    CURRENT_VERSION=$(teleportd --version 2>/dev/null | sed 's/teleportd version v//g')
    local TARGET_VERSION="0.8.0"
    if semverCompare ${CURRENT_VERSION:-"0.0.0"} ${TARGET_VERSION}; then
        echo "currently installed teleportd version ${CURRENT_VERSION} is greater than (or equal to) target base version ${TARGET_VERSION}. skipping installTeleportdPlugin."
    else
        downloadTeleportdPlugin ${TELEPORTD_PLUGIN_DOWNLOAD_URL} ${TARGET_VERSION}
        # TELEPORTD_VERSION is a global set by downloadTeleportdPlugin above.
        mv "${TELEPORTD_PLUGIN_DOWNLOAD_DIR}/teleportd-v${TELEPORTD_VERSION}" "${TELEPORTD_PLUGIN_BIN_DIR}/teleportd" || exit ${ERR_TELEPORTD_INSTALL_ERR}
        chmod 755 "${TELEPORTD_PLUGIN_BIN_DIR}/teleportd" || exit ${ERR_TELEPORTD_INSTALL_ERR}
    fi
    rm -rf ${TELEPORTD_PLUGIN_DOWNLOAD_DIR}
}
|
||||
|
||||
# Create the CNI binary and config directories owned by root.
# The bin tree gets 755 recursively; the config dir gets 755 on itself only.
setupCNIDirs() {
    mkdir -p "$CNI_BIN_DIR"
    chown -R root:root "$CNI_BIN_DIR"
    chmod -R 755 "$CNI_BIN_DIR"

    mkdir -p "$CNI_CONFIG_DIR"
    chown -R root:root "$CNI_CONFIG_DIR"
    chmod 755 "$CNI_CONFIG_DIR"
}
|
||||
|
||||
|
||||
# Install the reference CNI plugins into CNI_BIN_DIR using the single version
# listed for this OS in components.json; falls back to a hard-coded v1.4.1
# download when no components entry exists.
installCNI() {
    if [ ! -f "$COMPONENTS_FILEPATH" ] || ! jq '.Packages[] | select(.name == "cni-plugins")' < $COMPONENTS_FILEPATH > /dev/null; then
        echo "WARNING: no cni-plugins components present falling back to hard coded download of 1.4.1. This should error eventually"
        retrycmd_get_tarball 120 5 "${CNI_DOWNLOADS_DIR}/refcni.tar.gz" "https://acs-mirror.azureedge.net/cni-plugins/v1.4.1/binaries/cni-plugins-linux-amd64-v1.4.1.tgz" || exit 
        tar -xzf "${CNI_DOWNLOADS_DIR}/refcni.tar.gz" -C $CNI_BIN_DIR
        return
    fi

    #always just use what is listed in components.json so we don't have to sync.
    cniPackage=$(jq ".Packages" "$COMPONENTS_FILEPATH" | jq ".[] | select(.name == \"cni-plugins\")") || exit $ERR_CNI_VERSION_INVALID

    # Resolve {os, os_version} exactly like installContainerdWithComponentsJson.
    # BUGFIX: os_version was previously overwritten with UBUNTU_RELEASE
    # unconditionally, clobbering the Mariner "current" sentinel set above.
    os=${UBUNTU_OS_NAME}
    if [[ -z "$UBUNTU_RELEASE" ]]; then
        os=${MARINER_OS_NAME}
        os_version="current"
    else
        os_version="${UBUNTU_RELEASE}"
    fi
    if [[ "${os}" == "${MARINER_OS_NAME}" && "${IS_KATA}" == "true" ]]; then
        os=${MARINER_KATA_OS_NAME}
    fi
    PACKAGE_VERSIONS=()
    updatePackageVersions "${cniPackage}" "${os}" "${os_version}"

    # cni-plugins must resolve to exactly one version; more is a config error.
    if [[ ${#PACKAGE_VERSIONS[@]} -gt 1 ]]; then
        # BUGFIX: message used to say "containerd" and exit with the
        # containerd error code; this is the cni-plugins path.
        echo "WARNING: cni-plugins package versions array has more than one element."
        exit $ERR_CNI_VERSION_INVALID
    fi
    packageVersion=${PACKAGE_VERSIONS[0]}

    if [[ $(isARM64) == 1 ]]; then
        CNI_DIR_TMP="cni-plugins-linux-arm64-v${packageVersion}"
    else
        CNI_DIR_TMP="cni-plugins-linux-amd64-v${packageVersion}"
    fi

    if [[ -d "$CNI_DOWNLOADS_DIR/${CNI_DIR_TMP}" ]]; then
        # Latest VHDs pre-extract the plugins; just move them into place.
        mv ${CNI_DOWNLOADS_DIR}/${CNI_DIR_TMP}/* $CNI_BIN_DIR
    else
        echo "CNI tarball should already be unzipped by components.json"
        exit $ERR_CNI_VERSION_INVALID
    fi

    chown -R root:root $CNI_BIN_DIR
}
|
||||
|
||||
# Install Azure VNET CNI binaries into CNI_BIN_DIR: prefer the pre-extracted
# directory on the VHD, otherwise (download and) extract the tarball.
installAzureCNI() {
    # Tarball name = final URL segment; extracted dir name = tarball sans .tgz.
    CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin #
    CNI_DIR_TMP=${CNI_TGZ_TMP%.tgz}

    if [[ -d "$CNI_DOWNLOADS_DIR/${CNI_DIR_TMP}" ]]; then
        # Latest VHDs ship the plugins pre-extracted; older ones ship the tgz.
        mv ${CNI_DOWNLOADS_DIR}/${CNI_DIR_TMP}/* $CNI_BIN_DIR
    else
        # Download only when the tarball is not already cached.
        if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then
            logs_to_events "AKS.CSE.installAzureCNI.downloadAzureCNI" downloadAzureCNI
        fi

        tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR
    fi

    chown -R root:root $CNI_BIN_DIR
}
|
||||
|
||||
# Extract kubelet/kubectl from a kube-binaries tarball into /usr/local/bin,
# suffixing each binary with the Kubernetes version (kubelet-<ver>, kubectl-<ver>).
#   $1 - k8s version
#   $2 - tarball URL
#   $3 - "true" when the URL refers to a pre-cached private package
#   $4 - download directory (used only on the non-private path)
# Returns 1 when a private package is not cached; exits on download/extract failure.
extractKubeBinaries() {
    local k8s_version="$1"
    local kube_binary_url="$2"
    local is_private_url="$3"
    local k8s_downloads_dir="$4"

    local k8s_tgz_tmp_filename=${kube_binary_url##*/}

    if [[ $is_private_url == true ]]; then
        # Private packages must already be on the VHD; never download them.
        k8s_tgz_tmp="${K8S_PRIVATE_PACKAGES_CACHE_DIR}/${k8s_tgz_tmp_filename}"

        if [[ ! -f "${k8s_tgz_tmp}" ]]; then
            echo "cached package ${k8s_tgz_tmp} not found"
            return 1
        fi

        echo "cached package ${k8s_tgz_tmp} found, will extract that"
        # Clear any previously extracted versioned binaries first.
        rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-*
    else
        k8s_tgz_tmp="${k8s_downloads_dir}/${k8s_tgz_tmp_filename}"
        mkdir -p ${k8s_downloads_dir}

        retrycmd_get_tarball 120 5 "${k8s_tgz_tmp}" ${kube_binary_url} || exit $ERR_K8S_DOWNLOAD_TIMEOUT
        if [[ ! -f ${k8s_tgz_tmp} ]]; then
            exit "$ERR_K8S_DOWNLOAD_TIMEOUT"
        fi
    fi

    # --transform appends "-<version>" to each extracted binary name.
    tar --transform="s|.*|&-${k8s_version}|" --show-transformed-names -xzvf "${k8s_tgz_tmp}" \
        --strip-components=3 -C /usr/local/bin kubernetes/node/bin/kubelet kubernetes/node/bin/kubectl || exit $ERR_K8S_INSTALL_ERR
    if [[ ! -f /usr/local/bin/kubectl-${k8s_version} ]] || [[ ! -f /usr/local/bin/kubelet-${k8s_version} ]]; then
        exit $ERR_K8S_INSTALL_ERR
    fi

    # Keep private cached packages for reuse; delete downloaded tarballs.
    if [[ $is_private_url == false ]]; then
        rm -f "${k8s_tgz_tmp}"
    fi
}
|
||||
|
||||
# Install kubelet/kubectl for KUBERNETES_VERSION into /usr/local/bin.
# Source priority: custom URL > private (pre-cached) URL > default
# KUBE_BINARY_URL when the versioned binaries are still missing.
installKubeletKubectlAndKubeProxy() {
    # ":=" also writes the empty default back into the *_URL variables.
    CUSTOM_KUBE_BINARY_DOWNLOAD_URL="${CUSTOM_KUBE_BINARY_URL:=}"
    PRIVATE_KUBE_BINARY_DOWNLOAD_URL="${PRIVATE_KUBE_BINARY_URL:=}"
    echo "using private url: ${PRIVATE_KUBE_BINARY_DOWNLOAD_URL}, custom url: ${CUSTOM_KUBE_BINARY_DOWNLOAD_URL}"
    install_default_if_missing=true

    if [[ ! -z ${CUSTOM_KUBE_BINARY_DOWNLOAD_URL} ]]; then
        # A custom URL always wins: clear cached versioned binaries first.
        rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-*

        # NOTE(review): extractKubeBinaries declares a 4th download-dir
        # argument that these call sites do not pass -- confirm the intended
        # download location on the non-private path.
        logs_to_events "AKS.CSE.installKubeletKubectlAndKubeProxy.extractKubeBinaries" extractKubeBinaries ${KUBERNETES_VERSION} ${CUSTOM_KUBE_BINARY_DOWNLOAD_URL} false
        install_default_if_missing=false
    elif [[ ! -z ${PRIVATE_KUBE_BINARY_DOWNLOAD_URL} ]]; then
        logs_to_events "AKS.CSE.installKubeletKubectlAndKubeProxy.extractKubeBinaries" extractKubeBinaries ${KUBERNETES_VERSION} ${PRIVATE_KUBE_BINARY_DOWNLOAD_URL} true
    fi

    # Fall back to the default URL only when the versioned binaries are absent.
    if [[ ! -f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]] || [[ ! -f "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" ]]; then
        if [[ "$install_default_if_missing" == true ]]; then
            #TODO: remove the condition check on KUBE_BINARY_URL once RP change is released
            if (($(echo ${KUBERNETES_VERSION} | cut -d"." -f2) >= 17)) && [ -n "${KUBE_BINARY_URL}" ]; then
                logs_to_events "AKS.CSE.installKubeletKubectlAndKubeProxy.extractKubeBinaries" extractKubeBinaries ${KUBERNETES_VERSION} ${KUBE_BINARY_URL} false
            fi
        fi
    fi
    # Promote the versioned binaries to their canonical names.
    mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet"
    mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl"

    chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl
    # Background cleanup of leftover versioned binaries; not awaited.
    rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads &
}
|
||||
|
||||
# Pull an image with the requested CLI, logging through logs_to_events.
#   $1 - CLI tool: "ctr", "crictl", or anything else for docker
#   $2 - image reference to pull
# NOTE: the `exit` on the failure path runs in a ( ) subshell, so it does not
# abort the calling script (kept as-is to preserve behavior).
pullContainerImage() {
    CLI_TOOL=$1
    CONTAINER_IMAGE_URL=$2
    echo "pulling the image ${CONTAINER_IMAGE_URL} using ${CLI_TOOL}"
    case "${CLI_TOOL}" in
        ctr)
            logs_to_events "AKS.CSE.imagepullctr.${CONTAINER_IMAGE_URL}" "retrycmd_if_failure 2 1 120 ctr --namespace k8s.io image pull $CONTAINER_IMAGE_URL" || (echo "timed out pulling image ${CONTAINER_IMAGE_URL} via ctr" && exit $ERR_CONTAINERD_CTR_IMG_PULL_TIMEOUT)
            ;;
        crictl)
            logs_to_events "AKS.CSE.imagepullcrictl.${CONTAINER_IMAGE_URL}" "retrycmd_if_failure 2 1 120 crictl pull $CONTAINER_IMAGE_URL" || (echo "timed out pulling image ${CONTAINER_IMAGE_URL} via crictl" && exit $ERR_CONTAINERD_CRICTL_IMG_PULL_TIMEOUT)
            ;;
        *)
            logs_to_events "AKS.CSE.imagepull.${CONTAINER_IMAGE_URL}" "retrycmd_if_failure 2 1 120 docker pull $CONTAINER_IMAGE_URL" || (echo "timed out pulling image ${CONTAINER_IMAGE_URL} via docker" && exit $ERR_DOCKER_IMG_PULL_TIMEOUT)
            ;;
    esac
}
|
||||
|
||||
# Add a second tag to an existing local image.
#   $1 - CLI tool: "ctr", "crictl", or anything else for docker
#   $2 - existing image reference
#   $3 - new image reference
retagContainerImage() {
    CLI_TOOL=$1
    CONTAINER_IMAGE_URL=$2
    RETAG_IMAGE_URL=$3
    echo "retagging from ${CONTAINER_IMAGE_URL} to ${RETAG_IMAGE_URL} using ${CLI_TOOL}"
    case "${CLI_TOOL}" in
        ctr)
            ctr --namespace k8s.io image tag $CONTAINER_IMAGE_URL $RETAG_IMAGE_URL
            ;;
        crictl)
            crictl image tag $CONTAINER_IMAGE_URL $RETAG_IMAGE_URL
            ;;
        *)
            docker image tag $CONTAINER_IMAGE_URL $RETAG_IMAGE_URL
            ;;
    esac
}
|
||||
|
||||
# In Azure China, retag every local mcr.microsoft.com image to the
# mcr.azk8s.cn mirror registry so pulls/refs resolve in-region.
retagMCRImagesForChina() {
    if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
        allMCRImages=($(ctr --namespace k8s.io images list | grep '^mcr.microsoft.com/' | awk '{print $1}'))
    else
        allMCRImages=($(docker images | grep '^mcr.microsoft.com/' | awk '{str = sprintf("%s:%s", $1, $2)} {print str}'))
    fi
    # Unsubscripted ${allMCRImages} is the first element; empty means no matches.
    if [[ "${allMCRImages}" == "" ]]; then
        echo "failed to find mcr images for retag"
        return
    fi
    # ${arr[@]+...} form avoids errors on empty arrays under `set -u`.
    for mcrImage in ${allMCRImages[@]+"${allMCRImages[@]}"}; do
        # Swap only the registry host prefix.
        retagMCRImage=$(echo ${mcrImage} | sed -e 's/^mcr.microsoft.com/mcr.azk8s.cn/g')
        if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
            retagContainerImage "ctr" ${mcrImage} ${retagMCRImage}
        else
            retagContainerImage "docker" ${mcrImage} ${retagMCRImage}
        fi
    done
}
|
||||
|
||||
# Remove a local container image.
#   $1 - CLI tool: "docker" uses docker; anything else falls back to crictl
#   $2 - image reference to remove
removeContainerImage() {
    CLI_TOOL=$1
    CONTAINER_IMAGE_URL=$2
    case "${CLI_TOOL}" in
        docker)
            docker image rm $CONTAINER_IMAGE_URL
            ;;
        *)
            # crictl handles removal for both the ctr and crictl callers.
            crictl rmi $CONTAINER_IMAGE_URL
            ;;
    esac
}
|
||||
|
||||
# Delete every local tag of ${targetImage} (e.g. "kube-proxy") that does not
# belong to the current KUBERNETES_VERSION. The sweep runs inside a child
# bash via retrycmd_if_failure, so the helper and its inputs are exported.
cleanUpImages() {
    local targetImage=$1
    export targetImage
    function cleanupImagesRun() {
        # List candidates, excluding tags of the current version in its
        # exact, ".patch", "-suffix" and "_suffix" forms.
        if [ "${NEEDS_CONTAINERD}" == "true" ]; then
            if [[ "${CLI_TOOL}" == "crictl" ]]; then
                images_to_delete=$(crictl images | awk '{print $1":"$2}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep ${targetImage} | tr ' ' '\n')
            else
                images_to_delete=$(ctr --namespace k8s.io images list | awk '{print $1}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep ${targetImage} | tr ' ' '\n')
            fi
        else
            images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep ${targetImage} | tr ' ' '\n')
        fi
        # NOTE(review): $? here reflects the last pipeline stage of whichever
        # branch ran -- a non-zero status retriggers the retry wrapper.
        local exit_code=$?
        if [[ $exit_code != 0 ]]; then
            exit $exit_code
        elif [[ "${images_to_delete}" != "" ]]; then
            echo "${images_to_delete}" | while read image; do
                if [ "${NEEDS_CONTAINERD}" == "true" ]; then
                    removeContainerImage ${CLI_TOOL} ${image}
                else
                    removeContainerImage "docker" ${image}
                fi
            done
        fi
    }
    export -f cleanupImagesRun
    retrycmd_if_failure 10 5 120 bash -c cleanupImagesRun
}
|
||||
|
||||
# Sweep stale kube-proxy images, logging timestamped start/end markers.
cleanUpKubeProxyImages() {
    echo $(date),$(hostname), startCleanUpKubeProxyImages
    cleanUpImages "kube-proxy"
    echo $(date),$(hostname), endCleanUpKubeProxyImages
}
|
||||
|
||||
# Outside Azure China, delete any images previously retagged to the
# mcr.azk8s.cn mirror registry (see retagMCRImagesForChina).
cleanupRetaggedImages() {
    if [[ "${TARGET_CLOUD}" != "AzureChinaCloud" ]]; then
        if [ "${NEEDS_CONTAINERD}" == "true" ]; then
            if [[ "${CLI_TOOL}" == "crictl" ]]; then
                images_to_delete=$(crictl images | awk '{print $1":"$2}' | grep '^mcr.azk8s.cn/' | tr ' ' '\n')
            else
                images_to_delete=$(ctr --namespace k8s.io images list | awk '{print $1}' | grep '^mcr.azk8s.cn/' | tr ' ' '\n')
            fi
        else
            images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep '^mcr.azk8s.cn/' | tr ' ' '\n')
        fi
        if [[ "${images_to_delete}" != "" ]]; then
            echo "${images_to_delete}" | while read image; do
                if [ "${NEEDS_CONTAINERD}" == "true" ]; then
                    # NOTE(review): "ctr" is passed, but removeContainerImage
                    # routes every non-docker tool through crictl -- confirm.
                    removeContainerImage "ctr" ${image}
                else
                    removeContainerImage "docker" ${image}
                fi
            done
        fi
    else
        echo "skipping container cleanup for AzureChinaCloud"
    fi
}
|
||||
|
||||
# Kick off the kube-proxy image sweep in a background child shell.
# Variables and functions are exported so the `bash -c` child can see them;
# the background job is not awaited (failures will not abort the caller).
cleanUpContainerImages() {
    export KUBERNETES_VERSION
    export CLI_TOOL
    export -f retrycmd_if_failure
    export -f removeContainerImage
    export -f cleanUpImages
    export -f cleanUpKubeProxyImages
    bash -c cleanUpKubeProxyImages &
}
|
||||
|
||||
# Remove the containerd download cache (companion to cleanupContainerdDlFiles).
cleanUpContainerd() {
    rm -Rf "$CONTAINERD_DOWNLOADS_DIR"
}
|
||||
|
||||
# Tell cloud-init not to (re)apply Azure network configuration on this node
# by appending an override to the Azure datasource settings.
# NOTE(review): the heredoc's YAML indentation was lost in this copy of the
# file; the nesting below ("Azure:" under "datasource:") reconstructs the
# conventional cloud-init layout -- confirm against the original source.
overrideNetworkConfig() {
    CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg"
    touch ${CONFIG_FILEPATH}
    cat <<EOF >>${CONFIG_FILEPATH}
datasource:
  Azure:
    apply_network_config: false
EOF
}
|
||||
|
||||
#EOF
|
|
@ -1,549 +0,0 @@
|
|||
#!/bin/bash
|
||||
# CSE exit codes reported back to the AKS resource provider. Values are
# grouped by subsystem; do not renumber existing codes.

# systemd / cloud-init / walinuxagent
ERR_SYSTEMCTL_START_FAIL=4
ERR_CLOUD_INIT_TIMEOUT=5
ERR_FILE_WATCH_TIMEOUT=6
ERR_HOLD_WALINUXAGENT=7
ERR_RELEASE_HOLD_WALINUXAGENT=8
ERR_APT_INSTALL_TIMEOUT=9
# docker / moby / containerd / runc install
ERR_DOCKER_INSTALL_TIMEOUT=20
ERR_DOCKER_DOWNLOAD_TIMEOUT=21
ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22
ERR_DOCKER_APT_KEY_TIMEOUT=23
ERR_DOCKER_START_FAIL=24
ERR_MOBY_APT_LIST_TIMEOUT=25
ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26
ERR_MOBY_INSTALL_TIMEOUT=27
ERR_CONTAINERD_INSTALL_TIMEOUT=28
ERR_RUNC_INSTALL_TIMEOUT=29
# kubernetes binaries / images
ERR_K8S_RUNNING_TIMEOUT=30
ERR_K8S_DOWNLOAD_TIMEOUT=31
ERR_KUBECTL_NOT_FOUND=32
ERR_IMG_DOWNLOAD_TIMEOUT=33
ERR_KUBELET_START_FAIL=34
ERR_DOCKER_IMG_PULL_TIMEOUT=35
ERR_CONTAINERD_CTR_IMG_PULL_TIMEOUT=36
ERR_CONTAINERD_CRICTL_IMG_PULL_TIMEOUT=37
ERR_CONTAINERD_INSTALL_FILE_NOT_FOUND=38
ERR_CONTAINERD_VERSION_INVALID=39
ERR_CNI_DOWNLOAD_TIMEOUT=41
ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42
ERR_MS_PROD_DEB_PKG_ADD_FAIL=43
ERR_ORAS_DOWNLOAD_ERROR=45
ERR_SYSTEMD_INSTALL_FAIL=48
ERR_MODPROBE_FAIL=49
# connectivity checks
ERR_OUTBOUND_CONN_FAIL=50
ERR_K8S_API_SERVER_CONN_FAIL=51
ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52
ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53
# kata
ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60
ERR_KATA_APT_KEY_TIMEOUT=61
ERR_KATA_INSTALL_TIMEOUT=62
ERR_VHD_FILE_NOT_FOUND=65
ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70
ERR_RUNC_DOWNLOAD_TIMEOUT=71
ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80
# GPU / SGX drivers
ERR_GPU_DOWNLOAD_TIMEOUT=83
ERR_GPU_DRIVERS_START_FAIL=84
ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85
ERR_GPU_DEVICE_PLUGIN_START_FAIL=86
ERR_GPU_INFO_ROM_CORRUPTED=87
ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90
ERR_SGX_DRIVERS_START_FAIL=91
# apt / provisioning
ERR_APT_DAILY_TIMEOUT=98
ERR_APT_UPDATE_TIMEOUT=99
ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100
ERR_APT_DIST_UPGRADE_TIMEOUT=101
ERR_APT_PURGE_FAIL=102
ERR_SYSCTL_RELOAD=103
# CIS hardening / image build
ERR_CIS_ASSIGN_ROOT_PW=111
ERR_CIS_ASSIGN_FILE_PERMISSION=112
ERR_PACKER_COPY_FILE=113
ERR_CIS_APPLY_PASSWORD_CONFIG=115
ERR_SYSTEMD_DOCKER_STOP_FAIL=116
ERR_CRICTL_DOWNLOAD_TIMEOUT=117
ERR_CRICTL_OPERATION_ERROR=118
ERR_CTR_OPERATION_ERROR=119

# Azure Stack specific
ERR_AZURE_STACK_GET_ARM_TOKEN=120
ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121
ERR_AZURE_STACK_GET_SUBNET_PREFIX=122

ERR_VHD_BUILD_ERROR=125

# swap
ERR_SWAP_CREATE_FAIL=130
ERR_SWAP_CREATE_INSUFFICIENT_DISK_SPACE=131

# teleportd / artifact streaming
ERR_TELEPORTD_DOWNLOAD_ERR=150
ERR_TELEPORTD_INSTALL_ERR=151
ERR_ARTIFACT_STREAMING_DOWNLOAD=152
ERR_ARTIFACT_STREAMING_INSTALL=153

# proxy / CA certs / TLS bootstrap
ERR_HTTP_PROXY_CA_CONVERT=160
ERR_UPDATE_CA_CERTS=161
ERR_DOWNLOAD_SECURE_TLS_BOOTSTRAP_KUBELET_EXEC_PLUGIN_TIMEOUT=169

# NOTE(review): "DISBALE" typo is load-bearing -- referenced elsewhere.
ERR_DISBALE_IPTABLES=170

ERR_KRUSTLET_DOWNLOAD_TIMEOUT=171
ERR_DISABLE_SSH=172
ERR_PRIMARY_NIC_IP_NOT_FOUND=173
# IMDS restriction iptables rules
ERR_INSERT_IMDS_RESTRICTION_RULE_INTO_MANGLE_TABLE=174
ERR_INSERT_IMDS_RESTRICTION_RULE_INTO_FILTER_TABLE=175
ERR_DELETE_IMDS_RESTRICTION_RULE_FROM_MANGLE_TABLE=176
ERR_DELETE_IMDS_RESTRICTION_RULE_FROM_FILTER_TABLE=177

# VHD / package resolution
ERR_VHD_REBOOT_REQUIRED=200
ERR_NO_PACKAGES_FOUND=201
ERR_SNAPSHOT_UPDATE_START_FAIL=202

ERR_PRIVATE_K8S_PKG_ERR=203
ERR_K8S_INSTALL_ERR=204

# NOTE(review): defined out of numeric order; kept as-is.
ERR_SYSTEMCTL_MASK_FAIL=2

ERR_CREDENTIAL_PROVIDER_DOWNLOAD_TIMEOUT=205

ERR_CNI_VERSION_INVALID=206

# curl upgrade/downgrade handling
ERR_CURL_REMOVE_TIMEOUT=300
ERR_CURL_DOWNLOAD_TIMEOUT=301
ERR_CURL_EXTRACT_TIMEOUT=302
ERR_CURL_DOWNGRADE_LIBSSL=303
ERR_CURL_DOWNLOAD_DEV_TIMEOUT=304
ERR_CURL_INSTALL_TIMEOUT=305
ERR_CURL_VERSION_MISMATCH=306
|
||||
|
||||
# Derive OS (ID= value uppercased, or "COREOS" via ID_LIKE) and OS_VERSION
# (VERSION_ID, quotes stripped) from /etc/*-release when any release file exists.
if find /etc -type f -name "*-release" -print -quit 2>/dev/null | grep -q '.'; then
    # sort -r makes os-release sort ahead of lsb-release so its fields win.
    OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }')
    OS_VERSION=$(sort -r /etc/*-release | gawk 'match($0, /^(VERSION_ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }' | tr -d '"')
else
    echo "/etc/*-release not found"
fi
|
||||
|
||||
# OS sentinels shared with the install helpers.
UBUNTU_OS_NAME="UBUNTU"
MARINER_OS_NAME="MARINER"
MARINER_KATA_OS_NAME="MARINERKATA"
KUBECTL=/usr/local/bin/kubectl
DOCKER=/usr/bin/docker
# GPU driver settings; ":=" also assigns the empty default back into the
# source variable when it is unset.
export GPU_DV="${GPU_DRIVER_VERSION:=}"
export GPU_DEST=/usr/local/nvidia
NVIDIA_DOCKER_VERSION=2.8.0-1
DOCKER_VERSION=1.13.1-1
NVIDIA_CONTAINER_RUNTIME_VERSION="3.6.0"
export NVIDIA_DRIVER_IMAGE_SHA="${GPU_IMAGE_SHA:=}"
export NVIDIA_DRIVER_IMAGE_TAG="${GPU_DV}-${NVIDIA_DRIVER_IMAGE_SHA}"
export NVIDIA_DRIVER_IMAGE="mcr.microsoft.com/aks/aks-gpu"
# Containerized GPU-driver install entrypoints for ctr and docker hosts.
export CTR_GPU_INSTALL_CMD="ctr run --privileged --rm --net-host --with-ns pid:/proc/1/ns/pid --mount type=bind,src=/opt/gpu,dst=/mnt/gpu,options=rbind --mount type=bind,src=/opt/actions,dst=/mnt/actions,options=rbind"
export DOCKER_GPU_INSTALL_CMD="docker run --privileged --net=host --pid=host -v /opt/gpu:/mnt/gpu -v /opt/actions:/mnt/actions --rm"
APT_CACHE_DIR=/var/cache/apt/archives/
PERMANENT_CACHE_DIR=/root/aptcache/
# Guest-extension event log directory consumed by logs_to_events.
EVENTS_LOGGING_DIR=/var/log/azure/Microsoft.Azure.Extensions.CustomScript/events/
CURL_OUTPUT=/tmp/curl_verbose.out
|
||||
|
||||
retrycmd_if_failure() {
    # Run a command under `timeout`, retrying on failure.
    # $1 retries, $2 sleep between attempts (s), $3 per-attempt timeout (s),
    # remaining args: the command. Returns 1 when every attempt failed;
    # always reports how many attempts were executed.
    retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift
    for i in $(seq 1 $retries); do
        if timeout $timeout "${@}"; then
            break
        fi
        if [ $i -eq $retries ]; then
            echo Executed \"$@\" $i times;
            return 1
        fi
        sleep $wait_sleep
    done
    echo Executed \"$@\" $i times;
}
|
||||
retrycmd_nslookup() {
    # Resolve $4 with nslookup, retrying until it succeeds or $3 seconds of
    # total wall time have elapsed. $1 sleep between attempts, $2 per-query
    # timeout passed to nslookup. Returns 1 on overall timeout.
    wait_sleep=$1; timeout=$2; total_timeout=$3; record=$4
    start_time=$(date +%s)
    while true; do
        if nslookup -timeout=$timeout -retry=0 $record; then
            break
        fi
        current_time=$(date +%s)
        if [ $((current_time - start_time)) -ge $total_timeout ]; then
            echo "Total timeout $total_timeout reached, nslookup -timeout=$timeout -retry=0 $record failed"
            return 1
        fi
        sleep $wait_sleep
    done
    current_time=$(date +%s)
    echo "Executed nslookup -timeout=$timeout -retry=0 $record for $((current_time - start_time)) seconds";
}
|
||||
retrycmd_if_failure_no_stats() {
    # Same retry loop as retrycmd_if_failure, but silent: nothing is printed
    # about the number of attempts. Returns 1 when all attempts fail.
    retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift
    for i in $(seq 1 $retries); do
        if timeout $timeout ${@}; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        sleep $wait_sleep
    done
}
|
||||
retrycmd_get_tarball() {
    # Ensure $3 is a readable gzipped tarball, re-downloading from $4 until
    # `tar -tzf` succeeds or $1 attempts are exhausted ($2s sleep between).
    tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4
    echo "${tar_retries} retries"
    for i in $(seq 1 $tar_retries); do
        if tar -tzf $tarball; then
            break
        fi
        if [ $i -eq $tar_retries ]; then
            return 1
        fi
        # Tarball missing or corrupt: fetch again; curl's verbose output is
        # captured and only shown when the download fails.
        timeout 60 curl -fsSLv $url -o $tarball > $CURL_OUTPUT 2>&1
        if [[ $? != 0 ]]; then
            cat $CURL_OUTPUT
        fi
        sleep $wait_sleep
    done
}
|
||||
retrycmd_curl_file() {
    # Download $5 to $4 with curl, retrying until the destination file exists.
    # $1 retries, $2 sleep between attempts (s), $3 per-attempt timeout (s).
    # Returns 1 when the file still does not exist after all retries.
    curl_retries=$1; wait_sleep=$2; timeout=$3; filepath=$4; url=$5
    echo "${curl_retries} retries"
    for i in $(seq 1 $curl_retries); do
        [[ -f $filepath ]] && break
        if [ $i -eq $curl_retries ]; then
            return 1
        else
            timeout $timeout curl -fsSLv $url -o $filepath 2>&1 | tee $CURL_OUTPUT >/dev/null
            # BUGFIX: `$?` after a pipeline is the status of the LAST stage
            # (tee), which virtually always succeeds, so curl failures were
            # never surfaced. Check the first stage via PIPESTATUS[0] instead.
            if [[ ${PIPESTATUS[0]} != 0 ]]; then
                cat $CURL_OUTPUT
            fi
            sleep $wait_sleep
        fi
    done
}
|
||||
wait_for_file() {
    # Wait until cloud-init has fully written $3 (signalled by a '#EOF'
    # sentinel line), then strip the sentinel and record the path in the
    # "paved" ledger so later calls return immediately.
    # $1 retries, $2 sleep between checks, $3 file path. Returns 1 on timeout.
    retries=$1; wait_sleep=$2; filepath=$3
    paved=/opt/azure/cloud-init-files.paved
    # Already processed earlier? Nothing to do.
    grep -Fq "${filepath}" $paved && return 0
    for i in $(seq 1 $retries); do
        if grep -Fq '#EOF' $filepath; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        sleep $wait_sleep
    done
    sed -i "/#EOF/d" $filepath
    echo $filepath >> $paved
}
|
||||
systemctl_restart() {
    # Restart unit $4 via systemctl, retrying up to $1 times with $2s sleeps
    # and a $3s timeout per systemctl call. On each failed attempt the unit's
    # status and journal are dumped for diagnostics. Returns 1 when all
    # attempts fail.
    retries=$1; wait_sleep=$2; timeout=$3; svcname=$4
    for i in $(seq 1 $retries); do
        timeout $timeout systemctl daemon-reload
        if timeout $timeout systemctl restart $svcname; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        systemctl status $svcname --no-pager -l
        journalctl -u $svcname
        sleep $wait_sleep
    done
}
|
||||
systemctl_stop() {
    # Stop unit $4 via systemctl, retrying up to $1 times with $2s sleeps and
    # a $3s timeout per systemctl call. Returns 1 when all attempts fail.
    retries=$1; wait_sleep=$2; timeout=$3; svcname=$4
    for i in $(seq 1 $retries); do
        timeout $timeout systemctl daemon-reload
        if timeout $timeout systemctl stop $svcname; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        sleep $wait_sleep
    done
}
|
||||
systemctl_disable() {
    # Disable unit $4 via systemctl, retrying up to $1 times with $2s sleeps
    # and a $3s timeout per systemctl call. Returns 1 when all attempts fail.
    retries=$1; wait_sleep=$2; timeout=$3; svcname=$4
    for i in $(seq 1 $retries); do
        timeout $timeout systemctl daemon-reload
        if timeout $timeout systemctl disable $svcname; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        sleep $wait_sleep
    done
}
|
||||
sysctl_reload() {
    # Apply every sysctl configuration file (`sysctl --system`), retrying up
    # to $1 times with $2s sleeps and a $3s timeout per attempt.
    retries=$1; wait_sleep=$2; timeout=$3
    for i in $(seq 1 $retries); do
        if timeout $timeout sysctl --system; then
            break
        fi
        if [ $i -eq $retries ]; then
            return 1
        fi
        sleep $wait_sleep
    done
}
|
||||
version_gte() {
    # True (0) when $1 is the highest of the supplied versions under
    # version-sort order, i.e. $1 >= every other argument.
    [[ "$(printf '%s\n' "$@" | sort -V | tail -n 1)" == "$1" ]]
}
|
||||
|
||||
systemctlEnableAndStart() {
    # Restart unit $1 (capturing its status log), then enable it.
    # Returns 1 when either the restart or the enable ultimately fails.
    systemctl_restart 100 5 30 $1
    RESTART_STATUS=$?
    # Persist the unit's status for post-mortem debugging.
    systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log
    if [ $RESTART_STATUS -ne 0 ]; then
        echo "$1 could not be started"
        return 1
    fi
    retrycmd_if_failure 120 5 25 systemctl enable $1 || {
        echo "$1 could not be enabled by systemctl"
        return 1
    }
}
|
||||
|
||||
systemctlDisableAndStop() {
    # Stop and disable unit $1, but only when systemd actually knows about it.
    # Failures are reported but never propagated (best effort).
    systemctl list-units --full --all | grep -q "$1.service" || return 0
    systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped"
    systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled"
}
|
||||
|
||||
semverCompare() {
    # Return 0 when $1 >= $2 in version order (anything after '+' build
    # metadata is ignored), 1 otherwise. VERSION_A/VERSION_B/sorted/
    # highestVersion are deliberately left as globals, matching the original.
    VERSION_A=$(echo $1 | cut -d "+" -f 1)
    VERSION_B=$(echo $2 | cut -d "+" -f 1)
    [[ "${VERSION_A}" == "${VERSION_B}" ]] && return 0
    sorted=$(printf '%s\n%s\n' "${VERSION_A}" "${VERSION_B}" | sort -V)
    highestVersion=$(echo "${sorted}" | tail -n 1)
    [[ "${VERSION_A}" == "${highestVersion}" ]] && return 0
    return 1
}
|
||||
downloadDebPkgToFile() {
    # Download (not install) the deb for $1 at version $2* into directory $3
    # using `apt-get download` with retries.
    PKG_NAME=$1
    PKG_VERSION=$2
    PKG_DIRECTORY=$3
    mkdir -p $PKG_DIRECTORY
    # BUGFIX: bail out when pushd fails so apt-get does not download into the
    # caller's working directory (mirrors apt_get_download's pushd handling).
    pushd ${PKG_DIRECTORY} || return 1
    retrycmd_if_failure 10 5 600 apt-get download ${PKG_NAME}=${PKG_VERSION}*
    popd || return 1
}
|
||||
apt_get_download() {
    # Download packages into APT_CACHE_DIR with `apt-get download`, retrying
    # up to $1 times with $2s sleeps. dpkg is re-configured and apt locks are
    # awaited before each attempt. Returns 1 when all attempts fail.
    retries=$1; wait_sleep=$2; shift && shift;
    local ret=0
    pushd $APT_CACHE_DIR || return 1
    for i in $(seq 1 $retries); do
        dpkg --configure -a --force-confdef
        wait_for_apt_locks
        if apt-get -o Dpkg::Options::=--force-confold download -y "${@}"; then
            break
        fi
        if [ $i -eq $retries ]; then ret=1; else sleep $wait_sleep; fi
    done
    popd || return 1
    return $ret
}
|
||||
getCPUArch() {
    # Print the Go-style CPU architecture: "arm64" for aarch64/arm64 machines,
    # "amd64" otherwise. `arch` is intentionally left global, as before.
    arch=$(uname -m)
    case "${arch,,}" in
        aarch64|arm64)
            echo "arm64"
            ;;
        *)
            echo "amd64"
            ;;
    esac
}
|
||||
isARM64() {
    # Print 1 on arm64 machines, 0 otherwise (thin wrapper over getCPUArch).
    case "$(getCPUArch)" in
        arm64) echo 1 ;;
        *) echo 0 ;;
    esac
}
|
||||
|
||||
# Run a command and emit a guest-extension event JSON describing it.
# $1   - task name recorded in the event's TaskName field
# rest - the command to execute
# Side effects: writes ${EVENTS_LOGGING_DIR}<epoch-ms>.json.
# Propagates the command's non-zero exit code to the caller.
logs_to_events() {
    local task=$1; shift
    # Event file name: current epoch time in milliseconds.
    local eventsFileName=$(date +%s%3N)

    local startTime=$(date +"%F %T.%3N")
    ${@}
    ret=$?
    local endTime=$(date +"%F %T.%3N")

    # NOTE(review): OperationId is populated with the END timestamp rather
    # than an operation identifier — preserved as-is; confirm upstream intent.
    json_string=$( jq -n \
        --arg Timestamp "${startTime}" \
        --arg OperationId "${endTime}" \
        --arg Version "1.23" \
        --arg TaskName "${task}" \
        --arg EventLevel "Informational" \
        --arg Message "Completed: $*" \
        --arg EventPid "0" \
        --arg EventTid "0" \
        '{Timestamp: $Timestamp, OperationId: $OperationId, Version: $Version, TaskName: $TaskName, EventLevel: $EventLevel, Message: $Message, EventPid: $EventPid, EventTid: $EventTid}'
    )
    echo ${json_string} > ${EVENTS_LOGGING_DIR}${eventsFileName}.json

    # Surface the wrapped command's failure after the event has been written.
    if [ "$ret" != "0" ]; then
        return $ret
    fi
}
|
||||
|
||||
# Query IMDS for the VM's tags and print "true" when a tag whose name matches
# SkipGpuDriverInstall (case-insensitive) has value "true"; prints "false"
# otherwise. Returns curl's exit code when the IMDS request fails.
# NOTE: enables shell tracing (set -x) as a side effect and does not restore it.
should_skip_nvidia_drivers() {
    set -x
    # IMDS requires the Metadata header and must be contacted directly (no proxy).
    body=$(curl -fsSL -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2021-02-01")
    ret=$?
    if [ "$ret" != "0" ]; then
        return $ret
    fi
    # First matching tag wins; missing tag defaults to "false".
    should_skip=$(echo "$body" | jq -e '.compute.tagsList | map(select(.name | test("SkipGpuDriverInstall"; "i")))[0].value // "false" | test("true"; "i")')
    echo "$should_skip"
}
|
||||
|
||||
installJq() {
    # Print the installed jq version, or install jq first via the distro's
    # package manager (tdnf on Mariner, apt elsewhere) when it is missing.
    output=$(jq --version)
    if [ -z "$output" ]; then
        if [[ $OS == $MARINER_OS_NAME ]]; then
            sudo tdnf install -y jq && echo "jq was installed: $(jq --version)"
        else
            apt_get_install 5 1 60 jq && echo "jq was installed: $(jq --version)"
        fi
    else
        echo "$output"
    fi
}
|
||||
|
||||
|
||||
check_array_size() {
    # Set global `last_index` to len-1 of the array named by $1 (via nameref);
    # return 1 without touching last_index when the array is empty.
    declare -n arr_ref=$1
    local size=${#arr_ref[@]}
    if (( size == 0 )); then
        return 1
    fi
    last_index=$(( size - 1 ))
}
|
||||
|
||||
# Record wall-clock timing for the script section that just finished.
# $1 - section title; spaces and dashes are normalized to underscores and the
#      result names both the benchmarks entry and a dynamically-created array
#      (accessed through a nameref) holding the section's stats.
# $2 - "true" when this is the final, whole-script section (defaults false).
# Globals read: section_start_stopwatch/_timestamp (or script_start_* for the
# final section). Globals written: benchmarks, last_index (via
# check_array_size), the per-section array, and fresh section_start_* markers.
# NOTE: toggles tracing off (set +x) on entry and back on (set -x) on exit.
capture_benchmark() {
    set +x
    local title="$1"
    title="${title//[[:space:]]/_}"
    title="${title//-/_}"
    benchmarks+=($title)
    check_array_size benchmarks || { echo "Benchmarks array is empty"; return; }
    # The section's stats accumulate in an array named after the title.
    declare -n current_section="${benchmarks[last_index]}"
    local is_final_section=${2:-false}

    local current_time=$(date +%s)
    local end_timestamp=$(date +%H:%M:%S)
    if [[ "$is_final_section" == true ]]; then
        local start_timestamp=$script_start_timestamp
        local start_time=$script_start_stopwatch
    else
        local start_timestamp=$section_start_timestamp
        local start_time=$section_start_stopwatch
    fi

    # Format elapsed time as HH:MM:SS into the global total_time_elapsed.
    local difference_in_seconds=$((current_time - start_time))
    local elapsed_hours=$(($difference_in_seconds/3600))
    local elapsed_minutes=$((($difference_in_seconds%3600)/60))
    local elapsed_seconds=$(($difference_in_seconds%60))
    printf -v total_time_elapsed "%02d:%02d:%02d" $elapsed_hours $elapsed_minutes $elapsed_seconds

    # Stats layout consumed by process_benchmarks: [start, end, elapsed].
    current_section+=($start_timestamp)
    current_section+=($end_timestamp)
    current_section+=($total_time_elapsed)

    unset -n current_section

    # Reset the per-section markers for the next capture.
    section_start_stopwatch=$(date +%s)
    section_start_timestamp=$(date +%H:%M:%S)

    set -x
}
|
||||
|
||||
# Assemble the arrays recorded by capture_benchmark into a single JSON object
# and append it to the ${VHD_BUILD_PERF_DATA} JSON array file. The last entry
# in `benchmarks` is treated as the whole-script "overall" record; every other
# entry becomes a named section under the script's name.
# NOTE: toggles tracing off (set +x) on entry and back on (set -x) on exit.
process_benchmarks() {
    set +x
    check_array_size benchmarks || { echo "Benchmarks array is empty"; return; }
    declare -n script_stats="${benchmarks[last_index]}"

    # {"<script>": {"overall": {start_time, end_time, total_time_elapsed}}}
    script_object=$(jq -n --arg script_name "$(basename $0)" --arg script_start_timestamp "${script_stats[0]}" --arg end_timestamp "${script_stats[1]}" --arg total_time_elapsed "${script_stats[2]}" '{($script_name): {"overall": {"start_time": $script_start_timestamp, "end_time": $end_timestamp, "total_time_elapsed": $total_time_elapsed}}}')

    unset script_stats[@]
    unset -n script_stats

    # Merge each non-final section into the script object.
    for ((i=0; i<${#benchmarks[@]} - 1; i+=1)); do

        declare -n section_name="${benchmarks[i]}"

        section_object=$(jq -n --arg section_name "${benchmarks[i]}" --arg section_start_timestamp "${section_name[0]}" --arg end_timestamp "${section_name[1]}" --arg total_time_elapsed "${section_name[2]}" '{($section_name): {"start_time": $section_start_timestamp, "end_time": $end_timestamp, "total_time_elapsed": $total_time_elapsed}}')

        script_object=$(jq -n --argjson script_object "$script_object" --argjson section_object "$section_object" --arg script_name "$(basename $0)" '$script_object | .[$script_name] += $section_object')

        unset section_name[@]
        unset -n section_name

    done

    echo "Benchmarks:"
    echo "$script_object" | jq -C .

    # Append the object to the perf-data file (write-then-rename).
    jq ". += [$script_object]" ${VHD_BUILD_PERF_DATA} > tmp.json && mv tmp.json ${VHD_BUILD_PERF_DATA}
    chmod 755 ${VHD_BUILD_PERF_DATA}
    set -x
}
|
||||
|
||||
# Resolve which release key of components.json applies to this OS/version and
# store it in the global RELEASE.
# For UBUNTU: when the package defines a version-specific key such as "r1804"
# (for 18.04), RELEASE becomes that key wrapped in literal quotes (so it can
# be spliced directly into jq path expressions); otherwise "current".
# For MARINER / MARINERKATA: RELEASE is always "current".
updateRelease() {
    local package="$1"
    local os="$2"
    local osVersion="$3"
    RELEASE="current"
    # "20.04" -> "2004"
    local osVersionWithoutDot="${osVersion//./}"
    if [[ "${os}" == "${MARINER_KATA_OS_NAME}" || "${os}" == "${MARINER_OS_NAME}" ]]; then
        return 0
    fi
    if [[ $(echo "${package}" | jq ".downloadURIs.ubuntu.\"r${osVersionWithoutDot}\"") != "null" ]]; then
        # Keep the embedded quotes: RELEASE is interpolated into jq filters.
        RELEASE="\"r${osVersionWithoutDot}\""
    fi
}
|
||||
|
||||
updatePackageVersions() {
    # Populate the global PACKAGE_VERSIONS array with the version list for a
    # package object from components.json, preferring the OS-specific
    # .downloadURIs.<os> subtree and falling back to .downloadURIs.default.
    # $1 - package JSON, $2 - OS name (e.g. UBUNTU), $3 - OS version (e.g. 20.04)
    local package="$1"
    local os="$2"
    local osVersion="$3"
    RELEASE="current"
    updateRelease "${package}" "${os}" "${osVersion}"
    local osLowerCase=$(echo "${os}" | tr '[:upper:]' '[:lower:]')
    PACKAGE_VERSIONS=()

    # IMPROVED: the default and OS-specific branches previously duplicated the
    # extraction loop; resolve the jq subtree once and share one code path.
    local uriPath=".downloadURIs.default"
    if [[ $(echo "${package}" | jq ".downloadURIs.${osLowerCase}") != "null" ]]; then
        uriPath=".downloadURIs.${osLowerCase}"
        # An explicitly empty versions list means "no packages for this OS".
        if jq -e "${uriPath}.${RELEASE}.versions | length == 0" <<< "${package}" > /dev/null; then
            return
        fi
    fi
    versions=$(echo "${package}" | jq "${uriPath}.${RELEASE}.versions[]" -r)
    for version in ${versions[@]}; do
        PACKAGE_VERSIONS+=("${version}")
    done
    return 0
}
|
||||
|
||||
updatePackageDownloadURL() {
    # Populate the global PACKAGE_DOWNLOAD_URL for a package object from
    # components.json, preferring the OS-specific .downloadURIs.<os> subtree
    # and falling back to .downloadURIs.default. A JSON null downloadURL
    # yields an empty string.
    # $1 - package JSON, $2 - OS name, $3 - OS version
    local package=$1
    local os=$2
    local osVersion=$3
    RELEASE="current"
    updateRelease "${package}" "${os}" "${osVersion}"
    local osLowerCase=$(echo "${os}" | tr '[:upper:]' '[:lower:]')

    # IMPROVED: the default and OS-specific branches previously duplicated the
    # downloadURL extraction; resolve the jq subtree once and share one path.
    local uriPath=".downloadURIs.default"
    if [[ $(echo "${package}" | jq ".downloadURIs.${osLowerCase}") != "null" ]]; then
        uriPath=".downloadURIs.${osLowerCase}"
    fi
    downloadURL=$(echo "${package}" | jq "${uriPath}.${RELEASE}.downloadURL" -r)
    [ "${downloadURL}" = "null" ] && PACKAGE_DOWNLOAD_URL="" || PACKAGE_DOWNLOAD_URL="${downloadURL}"
    return
}
|
||||
|
||||
#HELPERSEOF
|
Загрузка…
Ссылка в новой задаче