(packer): use docker.io / refactor install scripts

This commit is contained in:
Sean Knox 2018-05-13 16:26:37 -07:00
Родитель 168f8c4434
Коммит 53d25515c0
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4C461E0C1242AF66
12 изменённых файлов: 75 добавлений и 883 удалений

Просмотреть файл

@ -1,30 +0,0 @@
#!/bin/bash
# Format (if needed) and mount the etcd data disk at /var/lib/etcddisk.
# Mounting is done here instead of cloud-init because of bug https://bugs.launchpad.net/cloud-init/+bug/1692093
# Once the bug is fixed, replace the below with the cloud-init changes proposed in https://github.com/Azure/acs-engine/pull/661.
set -x
DISK=/dev/sdc
PARTITION=${DISK}1
MOUNTPOINT=/var/lib/etcddisk
# Wait for pending udev events so the device nodes exist before we probe them.
udevadm settle
mkdir -p "$MOUNTPOINT"
# Already mounted? Nothing to do.
if mount | grep -q "$MOUNTPOINT"; then
    echo "disk is already mounted"
    exit 0
fi
# Add the fstab entry once, so the disk is remounted on reboot
# (nofail keeps boot going if the disk is absent).
if ! grep -q "/dev/sdc1" /etc/fstab; then
    echo "$PARTITION $MOUNTPOINT auto defaults,nofail 0 2" >> /etc/fstab
fi
# Create and format the partition only if the block device does not exist yet
# (-b is a stricter existence test than the original `ls`).
if [ ! -b "$PARTITION" ]; then
    /sbin/sgdisk --new 1 "$DISK"
    /sbin/mkfs.ext4 "$PARTITION" -L etcd_disk -F -E lazy_itable_init=1,lazy_journal_init=1
fi
# Mount via the fstab entry added above.
mount "$MOUNTPOINT"

Просмотреть файл

@ -1,714 +0,0 @@
#!/bin/bash
# Master/agent Kubernetes node provision script (run as root by the Azure
# custom-script extension). All inputs arrive as environment variables.
set -x
# Retry helpers: retrycmd_if_failure, retrycmd_if_failure_no_stats, retrycmd_get_tarball.
source /opt/azure/containers/provision_source.sh
# Detect the distro from the ID= line of /etc/*-release, upper-cased.
# NOTE(review): `tr -d 'ID="'` deletes the character *set* {I,D,=,"}, not the
# literal prefix — works for the distros below but would mangle IDs containing
# the letters i/d; confirm before reusing elsewhere.
OS=$(cat /etc/*-release | grep ^ID= | tr -d 'ID="' | awk '{print toupper($0)}')
UBUNTU_OS_NAME="UBUNTU"
RHEL_OS_NAME="RHEL"
COREOS_OS_NAME="COREOS"
KUBECTL=/usr/local/bin/kubectl
DOCKER=/usr/bin/docker
# Disable tracing while handling the etcd peer cert/key secrets.
set +x
# Select this master's peer cert/key out of the bracketed comma-separated
# lists (MASTER_INDEX is 0-based; cut fields are 1-based).
ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${MASTER_INDEX}+1)))
ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${MASTER_INDEX}+1)))
set -x
# CoreOS ships kubectl in /opt rather than /usr/local/bin.
if [[ $OS == $COREOS_OS_NAME ]]; then
echo "Changing default kubectl bin location"
KUBECTL=/opt/kubectl
fi
ensureRunCommandCompleted()
{
    # Poll (up to 900s) for the sentinel file cloud-init's runcmd phase
    # writes when it finishes; gives up silently after the timeout.
    echo "waiting for runcmd to finish"
    local waited
    for (( waited = 1; waited <= 900; waited++ )); do
        if [[ -e /opt/azure/containers/runcmd.complete ]]; then
            echo "runcmd finished, took $waited seconds"
            break
        fi
        sleep 1
    done
}
ensureDockerInstallCompleted()
{
    # Poll (up to 900s) for the sentinel file the docker install step writes
    # when it finishes; gives up silently after the timeout.
    echo "waiting for docker install to finish"
    local waited
    for (( waited = 1; waited <= 900; waited++ )); do
        if [[ -e /opt/azure/containers/dockerinstall.complete ]]; then
            echo "docker install finished, took $waited seconds"
            break
        fi
        sleep 1
    done
}
# /opt/m collects coarse timing breadcrumbs for provisioning diagnostics.
echo `date`,`hostname`, startscript>>/opt/m
# Record whether a reboot is pending (flag file written by unattended
# upgrades); later steps skip service starts/health checks when true.
if [ -f /var/run/reboot-required ]; then
REBOOTREQUIRED=true
else
REBOOTREQUIRED=false
fi
# Master-only: create the etcd service account and write all cluster
# certificates/keys (passed base64-encoded via env) under /etc/kubernetes/certs.
if [[ ! -z "${MASTER_NODE}" ]]; then
echo "executing master node provision operations"
# Dedicated etcd user with a random, effectively unusable password.
useradd -U "etcd"
usermod -p "$(head -c 32 /dev/urandom | base64)" "etcd"
passwd -u "etcd"
id "etcd"
echo `date`,`hostname`, beginGettingEtcdCerts>>/opt/m
# Pre-create each file with its final ownership/mode BEFORE writing the
# secret, so key material never exists on disk with loose permissions.
APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key"
touch "${APISERVER_PRIVATE_KEY_PATH}"
chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}"
chown root:root "${APISERVER_PRIVATE_KEY_PATH}"
CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key"
touch "${CA_PRIVATE_KEY_PATH}"
chmod 0600 "${CA_PRIVATE_KEY_PATH}"
chown root:root "${CA_PRIVATE_KEY_PATH}"
# etcd server/peer keys are owned by the etcd user created above.
ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key"
touch "${ETCD_SERVER_PRIVATE_KEY_PATH}"
chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}"
chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}"
ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key"
touch "${ETCD_CLIENT_PRIVATE_KEY_PATH}"
chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}"
chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}"
ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${MASTER_INDEX}.key"
touch "${ETCD_PEER_PRIVATE_KEY_PATH}"
chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}"
chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}"
# Public certificates are world-readable (0644).
ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt"
touch "${ETCD_SERVER_CERTIFICATE_PATH}"
chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}"
chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}"
ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt"
touch "${ETCD_CLIENT_CERTIFICATE_PATH}"
chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}"
chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}"
ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${MASTER_INDEX}.crt"
touch "${ETCD_PEER_CERTIFICATE_PATH}"
chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}"
chown root:root "${ETCD_PEER_CERTIFICATE_PATH}"
# Disable tracing while decoding secrets so they do not leak into logs.
set +x
echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}"
echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}"
echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}"
echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}"
echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}"
echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}"
echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}"
set -x
echo `date`,`hostname`, endGettingEtcdCerts>>/opt/m
# Signal that certificates are in place (consumed elsewhere — e.g. by the
# etcd unit; TODO confirm the consumer).
mkdir -p /opt/azure/containers && touch /opt/azure/containers/certs.ready
else
echo "skipping master node provision operations, this is an agent node"
fi
# All nodes: kubelet client key, apiserver public cert, and the Azure cloud
# provider config consumed by kubelet/controller-manager.
KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key"
touch "${KUBELET_PRIVATE_KEY_PATH}"
chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}"
chown root:root "${KUBELET_PRIVATE_KEY_PATH}"
APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt"
touch "${APISERVER_PUBLIC_KEY_PATH}"
chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}"
chown root:root "${APISERVER_PUBLIC_KEY_PATH}"
# azure.json holds the service-principal secret, hence 0600.
AZURE_JSON_PATH="/etc/kubernetes/azure.json"
touch "${AZURE_JSON_PATH}"
chmod 0600 "${AZURE_JSON_PATH}"
chown root:root "${AZURE_JSON_PATH}"
# Tracing off: the following expansions contain secrets.
set +x
echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}"
echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}"
# Unquoted ${...} values below are intentional: they are JSON booleans/numbers.
cat << EOF > "${AZURE_JSON_PATH}"
{
    "cloud":"${TARGET_ENVIRONMENT}",
    "tenantId": "${TENANT_ID}",
    "subscriptionId": "${SUBSCRIPTION_ID}",
    "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}",
    "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}",
    "resourceGroup": "${RESOURCE_GROUP}",
    "location": "${LOCATION}",
    "subnetName": "${SUBNET}",
    "securityGroupName": "${NETWORK_SECURITY_GROUP}",
    "vnetName": "${VIRTUAL_NETWORK}",
    "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}",
    "routeTableName": "${ROUTE_TABLE}",
    "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}",
    "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF},
    "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES},
    "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT},
    "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION},
    "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER},
    "cloudProviderRatelimit": ${CLOUDPROVIDER_RATELIMIT},
    "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS},
    "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET},
    "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION},
    "useInstanceMetadata": ${USE_INSTANCE_METADATA}
}
EOF
set -x
function ensureFilepath() {
    # Wait up to 600s for the path given in $1 to appear on disk; exit 1 if
    # it never does. Skipped entirely when a reboot is pending.
    if $REBOOTREQUIRED; then
        return
    fi
    local target="$1"
    found=1
    local i
    for i in {1..600}; do
        if [[ -e "$target" ]]; then
            found=0
            echo "$target is present, took $i seconds to verify"
            break
        fi
        sleep 1
    done
    if [[ $found -ne 0 ]]; then
        echo "$target is not present after $i seconds of trying to verify"
        exit 1
    fi
}
# Set KUBELET_MAX_PODS in the kubelet defaults file.
function setMaxPods () {
sed -i "s/^KUBELET_MAX_PODS=.*/KUBELET_MAX_PODS=${1}/" /etc/default/kubelet
}
# Set the kubelet network plugin (e.g. "kubenet" or "cni").
function setNetworkPlugin () {
sed -i "s/^KUBELET_NETWORK_PLUGIN=.*/KUBELET_NETWORK_PLUGIN=${1}/" /etc/default/kubelet
}
# Replace KUBELET_OPTS; '#' is used as the sed delimiter because $1 may
# contain slashes (paths, URLs).
function setKubeletOpts () {
sed -i "s#^KUBELET_OPTS=.*#KUBELET_OPTS=${1}#" /etc/default/kubelet
}
# Replace DOCKER_OPTS. NOTE(review): the DOCKER_OPTS line appears to live in
# /etc/default/kubelet (not /etc/default/docker) in this layout — confirm
# before "fixing" the path.
function setDockerOpts () {
sed -i "s#^DOCKER_OPTS=.*#DOCKER_OPTS=${1}#" /etc/default/kubelet
}
# Install the Azure VNET CNI plugin plus the reference loopback/portmap
# plugins, lay down the CNI config, then switch the kubelet to CNI mode.
function configAzureNetworkPolicy() {
CNI_CONFIG_DIR=/etc/cni/net.d
mkdir -p $CNI_CONFIG_DIR
chown -R root:root $CNI_CONFIG_DIR
chmod 755 $CNI_CONFIG_DIR
CNI_BIN_DIR=/opt/cni/bin
mkdir -p $CNI_BIN_DIR
# Download/validate tarballs with retries (see provision_source.sh helper).
AZURE_CNI_TGZ_TMP=/tmp/azure_cni.tgz
retrycmd_get_tarball 60 1 $AZURE_CNI_TGZ_TMP ${VNET_CNI_PLUGINS_URL}
tar -xzf $AZURE_CNI_TGZ_TMP -C $CNI_BIN_DIR
CONTAINERNETWORKING_CNI_TGZ_TMP=/tmp/containernetworking_cni.tgz
retrycmd_get_tarball 60 1 $CONTAINERNETWORKING_CNI_TGZ_TMP ${CNI_PLUGINS_URL}
# Only the loopback and portmap plugins are needed from the reference set.
tar -xzf $CONTAINERNETWORKING_CNI_TGZ_TMP -C $CNI_BIN_DIR ./loopback ./portmap
chown -R root:root $CNI_BIN_DIR
chmod -R 755 $CNI_BIN_DIR
# The Azure tarball ships its conflist alongside the binaries; move it into
# the CNI config dir and restrict it (it may contain sensitive settings).
mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/
chmod 600 $CNI_CONFIG_DIR/10-azure.conflist
# NOTE(review): listing the ebtables nat table looks diagnostic-only —
# confirm it is not load-bearing before removing.
/sbin/ebtables -t nat --list
configCNINetworkPolicy
}
# Point the kubelet at CNI and bind-mount the CNI dirs into its container.
function configCNINetworkPolicy() {
setNetworkPlugin cni
setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro"
}
function configNetworkPolicy() {
    # Dispatch on the requested NETWORK_POLICY to the matching CNI setup.
    case "${NETWORK_POLICY}" in
        azure)
            configAzureNetworkPolicy
            ;;
        calico|cilium)
            configCNINetworkPolicy
            ;;
        *)
            # No policy, defaults to kubenet.
            setNetworkPlugin kubenet
            setDockerOpts ""
            ;;
    esac
}
# Install the Intel Clear Containers runtime (cc-runtime) from its OBS repo
# plus devicemapper thin-provisioning tools, then point the kubelet at the
# CRI-O socket. Assumes Ubuntu 16.04 (hard-coded repo path below).
function installClearContainersRuntime() {
	# Add Clear Containers repository key
	echo "Adding Clear Containers repository key..."
	curl -sSL --retry 5 --retry-delay 10 --retry-max-time 30 "https://download.opensuse.org/repositories/home:clearcontainers:clear-containers-3/xUbuntu_16.04/Release.key" | apt-key add -

	# Add Clear Container repository
	echo "Adding Clear Containers repository..."
	echo 'deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/cc-runtime.list

	# Install Clear Containers runtime
	echo "Installing Clear Containers runtime..."
	apt-get update
	apt-get install --no-install-recommends -y \
		cc-runtime

	# Install thin tools for devicemapper configuration
	echo "Installing thin tools to provision devicemapper..."
	apt-get install --no-install-recommends -y \
		lvm2 \
		thin-provisioning-tools

	# Load systemd changes
	echo "Loading changes to systemd service files..."
	systemctl daemon-reload

	# Enable and start Clear Containers proxy service
	echo "Enabling and starting Clear Containers proxy service..."
	systemctl enable cc-proxy
	systemctl start cc-proxy

	# CRIO has only been tested with the azure plugin
	configAzureNetworkPolicy
	setKubeletOpts " --container-runtime=remote --container-runtime-endpoint=/var/run/crio.sock"
	setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro"
}
# Install the latest stable Go toolchain into /usr/local/go and a fresh
# GOPATH under $HOME/.go (any prior install is wiped). Exports PATH so the
# go binaries are usable by later build steps in this same shell.
function installGo() {
export GO_SRC=/usr/local/go
export GOPATH="${HOME}/.go"
# Start from a clean slate: remove any previous toolchain/workspace.
if [[ -d "$GO_SRC" ]]; then
rm -rf "$GO_SRC"
fi
if [[ -d "$GOPATH" ]]; then
rm -rf "$GOPATH"
fi
# Ask golang.org for the current stable version string (e.g. "go1.10.2").
retrycmd_if_failure_no_stats 180 1 5 curl -fsSL https://golang.org/VERSION?m=text > /tmp/gover.txt
GO_VERSION=$(cat /tmp/gover.txt)
retrycmd_get_tarball 60 1 /tmp/golang.tgz https://storage.googleapis.com/golang/${GO_VERSION}.linux-amd64.tar.gz
tar -v -C /usr/local -xzf /tmp/golang.tgz
export PATH="${GO_SRC}/bin:${PATH}:${GOPATH}/bin"
}
# Build and install runc v1.0.0-rc4 from source with seccomp/apparmor
# support. Requires installGo to have run first (uses GOPATH and `go`).
function buildRunc() {
	# Clone the runc source
	echo "Cloning the runc source..."
	mkdir -p "${GOPATH}/src/github.com/opencontainers"
	# Subshell keeps the cd local to this build.
	(
	cd "${GOPATH}/src/github.com/opencontainers"
	git clone "https://github.com/opencontainers/runc.git"
	cd runc
	# Pin to a known-good tag rather than building HEAD.
	git reset --hard v1.0.0-rc4
	make BUILDTAGS="seccomp apparmor"
	make install
	)

	echo "Successfully built and installed runc..."
}
# Build and install CRI-O v1.0.0 from source. Depends on installGo (Go
# toolchain) and buildRunc (container runtime); tears the Go install back
# down afterwards and applies the CRI-O config via setupCRIO.
function buildCRIO() {
	# Add CRI-O repositories
	echo "Adding repositories required for cri-o..."
	add-apt-repository -y ppa:projectatomic/ppa
	add-apt-repository -y ppa:alexlarsson/flatpak
	apt-get update

	# Install CRI-O dependencies
	echo "Installing dependencies for CRI-O..."
	apt-get install --no-install-recommends -y \
		btrfs-tools \
		gcc \
		git \
		libapparmor-dev \
		libassuan-dev \
		libc6-dev \
		libdevmapper-dev \
		libglib2.0-dev \
		libgpg-error-dev \
		libgpgme11-dev \
		libostree-dev \
		libseccomp-dev \
		libselinux1-dev \
		make \
		pkg-config \
		skopeo-containers

	installGo;

	# Install md2man
	go get github.com/cpuguy83/go-md2man

	# Fix for templates dependency
	(
	go get -u github.com/docker/docker/daemon/logger/templates
	cd "${GOPATH}/src/github.com/docker/docker"
	mkdir -p utils
	cp -r daemon/logger/templates utils/
	)

	buildRunc;

	# Clone the CRI-O source
	echo "Cloning the CRI-O source..."
	mkdir -p "${GOPATH}/src/github.com/kubernetes-incubator"
	(
	cd "${GOPATH}/src/github.com/kubernetes-incubator"
	git clone "https://github.com/kubernetes-incubator/cri-o.git"
	cd cri-o
	git reset --hard v1.0.0
	make BUILDTAGS="seccomp apparmor"
	make install
	make install.config
	make install.systemd
	)

	echo "Successfully built and installed CRI-O..."

	# BUG FIX: removed stale `rm -vrf "$tmpd"` — $tmpd was never defined
	# anywhere in this script, so the command only ever emitted an error.

	# Cleanup the Go install
	rm -vrf "$GO_SRC" "$GOPATH"

	setupCRIO;
}
# Patch the installed CRI-O systemd unit and /etc/crio/crio.conf: debug
# logging, devicemapper storage on /dev/sdc, runc as trusted runtime and
# cc-runtime for untrusted workloads (the default trust level).
function setupCRIO() {
	# Configure CRI-O
	echo "Configuring CRI-O..."

	# Configure crio systemd service file
	SYSTEMD_CRI_O_SERVICE_FILE="/usr/local/lib/systemd/system/crio.service"
	sed -i 's#ExecStart=/usr/local/bin/crio#ExecStart=/usr/local/bin/crio -log-level debug#' "$SYSTEMD_CRI_O_SERVICE_FILE"

	# Configure /etc/crio/crio.conf
	CRI_O_CONFIG="/etc/crio/crio.conf"
	sed -i 's#storage_driver = ""#storage_driver = "devicemapper"#' "$CRI_O_CONFIG"
	# directlvm on the raw data disk; force=true because the disk may carry
	# leftover metadata from a previous provisioning run.
	sed -i 's#storage_option = \[#storage_option = \["dm.directlvm_device=/dev/sdc", "dm.thinp_percent=95", "dm.thinp_metapercent=1", "dm.thinp_autoextend_threshold=80", "dm.thinp_autoextend_percent=20", "dm.directlvm_device_force=true"#' "$CRI_O_CONFIG"
	sed -i 's#runtime = "/usr/bin/runc"#runtime = "/usr/local/sbin/runc"#' "$CRI_O_CONFIG"
	sed -i 's#runtime_untrusted_workload = ""#runtime_untrusted_workload = "/usr/bin/cc-runtime"#' "$CRI_O_CONFIG"
	sed -i 's#default_workload_trust = "trusted"#default_workload_trust = "untrusted"#' "$CRI_O_CONFIG"

	# Load systemd changes
	echo "Loading changes to systemd service files..."
	systemctl daemon-reload
}
function ensureCRIO() {
    # Start the cri-o service — but only for the clear-containers runtime,
    # and only when the CPU supports nested virtualization (vmx flag).
    [[ "$CONTAINER_RUNTIME" == "clear-containers" ]] || return 0
    # Make sure we can nest virtualization
    if grep -q vmx /proc/cpuinfo; then
        # Enable and start cri-o service
        # Make sure this is done after networking plugins are installed
        echo "Enabling and starting cri-o service..."
        systemctl enable crio crio-shutdown
        systemctl start crio
    fi
}
# Enable systemd unit $1 and verify it reports enabled, re-trying once per
# second for up to 900s; exits the script with code 5 on failure.
function systemctlEnableAndCheck() {
systemctl enable $1
systemctl is-enabled $1
enabled=$?
for i in {1..900}; do
if [ $enabled -ne 0 ]; then
# Not enabled yet: try again and re-check.
systemctl enable $1
systemctl is-enabled $1
enabled=$?
else
echo "$1 took $i seconds to be enabled by systemctl"
break
fi
sleep 1
done
# Hard failure: the rest of provisioning depends on this unit.
if [ $enabled -ne 0 ]
then
echo "$1 could not be enabled by systemctl"
exit 5
fi
}
# Enable docker and wait (up to 900s) until the daemon answers `docker info`,
# restarting it after each failed probe; exits 2 if it never comes up.
# Skips the start/probe entirely when a reboot is pending.
function ensureDocker() {
systemctlEnableAndCheck docker
# only start if a reboot is not required
if ! $REBOOTREQUIRED; then
dockerStarted=1
for i in {1..900}; do
if ! /usr/bin/docker info; then
echo "status $?"
# Bounded restart: a hung systemctl must not stall provisioning.
timeout 60s /bin/systemctl restart docker
else
echo "docker started, took $i seconds"
dockerStarted=0
break
fi
sleep 1
done
if [ $dockerStarted -ne 0 ]
then
echo "docker did not start"
exit 2
fi
fi
}
# Pre-pull the hyperkube image (with retries) and (re)start the kubelet
# service; the restart is skipped when a reboot is pending.
function ensureKubelet() {
retrycmd_if_failure 100 1 60 docker pull $HYPERKUBE_URL
systemctlEnableAndCheck kubelet
# only start if a reboot is not required
if ! $REBOOTREQUIRED; then
systemctl restart kubelet
fi
}
# Run the kubectl-extract unit (presumably extracts kubectl from the
# hyperkube image — TODO confirm against the unit file).
function extractKubectl(){
systemctlEnableAndCheck kubectl-extract
# only start if a reboot is not required
if ! $REBOOTREQUIRED; then
systemctl restart kubectl-extract
fi
}
# Configure persistent journald storage with 1G caps and no syslog
# forwarding, then restart journald (unless a reboot is pending).
function ensureJournal(){
    systemctl daemon-reload
    systemctlEnableAndCheck systemd-journald.service
    # BUG FIX: append each setting only if not already present, so re-running
    # the provision script does not stack duplicate lines in journald.conf.
    local setting
    for setting in "Storage=persistent" "SystemMaxUse=1G" "RuntimeMaxUse=1G" "ForwardToSyslog=no"; do
        grep -qxF "$setting" /etc/systemd/journald.conf || echo "$setting" >> /etc/systemd/journald.conf
    done
    # only start if a reboot is not required
    if ! $REBOOTREQUIRED; then
        systemctl restart systemd-journald.service
    fi
}
# Cluster health gate (master only): wait for kubectl to appear, the
# apiserver to answer cluster-info, all TOTAL_NODES to register, and every
# node to leave NotReady. Exits 3 on any timeout; no-op if a reboot is pending.
function ensureK8s() {
if $REBOOTREQUIRED; then
return
fi
k8sHealthy=1
nodesActive=1
nodesReady=1
# Phase 1: wait (600s) for the kubectl binary itself.
for i in {1..600}; do
if [ -e $KUBECTL ]
then
break
fi
sleep 1
done
# Phase 2: wait (600s) for the apiserver to respond.
for i in {1..600}; do
$KUBECTL 2>/dev/null cluster-info
if [ "$?" = "0" ]
then
echo "k8s cluster is healthy, took $i seconds"
k8sHealthy=0
break
fi
sleep 1
done
if [ $k8sHealthy -ne 0 ]
then
echo "k8s cluster is not healthy after $i seconds"
exit 3
fi
# Phase 3: wait (1800s) for every expected node to register.
# NOTE(review): grep 'Ready' also matches 'NotReady' rows, so this counts
# registered nodes, not ready ones — the NotReady filter below handles that.
for i in {1..1800}; do
nodes=$(${KUBECTL} get nodes 2>/dev/null | grep 'Ready' | wc -l)
if [ $nodes -eq $TOTAL_NODES ]
then
echo "all nodes are participating, took $i seconds"
nodesActive=0
break
fi
sleep 1
done
if [ $nodesActive -ne 0 ]
then
echo "still waiting for active nodes after $i seconds"
exit 3
fi
# Phase 4: wait (600s) until no node reports NotReady.
for i in {1..600}; do
notReady=$(${KUBECTL} get nodes 2>/dev/null | grep 'NotReady' | wc -l)
if [ $notReady -eq 0 ]
then
echo "all nodes are Ready, took $i seconds"
nodesReady=0
break
fi
sleep 1
done
if [ $nodesReady -ne 0 ]
then
echo "still waiting for Ready nodes after $i seconds"
exit 3
fi
}
# Wait (up to 600s) for the local etcd member to answer the v2 machines
# endpoint over mutual TLS; exits 3 if it never responds.
function ensureEtcd() {
etcdIsRunning=1
for i in {1..600}; do
curl --cacert /etc/kubernetes/certs/ca.crt --cert /etc/kubernetes/certs/etcdclient.crt --key /etc/kubernetes/certs/etcdclient.key --max-time 60 https://127.0.0.1:2379/v2/machines;
if [ $? -eq 0 ]
then
etcdIsRunning=0
echo "Etcd setup successfully, took $i seconds"
break
fi
sleep 1
done
if [ $etcdIsRunning -ne 0 ]
then
echo "Etcd not accessible after $i seconds"
exit 3
fi
}
# Verify the etcd data disk is mounted at /var/lib/etcddisk; if not, retry
# `mount -a` (relying on the fstab entry) every 5s for up to 60 attempts,
# then exit 4.
function ensureEtcdDataDir() {
    mount | grep /dev/sdc1 | grep /var/lib/etcddisk
    if [ "$?" = "0" ]
    then
        echo "Etcd is running with data dir at: /var/lib/etcddisk"
        return
    else
        echo "/var/lib/etcddisk was not found at /dev/sdc1. Trying to mount all devices."
        # BUG FIX: the original `s = 5` ran a command named `s` instead of
        # assigning a value (shell assignments must not have spaces), leaving
        # $s empty for both `sleep $s` and the timing arithmetic below.
        s=5
        for i in {1..60}; do
            sudo mount -a && mount | grep /dev/sdc1 | grep /var/lib/etcddisk;
            if [ "$?" = "0" ]
            then
                (( t = i * s ))
                echo "/var/lib/etcddisk mounted at: /dev/sdc1, took $t seconds"
                return
            fi
            sleep $s
        done
    fi
    echo "Etcd data dir was not found at: /var/lib/etcddisk"
    exit 4
}
function ensurePodSecurityPolicy(){
    # Apply the pod security policy manifest if one was laid down on this
    # master; no-op when a reboot is pending.
    if $REBOOTREQUIRED; then
        return
    fi
    local policy_file="/etc/kubernetes/manifests/pod-security-policy.yaml"
    if [ -f "$policy_file" ]; then
        $KUBECTL create -f "$policy_file"
    fi
}
# Write the admin kubeconfig (cluster CA + admin client cert/key from env)
# into $ADMINUSER's home with owner-only permissions.
function writeKubeConfig() {
KUBECONFIGDIR=/home/$ADMINUSER/.kube
KUBECONFIGFILE=$KUBECONFIGDIR/config
# Create with final ownership/mode before writing the secret content.
mkdir -p $KUBECONFIGDIR
touch $KUBECONFIGFILE
chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR
chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE
chmod 700 $KUBECONFIGDIR
chmod 600 $KUBECONFIGFILE
# disable logging after secret output
set +x
echo "
---
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: \"$CA_CERTIFICATE\"
    server: $KUBECONFIG_SERVER
  name: \"$MASTER_FQDN\"
contexts:
- context:
    cluster: \"$MASTER_FQDN\"
    user: \"$MASTER_FQDN-admin\"
  name: \"$MASTER_FQDN\"
current-context: \"$MASTER_FQDN\"
kind: Config
users:
- name: \"$MASTER_FQDN-admin\"
  user:
    client-certificate-data: \"$KUBECONFIG_CERTIFICATE\"
    client-key-data: \"$KUBECONFIG_KEY\"
" > $KUBECONFIGFILE
# renable logging after secrets
set -x
}
# ---- Main provisioning sequence (timing breadcrumbs go to /opt/m) ----
if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]]; then
	# If the container runtime is "clear-containers" we need to ensure the
	# run command is completed _before_ we start installing all the dependencies
	# for clear-containers to make sure there is not a dpkg lock.
	ensureRunCommandCompleted
	echo `date`,`hostname`, RunCmdCompleted>>/opt/m
fi
if [[ $OS == $UBUNTU_OS_NAME ]]; then
    # make sure walinuxagent doesn't get updated in the middle of running this script
    apt-mark hold walinuxagent
fi
echo `date`,`hostname`, EnsureDockerStart>>/opt/m
ensureDockerInstallCompleted
ensureDocker
echo `date`,`hostname`, configNetworkPolicyStart>>/opt/m
configNetworkPolicy
if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]]; then
	# Ensure we can nest virtualization
	if grep -q vmx /proc/cpuinfo; then
		echo `date`,`hostname`, installClearContainersRuntimeStart>>/opt/m
		installClearContainersRuntime
		echo `date`,`hostname`, buildCRIOStart>>/opt/m
		buildCRIO
	fi
fi
echo `date`,`hostname`, setMaxPodsStart>>/opt/m
setMaxPods ${MAX_PODS}
echo `date`,`hostname`, ensureCRIOStart>>/opt/m
ensureCRIO
echo `date`,`hostname`, ensureKubeletStart>>/opt/m
ensureKubelet
echo `date`,`hostname`, extractKubctlStart>>/opt/m
extractKubectl
echo `date`,`hostname`, ensureJournalStart>>/opt/m
ensureJournal
echo `date`,`hostname`, ensureJournalDone>>/opt/m
ensureRunCommandCompleted
echo `date`,`hostname`, RunCmdCompleted>>/opt/m
# Master-only post-steps: kubeconfig plus health gates for etcd and k8s.
if [[ ! -z "${MASTER_NODE}" ]]; then
    writeKubeConfig
    ensureFilepath $KUBECTL
    ensureFilepath $DOCKER
    ensureEtcdDataDir
    ensureEtcd
    ensureK8s
    ensurePodSecurityPolicy
fi
if [[ $OS == $UBUNTU_OS_NAME ]]; then
    # mitigation for bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1676635
    echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind
    # Persist the unbind across reboots by injecting it into rc.local.
    sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local
    apt-mark unhold walinuxagent
fi
echo "Install complete successfully"
if $REBOOTREQUIRED; then
  # wait 1 minute to restart node, so that the custom script extension can complete
  echo 'reboot required, rebooting node in 1 minute'
  /bin/bash -c "shutdown -r 1 &"
fi
echo `date`,`hostname`, endscript>>/opt/m
# Sentinel consumed by external tooling to detect provisioning completion.
mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete
# Snapshot the process tree for post-mortem debugging.
ps auxfww > /opt/azure/provision-ps.log &

Просмотреть файл

@ -1,59 +0,0 @@
#!/bin/bash
# Generate an aggregator ("proxy client") CA and client certificate, publish
# them to etcd (first master wins — see the `etcdctl mk` race below), and
# install them where the apiserver expects them.
source /opt/azure/containers/provision_source.sh
# Scratch and output paths, all overridable via environment.
PROXY_CA_KEY="${PROXY_CA_KEY:=/tmp/proxy-client-ca.key}"
PROXY_CRT="${PROXY_CRT:=/tmp/proxy-client-ca.crt}"
PROXY_CLIENT_KEY="${PROXY_CLIENT_KEY:=/tmp/proxy-client.key}"
PROXY_CLIENT_CSR="${PROXY_CLIENT_CSR:=/tmp/proxy-client.csr}"
PROXY_CLIENT_CRT="${PROXY_CLIENT_CRT:=/tmp/proxy-client.crt}"
# etcd key names used to share the certs between masters.
ETCD_REQUESTHEADER_CLIENT_CA="${ETCD_REQUESTHEADER_CLIENT_CA:=/proxycerts/requestheader-client-ca-file}"
ETCD_PROXY_CERT="${ETCD_PROXY_CERT:=/proxycerts/proxy-client-cert-file}"
ETCD_PROXY_KEY="${ETCD_PROXY_KEY:=/proxycerts/proxy-client-key-file}"
# Final on-disk locations read by the apiserver.
K8S_PROXY_CA_CRT_FILEPATH="${K8S_PROXY_CA_CRT_FILEPATH:=/etc/kubernetes/certs/proxy-ca.crt}"
K8S_PROXY_KEY_FILEPATH="${K8S_PROXY_KEY_FILEPATH:=/etc/kubernetes/certs/proxy.key}"
K8S_PROXY_CRT_FILEPATH="${K8S_PROXY_CRT_FILEPATH:=/etc/kubernetes/certs/proxy.crt}"
# etcdctl v2 connection settings (mutual TLS with the etcd client cert).
export ETCDCTL_ENDPOINTS="${ETCDCTL_ENDPOINTS:=https://127.0.0.1:2379}"
export ETCDCTL_CA_FILE="${ETCDCTL_CA_FILE:=/etc/kubernetes/certs/ca.crt}"
export ETCDCTL_KEY_FILE="${ETCDCTL_KEY_FILE:=/etc/kubernetes/certs/etcdclient.key}"
export ETCDCTL_CERT_FILE="${ETCDCTL_CERT_FILE:=/etc/kubernetes/certs/etcdclient.crt}"
# Give openssl a writable RANDFILE (avoids warnings when HOME is unset).
export RANDFILE=$(mktemp)
# generate root CA
openssl genrsa -out $PROXY_CA_KEY 2048
openssl req -new -x509 -days 1826 -key $PROXY_CA_KEY -out $PROXY_CRT -subj '/CN=proxyClientCA'
# generate new cert
openssl genrsa -out $PROXY_CLIENT_KEY 2048
openssl req -new -key $PROXY_CLIENT_KEY -out $PROXY_CLIENT_CSR -subj '/CN=aggregator/O=system:masters'
openssl x509 -req -days 730 -in $PROXY_CLIENT_CSR -CA $PROXY_CRT -CAkey $PROXY_CA_KEY -set_serial 02 -out $PROXY_CLIENT_CRT
# Read the shared aggregator certs back out of etcd and install them at the
# paths the apiserver reads.
write_certs_to_disk() {
    etcdctl get $ETCD_REQUESTHEADER_CLIENT_CA > $K8S_PROXY_CA_CRT_FILEPATH
    etcdctl get $ETCD_PROXY_KEY > $K8S_PROXY_KEY_FILEPATH
    etcdctl get $ETCD_PROXY_CERT > $K8S_PROXY_CRT_FILEPATH
    # Remove whitespace padding at beginning of 1st line
    # (it was added on store; see the `etcdctl mk` comment below).
    sed -i '1s/\s//' $K8S_PROXY_CA_CRT_FILEPATH $K8S_PROXY_CRT_FILEPATH $K8S_PROXY_KEY_FILEPATH
    chmod 600 $K8S_PROXY_KEY_FILEPATH
}
# Retry the read up to 12 times, 5s apart — the publishing master may not
# have stored the keys yet.
write_certs_to_disk_with_retry() {
    for i in 1 2 3 4 5 6 7 8 9 10 11 12; do
        write_certs_to_disk
        [ $? -eq 0 ] && break || sleep 5
    done
}
# block until all etcd is ready
retrycmd_if_failure 100 5 10 etcdctl cluster-health
# Make etcd keys, adding a leading whitespace because etcd won't accept a val that begins with a '-' (hyphen)!
# `etcdctl mk` only succeeds if the key does not exist, so exactly one master
# publishes its freshly generated certs; the others lose the race and simply
# read the winner's certs back from etcd.
if etcdctl mk $ETCD_REQUESTHEADER_CLIENT_CA " $(cat ${PROXY_CRT})"; then
    etcdctl mk $ETCD_PROXY_KEY " $(cat ${PROXY_CLIENT_KEY})"
    etcdctl mk $ETCD_PROXY_CERT " $(cat ${PROXY_CLIENT_CRT})"
    sleep 5
    write_certs_to_disk_with_retry
# If the etcdtl mk command failed, that means the key already exists
else
    sleep 5
    write_certs_to_disk_with_retry
fi

Просмотреть файл

@ -1,5 +0,0 @@
#!/bin/sh
# Retry helpers sourced by the provisioning scripts (POSIX sh — no bashisms).
# BUG FIX: the command words are now expanded as "$@" (and url/tarball are
# quoted) so arguments containing spaces or glob characters survive intact;
# the original unquoted ${@} re-split every argument.
# retrycmd_if_failure <retries> <wait> <timeout> <cmd...> — retry with a
# per-attempt timeout, then report how many attempts were made.
retrycmd_if_failure() { retries=$1; wait=$2; timeout=$3; shift && shift && shift; for i in $(seq 1 $retries); do timeout $timeout "$@"; [ $? -eq 0 ] && break || sleep $wait; done; echo Executed \"$@\" $i times; }
# Same as retrycmd_if_failure but without the summary line.
retrycmd_if_failure_no_stats() { retries=$1; wait=$2; timeout=$3; shift && shift && shift; for i in $(seq 1 $retries); do timeout $timeout "$@"; [ $? -eq 0 ] && break || sleep $wait; done; }
# retrycmd_get_tarball <retries> <wait> <tarball> <url> — download until
# `tar -tzf` confirms the file is a readable gzip archive.
retrycmd_get_tarball() { retries=$1; wait=$2; tarball=$3; url=$4; for i in $(seq 1 $retries); do tar -tzf "$tarball"; [ $? -eq 0 ] && break || retrycmd_if_failure_no_stats $retries 1 10 curl -fsSL "$url" -o "$tarball"; sleep $wait; done; }

Просмотреть файл

@ -45,46 +45,43 @@
},
{
"type": "file",
"source": "./prepare-vhd.sh",
"destination": "/home/packer/prepare-vhd.sh"
"source": "./cleanup-vhd.sh",
"destination": "/home/packer/cleanup-vhd.sh"
},
{
"type": "file",
"source": "./install-cri.sh",
"destination": "/home/packer/install-cri.sh"
},
{
"type": "file",
"source": "./install-cni.sh",
"destination": "/home/packer/install-cni.sh"
},
{
"type": "file",
"source": "./install-kubernetes.sh",
"destination": "/home/packer/install-kubernetes.sh"
},
{
"type": "file",
"source": "./install-etcd.sh",
"destination": "/home/packer/install-etcd.sh"
},
{
"type": "file",
"source": "./acs-engine/kubernetes_mountetcd.sh",
"destination": "/opt/azure/containers/kubernetes_mountetcd.sh"
},
{
"type": "file",
"source": "./acs-engine/kubernetesmastercustomscript.sh",
"destination": "/opt/azure/containers/kubernetesmastercustomscript.sh"
},
{
"type": "file",
"source": "./acs-engine/kubernetesmastergenerateproxycertscript.sh",
"destination": "/opt/azure/containers/kubernetesmastergenerateproxycertscript.sh"
},
{
"type": "file",
"source": "./acs-engine/kubernetesprovisionsource.sh",
"destination": "/opt/azure/containers/provision_source.sh"
},
{
"type": "file",
"source": "./docker/daemon.json",
"destination": "$HOME/daemon.json"
"destination": "/home/packer/daemon.json"
},
{
"type": "shell",
"inline": [
"sudo /bin/bash -eux /home/packer/prepare-vhd.sh",
"sudo /bin/bash -eux /home/packer/install-cri.sh",
"sudo /bin/bash -eux /home/packer/install-cni.sh",
"sudo /bin/bash -eux /home/packer/install-kubernetes.sh",
"sudo /bin/bash -eux /home/packer/install-etcd.sh",
"sudo mv $HOME/daemon.json /etc/docker",
"rm /home/packer/prepare-vhd.sh /home/packer/install-etcd.sh "
"sudo /bin/bash -eux /home/packer/cleanup-vhd.sh",
"rm /home/packer/*.sh"
]
}
]

7
packer/cleanup-vhd.sh Normal file
Просмотреть файл

@ -0,0 +1,7 @@
#!/bin/bash -eux
## Cleanup packer SSH key and machine ID generated for this boot
# -f keeps a missing key file (e.g. a rebake after partial cleanup) from
# aborting the script, which runs under -e.
rm -f /root/.ssh/authorized_keys
rm -f /home/packer/.ssh/authorized_keys
# Empty /etc/machine-id so every VM cloned from this image generates its own
# unique machine ID on first boot.
rm -f /etc/machine-id
touch /etc/machine-id

23
packer/install-cni.sh Normal file
Просмотреть файл

@ -0,0 +1,23 @@
#!/bin/bash -eux
# Bake CNI plugins into the VHD: the Azure VNET CNI plugin plus the upstream
# reference plugins (for loopback/portmap etc.).
cni_release_tag="v1.0.4"

# Download CNI networking components
wget -q --show-progress --https-only --timestamping \
"https://github.com/Azure/azure-container-networking/releases/download/${cni_release_tag}/azure-vnet-cni-linux-amd64-${cni_release_tag}.tgz" \
"https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz"

# Create CNI conf and bin directories
mkdir -p \
/etc/cni/net.d \
/opt/cni/bin

# Install CNI
tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/
# The Azure tarball unpacks into the current directory; move the binaries
# and the conflist to their final locations.
tar -xvf azure-vnet-cni-linux-amd64-${cni_release_tag}.tgz
mv azure-vnet azure-vnet-ipam /opt/cni/bin
mv 10-azure.conflist /etc/cni/net.d

13
packer/install-cri.sh Normal file
Просмотреть файл

@ -0,0 +1,13 @@
#!/bin/bash -eux
## Install the container runtime: Docker from the Ubuntu archive (docker.io),
## with the pre-staged daemon.json put in place first.
export DEBIAN_FRONTEND=noninteractive
apt_flags=(-o "Dpkg::Options::=--force-confnew" -qy)
# Alternative containerd/CRI runtime, currently disabled:
# wget -q --show-progress --https-only --timestamping \
#   "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz"
# sudo tar -xvf cri-containerd-1.1.0.linux-amd64.tar.gz -C /
# BUG FIX: -p so an already-existing /etc/docker (e.g. a rebake) does not
# abort the script, which runs under -e.
mkdir -p /etc/docker
mv $HOME/daemon.json /etc/docker/daemon.json
apt-get install "${apt_flags[@]}" docker.io

Просмотреть файл

@ -1,13 +1,6 @@
#!/bin/bash -eux
kubernetes_release_tag="v1.10.2"
cni_release_tag="v1.0.4"
## Install official Kubernetes package
curl --silent "https://packages.cloud.google.com/apt/doc/apt-key.gpg" | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
export DEBIAN_FRONTEND=noninteractive
apt_flags=(-o "Dpkg::Options::=--force-confnew" -qy)
@ -28,34 +21,7 @@ wget -q --show-progress --https-only --timestamping \
# Install the Kubernetes binaries
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy kubectl
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy kubectl /usr/local/bin/
# Download CNI networking components
wget -q --show-progress --https-only --timestamping \
"https://github.com/Azure/azure-container-networking/releases/download/${cni_release_tag}/azure-vnet-cni-linux-amd64-${cni_release_tag}.tgz" \
"https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz" \
"https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz"
# Create CNI conf and bin directories
sudo mkdir -p \
/etc/cni/net.d \
/opt/cni/bin
# Install CNI
sudo tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/
sudo tar -xvf azure-vnet-cni-linux-amd64-${cni_release_tag}.tgz
sudo tar -xvf cri-containerd-1.1.0.linux-amd64.tar.gz -C /
sudo mv azure-vnet azure-vnet-ipam /opt/cni/bin
sudo mv 10-azure.conflist /etc/cni/net.d
mv kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy kubectl /usr/local/bin/
## Save release version, so that we can call `kubeadm init --use-kubernetes-version="$(cat /etc/kubernetes_community_vhd_version)` and ensure we get the same version
echo "${kubernetes_release_tag}" > /etc/kubernetes_community_vhd_version
## Cleanup packer SSH key and machine ID generated for this boot
rm /root/.ssh/authorized_keys
rm /home/packer/.ssh/authorized_keys
rm /etc/machine-id
touch /etc/machine-id
## Done!

Просмотреть файл

@ -27,7 +27,10 @@ export KUBE_API_PUBLIC_FQDN := $(CLUSTER_NAME).$(AZURE_LOCATION).cloudapp.a
export AZURE_VM_KEY_NAME ?= $(CLUSTER_NAME)
export AZURE_VM_KEY_PATH := ${DIR_KEY_PAIR}/$(CLUSTER_NAME)/${AZURE_VM_KEY_NAME}.pem
export AZURE_VHD_URI ?= https://acstackimages.blob.core.windows.net/system/Microsoft.Compute/Images/acs-vhds/acstack-1526236378-osDisk.4531b66b-1910-4754-b9c3-607f5ee2f40b.vhd
# docker.io
export AZURE_VHD_URI ?= https://acstackimages.blob.core.windows.net/system/Microsoft.Compute/Images/acs-vhds/acstack-1526251964-osDisk.7fdd6d44-e3bd-4020-8033-47877b422c07.vhd
# cri/containerd/runc
# export AZURE_VHD_URI ?= https://acstackimages.blob.core.windows.net/system/Microsoft.Compute/Images/acs-vhds/acstack-1526252790-osDisk.5879f7c8-67f8-4c2e-a94d-95551fcc06db.vhd
export INTERNAL_TLD := ${CLUSTER_NAME}.acs
export HYPERKUBE_IMAGE ?= quay.io/coreos/hyperkube

Просмотреть файл

@ -140,8 +140,8 @@ write_files:
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
@ -151,8 +151,6 @@ write_files:
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--cluster-dns=${DNS_SERVICE_IP} \
--cluster-domain=cluster.local \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--image-pull-progress-deadline=2m \
--hostname-override=${HOSTNAME} \
--kubeconfig=/var/lib/kubelet/kubeconfig \

Просмотреть файл

@ -9,11 +9,6 @@ write_files:
ethernets:
eth1:
dhcp4: true
- path: "/etc/systemd/system/kubelet.service.d/0-containerd.conf"
permissions: "0755"
content: |
[Service]
Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
- path: "/etc/kubernetes/azure.json"
permissions: "0755"
content: |
@ -50,8 +45,8 @@ write_files:
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
@ -65,8 +60,6 @@ write_files:
--cloud-provider=azure \
--cluster-dns=${DNS_SERVICE_IP} \
--cluster-domain=cluster.local \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--enforce-node-allocatable=pods \
--eviction-hard=memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5% \
--feature-gates=Accelerators=true \