Jack Francis 2020-09-25 12:18:49 -07:00 committed by GitHub
Parent 190444dc03
Commit 1539189bf8
No key found corresponding to this signature
GPG key ID: 4AEE18F83AFDEB23
27 changed files: 1236 additions and 292 deletions


@ -13,7 +13,11 @@ AKS Engine provides convenient tooling to quickly bootstrap Kubernetes clusters
## Getting started
- Read the [CLI Overview](docs/tutorials/cli-overview.md) for a list of features provided by the `aks-engine` command line tool.
- The [Quickstart Guide](docs/tutorials/quickstart.md) describes how to download the latest release of `aks-engine` for your environment, and demonstrates how to use `aks-engine` to create a Kubernetes cluster on Azure that you will manage and customize.
- The [complete body of documentation can be found here][docs].
Please see the [FAQ][] for answers about AKS Engine and its progenitor ACS-Engine.


@ -71,7 +71,7 @@ func newAddPoolCmd() *cobra.Command {
	f.StringVarP(&apc.location, "location", "l", "", "location the cluster is deployed in")
	f.StringVarP(&apc.resourceGroupName, "resource-group", "g", "", "the resource group where the cluster is deployed")
	f.StringVarP(&apc.apiModelPath, "api-model", "m", "", "path to the generated apimodel.json file")
	f.StringVarP(&apc.nodePoolPath, "node-pool", "p", "", "path to a JSON file that defines the new node pool spec")
	addAuthFlags(&apc.authArgs, f)


@ -95,7 +95,7 @@ func newDeployCmd() *cobra.Command {
	f := deployCmd.Flags()
	f.StringVarP(&dc.apimodelPath, "api-model", "m", "", "path to your cluster definition file")
	f.StringVarP(&dc.dnsPrefix, "dns-prefix", "p", "", "dns prefix (unique name for the cluster)")
	f.BoolVar(&dc.autoSuffix, "auto-suffix", false, "automatically append a compressed timestamp to the dnsPrefix to ensure cluster name uniqueness")
	f.StringVarP(&dc.outputDirectory, "output-directory", "o", "", "output directory (derived from FQDN if absent)")
	f.StringVar(&dc.caCertificatePath, "ca-certificate-path", "", "path to the CA certificate to use for Kubernetes PKI assets")
	f.StringVar(&dc.caPrivateKeyPath, "ca-private-key-path", "", "path to the CA private key to use for Kubernetes PKI assets")


@ -67,7 +67,7 @@ type scaleCmd struct {
const (
	scaleName = "scale"
	scaleShortDescription = "Scale an existing AKS Engine-created Kubernetes cluster"
	scaleLongDescription = "Scale an existing AKS Engine-created Kubernetes cluster by specifying a new desired number of nodes in a node pool"
	apiModelFilename = "apimodel.json"
)


@ -8,6 +8,10 @@ Azure Kubernetes Service ([AKS][]) is a Microsoft Azure service that supports fu
AKS clusters can be created in the Azure portal or with `az aks create` in the [Azure command-line tool][]. AKS Engine clusters can be created with `aks-engine deploy` (`aks-engine` is the AKS Engine command-line tool), or by generating ARM templates with `aks-engine generate` and deploying them as a separate step using the `az` command-line tool (e.g., `az group deployment create`).
### Can I Build an AKS Cluster with `aks-engine`?
No, the `aks-engine` command line tool will not create an AKS cluster. To learn about AKS, you can [read the official docs](https://docs.microsoft.com/en-us/azure/aks/).
### What's the Difference Between `acs-engine` and `aks-engine`?
AKS Engine is the next version of the ACS-Engine project. AKS Engine supports current and future versions of [Kubernetes][], while ACS-Engine also supported the Docker Swarm and Mesos DC/OS container orchestrators.
@ -20,10 +24,6 @@ Yes.
No further development or releases in ACS-Engine are planned. AKS Engine is a backward-compatible continuation of ACS-Engine, so all fixes and new features will target AKS Engine.
### Should I use the latest `aks-engine` release if I was previously using `acs-engine`?
Yes. `aks-engine` released [v0.27.0][] as a continuation of the ACS-Engine project ([v0.26.2][] was the final `acs-engine` release) with all the Kubernetes fixes and features included in [v0.26.2][] and more.


@ -16,12 +16,14 @@ Introductions to all the key parts of AKS Engine you'll need to know.
- [More on Windows and Kubernetes](windows-and-kubernetes.md)
- [Kubernetes Windows Walkthrough](windows.md)
- [Using Intel® SGX with Kubernetes](sgx.md)
- [Monitoring Kubernetes Clusters](monitoring.md)
**Operations**
- [Scaling Clusters](scale.md)
- [Updating VMSS Node Pools](update.md)
- [Adding Node Pools to Existing Clusters](addpool.md)
- [Upgrading Clusters](upgrade.md)
**Azure Stack**


@ -20,7 +20,7 @@ You also need to delegate permission to the application as follows:
## Deployment
Follow the [deployment steps](../tutorials/quickstart.md#deploy). In step #4, add the following under 'properties' section:
```json
"aadProfile": {
    "serverAppID": "",

docs/topics/addpool.md (new file, 311 lines)

@ -0,0 +1,311 @@
# Adding New Node Pools
## Prerequisites
All documentation in these guides assumes you have already downloaded both the Azure `az` CLI tool and the `aks-engine` binary tool. Follow the [quickstart guide](../tutorials/quickstart.md) before continuing if you're creating a Kubernetes cluster using AKS Engine for the first time.
This guide assumes you already have a running cluster deployed using the `aks-engine` CLI. For more details on how to do that see [deploy](creating_new_clusters.md#deploy) or [generate](generate.md).
## Addpool
The `aks-engine addpool` command can add a new node pool to an existing cluster. By specifying a new `agentPoolProfile` configuration as a JSON file, `aks-engine addpool` will add a node pool according to that configuration, and merge it into the pre-existing aks-engine-generated `apimodel.json`. When used with a newer version of the `aks-engine` CLI than the one originally used to build the cluster, new node pools can be regularly added with the latest bits.
The example below will assume you have a cluster deployed, and that the API model originally used to deploy that cluster is stored at `_output/<dnsPrefix>/apimodel.json`.
To add a new pool to the cluster you will run a command like:
```sh
$ aks-engine addpool --subscription-id <subscription_id> \
--resource-group mycluster --location <location> \
--client-id '<service principal client ID>' \
--client-secret '<service principal client secret>' \
--api-model _output/mycluster/apimodel.json \
--node-pool ./pool.json
```
The above assumes that the new node pool specification is in the current working directory, and called `pool.json`. Here's an example of what that file might look like:
```json
{
"name": "pooladded",
"count": 5,
"vmSize": "Standard_D4s_v3",
"availabilityProfile": "VirtualMachineScaleSets",
"kubernetesConfig": {
"kubeletConfig": {
"--cloud-provider": "",
"--cloud-config": "",
"--azure-container-registry-config": ""
}
}
}
```
The above is a JSON object that conforms to the `agentPoolProfile` specification, just like in the API model. The `agentPoolProfile` spec is documented [here](clusterdefinitions.md#agentpoolprofiles).
Some important considerations:
- The `"name"` value in a new pool must be unique; it may not be the same value as an existing node pool.
- The `"availabilityProfile"` value in a new pool must match the value in the existing cluster node pools. That enforced homogeneity is an AKS Engine limitation with how its provisioned LoadBalancer resources manage backend pool membership across all nodes in the cluster for svc ingress routing.
- The resultant, new Kubernetes node provisioned in your cluster is not entirely configured via its `agentPoolProfile` specification. It will also inherit certain properties from other configuration in the API model. Specifically, the version of Kubernetes may be modified in the API model JSON (not the JSON file expressing the new pool), and the new pool will be built running that version of Kubernetes. This can support experimenting with new versions of Kubernetes on new nodes (perhaps tainted or excluded from the cluster LoadBalancer) before rolling out that new version cluster-wide.
- All new nodes in the added pool will be added to the backend pool of the Azure LoadBalancer that serves cluster service ingress traffic. In practice this means that these new nodes can run pods that serve inbound service traffic coming into the cluster.
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--subscription-id|yes|The subscription id the cluster is deployed in.|
|--resource-group|yes|The resource group the cluster is deployed in.|
|--location|yes|The location the resource group is in.|
|--api-model|yes|Relative path to the generated API model for the cluster.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--certificate-path|depends| The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--node-pool|yes|Path to JSON file expressing the `agentPoolProfile` spec of the new node pool.|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--language|no|Language to return error message in. Default value is "en-us".|
## Frequently Asked Questions
### Why would I use addpool instead of update to upgrade a VMSS node pool?
Similar to `aks-engine update`, you may use the `addpool` command to try out a new node configuration in your cluster without affecting existing nodes or production workloads (although if your new configuration is risky in any way you will want to taint those nodes so that no production workloads are scheduled, until you can validate the new configuration). The primary differences are:
- Use `addpool` when the configuration delta compared to an existing node pool is significant enough where it makes sense to organize that new configuration discretely in its own pool. Especially if the new pool will only serve a particular type of traffic (e.g., GPU or confidential compute), a dedicated pool should be used for easy, discrete scaling in response to the specific load requirements of the specific workloads it will run.
- Use `addpool` when you want to run operational tests immediately, and also especially if you know the specific number of net new nodes to add, and you need them immediately. The primary operational difference between `addpool` and `update` is that `addpool` actually adds new operational capacity to your cluster immediately, whereas `update` merely changes the VMSS model, so that *the next* scale out operation renders a node with the new configuration.
### Why would I use addpool instead of upgrade to install a newer version of Kubernetes on my cluster?
If you're running a very large Kubernetes cluster, the one-node-at-a-time operation of `aks-engine upgrade` will take many hours, even days, depending on the size of the cluster. Each one of those node deletions + node additions is subject to environmental failures, and so a deterministic upgrade can indeed take many days. Depending on your tolerance for temporary additional quota, you can upgrade your nodes more quickly, one pool at a time, and use your own validation criteria to inform the progression velocity through an entire cluster upgrade workflow. Let's demonstrate how that might work using a cluster with 3 node pools:
```sh
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master-26196714-0 Ready master 3m7s v1.18.8 10.255.255.5 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000000 Ready agent 3m7s v1.18.8 10.240.0.34 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000001 Ready agent 103s v1.18.8 10.240.0.65 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000002 Ready agent 3m7s v1.18.8 10.240.0.96 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000000 Ready agent 3m7s v1.18.8 10.240.1.181 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000001 Ready agent 3m v1.18.8 10.240.1.212 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000002 Ready agent 3m v1.18.8 10.240.1.243 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000000 Ready agent 3m7s v1.18.8 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000001 Ready agent 2m32s v1.18.8 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000002 Ready agent 3m7s v1.18.8 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000003 Ready agent 3m7s v1.18.8 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000004 Ready agent 3m7s v1.18.8 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000005 Ready agent 3m7s v1.18.8 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000006 Ready agent 3m7s v1.18.8 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000007 Ready agent 3m7s v1.18.8 10.240.1.88 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000008 Ready agent 3m7s v1.18.8 10.240.1.119 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000009 Ready agent 3m7s v1.18.8 10.240.1.150 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
```
Above we have a `pool1` with 3 nodes, a `pool2` with 3 nodes, and a `pool3` with 10 nodes. Rather than run a single, continuous upgrade operation across all nodes in the cluster, let's add pools, then validate the new version, and then scale those new pools up so the original nodes can be cordoned, drained, and deleted.
Before we do that, though, let's upgrade the control plane first! [You should always upgrade the control plane before your nodes](upgrade.md#what-should-i-upgrade-first-my-control-plane-nodes-or-my-worker-nodes). See the full upgrade docs [here](upgrade.md).
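For reference, a control-plane-only upgrade invocation for this example cluster might look like the following sketch; it reuses the hypothetical environment variables from the `addpool` examples below, and you should confirm the exact flags supported by your `aks-engine` version:
```sh
# Upgrade only the control plane VMs to v1.19.1 (a sketch; flags assumed available in recent aks-engine releases)
$ aks-engine upgrade --control-plane-only \
    --subscription-id $TEST_AZURE_SUB_ID \
    --api-model _output/kubernetes-westus2-1838/apimodel.json \
    --location westus2 \
    --resource-group kubernetes-westus2-1838 \
    --upgrade-version 1.19.1 \
    --auth-method client_secret \
    --client-id $TEST_AZURE_SP_ID \
    --client-secret $TEST_AZURE_SP_PW
```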
After our control plane has been updated to v1.19.1, we can proceed with a rolling upgrade of our nodes by gradually adding and validating new node pools. We'll use the command line `jq` tool to create three new JSON files that we'll use to initiate 3 new `aks-engine addpool` operations, derived from the original `agentPoolProfile` specifications in the API model generated during cluster deployment:
```sh
$ jq -r '.properties.agentPoolProfiles[0] | .name = "newpool1"' < _output/kubernetes-westus2-1838/apimodel.json > newpool1.json
$ jq -r '.properties.agentPoolProfiles[1] | .name = "newpool2"' < _output/kubernetes-westus2-1838/apimodel.json > newpool2.json
$ jq -r '.properties.agentPoolProfiles[2] | .name = "newpool3"' < _output/kubernetes-westus2-1838/apimodel.json > newpool3.json
```
Because those were derived from the API model, those new pools are configured with a count of 3, 3, and 10, respectively. Let's change all of the node counts to 1, because we don't necessarily need full node pool capacity to validate the new Kubernetes version against:
```sh
$ jq -r '.count = 1' < newpool1.json > newpool1-1node.json && mv newpool1-1node.json newpool1.json
$ jq -r '.count = 1' < newpool2.json > newpool2-1node.json && mv newpool2-1node.json newpool2.json
$ jq -r '.count = 1' < newpool3.json > newpool3-1node.json && mv newpool3-1node.json newpool3.json
```
Our final configuration change before running `aks-engine addpool` is updating the Kubernetes version in the API model:
```sh
$ jq -r '. | .properties.orchestratorProfile.orchestratorRelease = "1.19"' < _output/kubernetes-westus2-1838/apimodel.json > apimodel-1dot19.json
$ jq -r '. | .properties.orchestratorProfile.orchestratorVersion = "1.19.1"' < apimodel-1dot19.json > _output/kubernetes-westus2-1838/apimodel.json
$ grep orchestratorRelease -A 1 _output/kubernetes-westus2-1838/apimodel.json
"orchestratorRelease": "1.19",
"orchestratorVersion": "1.19.1",
```
We can now run addpool once per new pool to begin the process of validating v1.19.1 across our existing v1.18.8 cluster:
```sh
$ aks-engine addpool --subscription-id $TEST_AZURE_SUB_ID --api-model _output/kubernetes-westus2-1838/apimodel.json --node-pool newpool1.json --location westus2 --resource-group kubernetes-westus2-1838 --auth-method client_secret --client-id $TEST_AZURE_SP_ID --client-secret $TEST_AZURE_SP_PW
WARN[0003] Any new nodes will have containerd version 1.3.7
INFO[0003] Starting ARM Deployment kubernetes-westus2-1838-1942811440 in resource group kubernetes-westus2-1838. This will take some time...
INFO[0158] Finished ARM Deployment (kubernetes-westus2-1838-1942811440). Succeeded
$ aks-engine addpool --subscription-id $TEST_AZURE_SUB_ID --api-model _output/kubernetes-westus2-1838/apimodel.json --node-pool newpool2.json --location westus2 --resource-group kubernetes-westus2-1838 --auth-method client_secret --client-id $TEST_AZURE_SP_ID --client-secret $TEST_AZURE_SP_PW
WARN[0008] Any new nodes will have containerd version 1.3.7
INFO[0008] Starting ARM Deployment kubernetes-westus2-1838-25937475 in resource group kubernetes-westus2-1838. This will take some time...
INFO[0163] Finished ARM Deployment (kubernetes-westus2-1838-25937475). Succeeded
$ aks-engine addpool --subscription-id $TEST_AZURE_SUB_ID --api-model _output/kubernetes-westus2-1838/apimodel.json --node-pool newpool3.json --location westus2 --resource-group kubernetes-westus2-1838 --auth-method client_secret --client-id $TEST_AZURE_SP_ID --client-secret $TEST_AZURE_SP_PW
WARN[0004] Any new nodes will have containerd version 1.3.7
INFO[0004] Starting ARM Deployment kubernetes-westus2-1838-1370618455 in resource group kubernetes-westus2-1838. This will take some time...
INFO[0174] Finished ARM Deployment (kubernetes-westus2-1838-1370618455). Succeeded
```
At this point we now have three new nodes running v1.19.1 on our cluster, one per new pool, which correlates with one new pool per pre-existing pool:
```sh
$ k get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master-26196714-0 Ready master 36m v1.18.8 10.255.255.5 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool1-26196714-vmss000000 Ready agent 8m35s v1.19.1 10.240.2.18 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool2-26196714-vmss000000 Ready agent 3m41s v1.19.1 10.240.2.49 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool3-26196714-vmss000000 Ready agent 21s v1.19.1 10.240.2.80 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000000 Ready agent 36m v1.18.8 10.240.0.34 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000001 Ready agent 35m v1.18.8 10.240.0.65 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool1-26196714-vmss000002 Ready agent 36m v1.18.8 10.240.0.96 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000000 Ready agent 36m v1.18.8 10.240.1.181 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000001 Ready agent 36m v1.18.8 10.240.1.212 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000002 Ready agent 36m v1.18.8 10.240.1.243 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000000 Ready agent 36m v1.18.8 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000001 Ready agent 36m v1.18.8 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000002 Ready agent 36m v1.18.8 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000003 Ready agent 36m v1.18.8 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000004 Ready agent 36m v1.18.8 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000005 Ready agent 36m v1.18.8 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000006 Ready agent 36m v1.18.8 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000007 Ready agent 36m v1.18.8 10.240.1.88 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000008 Ready agent 36m v1.18.8 10.240.1.119 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000009 Ready agent 36m v1.18.8 10.240.1.150 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
```
At this point we would probably taint those three nodes, and then run validations against them (using the appropriate tolerations so that the validation workloads are scheduled onto the desired nodes):
```sh
$ kubectl taint nodes k8s-newpool1-26196714-vmss000000 validating:NoSchedule
node/k8s-newpool1-26196714-vmss000000 tainted
$ kubectl taint nodes k8s-newpool2-26196714-vmss000000 validating:NoSchedule
node/k8s-newpool2-26196714-vmss000000 tainted
$ kubectl taint nodes k8s-newpool3-26196714-vmss000000 validating:NoSchedule
node/k8s-newpool3-26196714-vmss000000 tainted
```
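If it helps to visualize the validation step, here is a minimal sketch of a pod that tolerates the `validating` taint and pins itself to one of the new nodes; the pod name, image, and command are placeholders, not part of any real validation suite:
```sh
$ cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: validate-v1-19-1          # placeholder name
spec:
  nodeName: k8s-newpool1-26196714-vmss000000   # pin to one of the new, tainted nodes
  tolerations:
  - key: validating                # matches the validating:NoSchedule taint applied above
    operator: Exists
    effect: NoSchedule
  containers:
  - name: validate
    image: busybox                 # placeholder; substitute your validation image
    command: ["sh", "-c", "sleep 3600"]
EOF
```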
Let's say we've validated the "pool1" replacement, which we've called "newpool1". Let's scale that pool out to match the original "pool1":
```sh
$ aks-engine scale --subscription-id $TEST_AZURE_SUB_ID --client-id $TEST_AZURE_SP_ID --client-secret $TEST_AZURE_SP_PW --api-model _output/kubernetes-westus2-1838/apimodel.json --location westus2 --resource-group kubernetes-westus2-1838 --apiserver kubernetes-westus2-1838.westus2.cloudapp.azure.com --node-pool newpool1 --new-node-count 3 --auth-method client_secret --identity-system azure_ad
INFO[0003] found VMSS k8s-newpool1-26196714-vmss in resource group kubernetes-westus2-1838 that correlates with node pool newpool1
WARN[0003] Any new nodes will have containerd version 1.3.7
INFO[0003] Removing singlePlacementGroup property from [variables('newpool1VMNamePrefix')]
INFO[0003] Nodes in pool 'newpool1' before scaling:
NODE STATUS VERSION OS KERNEL
k8s-newpool1-26196714-vmss000000 Ready v1.19.1 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
INFO[0003] Starting ARM Deployment kubernetes-westus2-1838-360281667 in resource group kubernetes-westus2-1838. This will take some time...
INFO[0230] Finished ARM Deployment (kubernetes-westus2-1838-360281667). Succeeded
INFO[0230] Nodes in pool 'newpool1' after scaling:
NODE STATUS VERSION OS KERNEL
k8s-newpool1-26196714-vmss000000 Ready v1.19.1 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-newpool1-26196714-vmss000001 Ready v1.19.1 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-newpool1-26196714-vmss000002 NotReady v1.19.1 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
```
Note: you may also use the VMSS API directly (either via the `az` CLI or the Azure portal web UI) to scale out the new pools. The advantage of using `aks-engine scale` to do so is that you will get immediate signal if, for any reason, the new VMs did not come online successfully as Kubernetes nodes.
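For example, scaling the same new pool directly at the VMSS layer might look like this sketch (using the resource group and VMSS names from this walkthrough):
```sh
# Scale the VMSS behind "newpool1" to 3 instances, bypassing aks-engine
$ az vmss scale -g kubernetes-westus2-1838 -n k8s-newpool1-26196714-vmss --new-capacity 3
```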
Now that we have node capacity in our new pool equivalent to the original pool, we can cordon and drain the original nodes and rely upon the Kubernetes layer to re-schedule workloads onto the new nodes. (Note: "capacity equivalence" may be a little more complicated if, as part of a process like this one, you change the VM SKU of the new pool compared to the original pool. Note also that this requires your workload scheduling configuration, as it pertains to the way your nodes are labeled, tainted, and so on, to make sense, and your production workload specifications to adhere to that configuration schema.)
```sh
$ for node in "k8s-pool1-26196714-vmss000000 k8s-pool1-26196714-vmss000001 k8s-pool1-26196714-vmss000002"; do kubectl cordon $node; done
node/k8s-pool1-26196714-vmss000000 cordoned
node/k8s-pool1-26196714-vmss000001 cordoned
node/k8s-pool1-26196714-vmss000002 cordoned
$ for node in "k8s-pool1-26196714-vmss000000 k8s-pool1-26196714-vmss000001 k8s-pool1-26196714-vmss000002"; do kubectl drain --ignore-daemonsets $node; done
node/k8s-pool1-26196714-vmss000000 already cordoned
node/k8s-pool1-26196714-vmss000001 already cordoned
node/k8s-pool1-26196714-vmss000002 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/azure-cni-networkmonitor-z4tcw, kube-system/azure-ip-masq-agent-nmlnv, kube-system/blobfuse-flexvol-installer-zgjxg, kube-system/csi-secrets-store-jdmql, kube-system/csi-secrets-store-provider-azure-9d4j9, kube-system/kube-proxy-glrm6
node/k8s-pool1-26196714-vmss000000 drained
WARNING: ignoring DaemonSet-managed Pods: kube-system/azure-cni-networkmonitor-xhk8d, kube-system/azure-ip-masq-agent-lhj9p, kube-system/blobfuse-flexvol-installer-zdc4w, kube-system/csi-secrets-store-6zbx9, kube-system/csi-secrets-store-provider-azure-q2h6n, kube-system/kube-proxy-728sx
node/k8s-pool1-26196714-vmss000001 drained
WARNING: ignoring DaemonSet-managed Pods: kube-system/azure-cni-networkmonitor-mtx7c, kube-system/azure-ip-masq-agent-5p9lw, kube-system/blobfuse-flexvol-installer-cl9ls, kube-system/csi-secrets-store-provider-azure-vv8rb, kube-system/csi-secrets-store-xnjxn, kube-system/kube-proxy-rpfjt
node/k8s-pool1-26196714-vmss000002 drained
```
Note: the above example is rather brute-force. Depending on your operational reality, you may want to add some delay between draining each node. (Cordoning all nodes at once does make sense, as you want to stop any future scheduling onto those nodes at the same time, once you have the required standby capacity, which in our example is the new, validated v1.19.1 nodes.)
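A sketch of a more gradual variant, assuming a five-minute pause between nodes is acceptable for your workloads:
```sh
# Drain the original pool1 nodes one at a time, pausing between each
$ for node in k8s-pool1-26196714-vmss000000 k8s-pool1-26196714-vmss000001 k8s-pool1-26196714-vmss000002; do
    kubectl drain --ignore-daemonsets "$node"
    sleep 300  # give re-scheduled workloads time to settle before draining the next node
  done
```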
After all workloads have been drained, and moved over to the new nodes, you may delete the VMSS entirely:
```sh
$ az vmss delete -n k8s-pool1-26196714-vmss -g kubernetes-westus2-1838
$ echo $?
0
```
Now, the original "pool1" nodes are no longer participating in the cluster:
```sh
$ k get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master-26196714-0 Ready master 64m v1.18.8 10.255.255.5 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool1-26196714-vmss000000 Ready agent 36m v1.19.1 10.240.2.18 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool1-26196714-vmss000001 Ready agent 17m v1.19.1 10.240.2.111 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool1-26196714-vmss000002 Ready agent 16m v1.19.1 10.240.2.142 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool2-26196714-vmss000000 Ready agent 31m v1.19.1 10.240.2.49 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-newpool3-26196714-vmss000000 Ready agent 28m v1.19.1 10.240.2.80 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000000 Ready agent 64m v1.18.8 10.240.1.181 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000001 Ready agent 64m v1.18.8 10.240.1.212 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool2-26196714-vmss000002 Ready agent 64m v1.18.8 10.240.1.243 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000000 Ready agent 64m v1.18.8 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000001 Ready agent 63m v1.18.8 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000002 Ready agent 64m v1.18.8 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000003 Ready agent 64m v1.18.8 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000004 Ready agent 64m v1.18.8 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000005 Ready agent 64m v1.18.8 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000006 Ready agent 64m v1.18.8 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000007 Ready agent 64m v1.18.8 10.240.1.88 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000008 Ready agent 64m v1.18.8 10.240.1.119 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-pool3-26196714-vmss000009 Ready agent 64m v1.18.8 10.240.1.150 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
```
Final note: don't forget to remove the "pool1" `agentPoolProfile` JSON object from your API model!
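One way to do that, sketched with the same `jq` approach used earlier (the intermediate filename is illustrative):
```sh
# Remove the retired "pool1" entry from the API model
$ jq 'del(.properties.agentPoolProfiles[] | select(.name == "pool1"))' \
    < _output/kubernetes-westus2-1838/apimodel.json > apimodel-pruned.json
$ mv apimodel-pruned.json _output/kubernetes-westus2-1838/apimodel.json
```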
### How do I integrate any added VMSS node pools into an existing cluster-autoscaler configuration?
If you're running the AKS Engine `cluster-autoscaler` addon, or running your own spec based on the [upstream examples](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md), you'll have a `cluster-autoscaler` Deployment resource installed on your cluster. The examples below will assume that the `cluster-autoscaler` componentry is installed in the `kube-system` namespace.
First, you'll need to know the VMSS name of your new node pool. Here's how to do that using the `az` CLI tool:
```sh
$ az vmss list -g kubernetes-westus2-1838 -o table
Name ResourceGroup Location Zones Capacity Overprovision UpgradePolicy
---------------------------- ------------------------ ---------- ------- ---------- --------------- ---------------
k8s-newpool-1838-vmss kubernetes-westus2-1838 westus2 1 2 1 False Manual
k8s-pool1-1838-vmss kubernetes-westus2-1838 westus2 1 2 1 False Manual
```
Now, edit the `cluster-autoscaler` deployment:
```sh
$ kubectl edit deployment -n kube-system cluster-autoscaler
```
The above will open up the YAML spec in your default editor (e.g., `vim`). What we want to do is to modify the `cluster-autoscaler` runtime command arguments, so that your new VMSS node pool is enabled for cluster-autoscaler. Specifically, you want to look for one or more lines in the YAML file that look like this:
```
- --nodes=1:9:k8s-pool1-1838-vmss
```
And then add a new line below, using the identical indentation, with the new pool. So the changes should look like this:
```
- --nodes=1:9:k8s-pool1-1838-vmss
- --nodes=1:9:k8s-newpool1-1838-vmss
```
Again, refer to the [cluster-autoscaler documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md) for how to configure your VMSS node pools in Azure; the above example declares that cluster-autoscaler is enabled for the VMSS node pools `k8s-pool1-1838-vmss` and `k8s-newpool1-1838-vmss` running in your cluster, with a minimum node count of `1`, and a maximum node count of `9`, for both pools. After you save and exit from your editor, the `cluster-autoscaler` deployment should delete the existing pod, and create a new one, with the modified changes.
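To confirm that the change was picked up, you can watch the replacement pod come up; this sketch assumes the deployment's pods carry an `app=cluster-autoscaler` label, which may differ in your spec:
```sh
$ kubectl get pods -n kube-system -l app=cluster-autoscaler -w
```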
If you're running cluster-autoscaler via the AKS Engine addon, and *if* you have explicitly configured the AKS Engine `cluster-autoscaler` addon to a mode of `Reconcile`, then you won't be able to simply edit the deployment spec on the cluster; instead you'll have to hop onto each control plane VM and manually edit the spec under `/etc/kubernetes/addons/cluster-autoscaler.yaml`. This is not a common situation, and one that would only occur because of an explicit configuration in the API model, such as:
```
"addons": [
{
"name": "cluster-autoscaler",
"enabled": true,
"mode": "Reconcile"
}
]
```


@ -19,7 +19,7 @@
## Introduction
Specific AKS Engine [versions](#aks-engine-versions) can be used to provision self-managed Kubernetes clusters on [Azure Stack Hub](https://azure.microsoft.com/overview/azure-stack/). AKS Engine's `generate`, [deploy](../tutorials/quickstart.md#deploy), [upgrade](upgrade.md), and [scale](scale.md) commands can be executed as if you were targeting Azure's public cloud. You are only required to slightly update your cluster definition to provide some extra information about your Azure Stack Hub instance.
The goal of this guide is to explain how to provision Kubernetes clusters to Azure Stack Hub using AKS Engine and to capture the differences between Azure and Azure Stack Hub. Bear in mind as well that not every AKS Engine feature or configuration option is currently supported on Azure Stack Hub. In most cases, these are not available because dependent Azure components are not part of Azure Stack Hub.

docs/topics/creating_new_clusters.md (new file, 166 lines)

@ -0,0 +1,166 @@
# Creating Kubernetes Clusters
## Deploy
The `aks-engine deploy` command will create a new Kubernetes cluster from scratch into a pre-existing Azure resource group. You define an API model (cluster definition) as a JSON file, and then pass in a reference to it, as well as appropriate Azure credentials, to a command statement like this:
```sh
$ aks-engine deploy --subscription-id $SUBSCRIPTION_ID \
--dns-prefix $CLUSTER_NAME \
--resource-group $RESOURCE_GROUP \
--location $LOCATION \
--api-model examples/kubernetes.json \
--client-id $SERVICE_PRINCIPAL_ID \
--client-secret $SERVICE_PRINCIPAL_PASSWORD
```
`aks-engine deploy` is a long-running operation that creates Azure resources (e.g., Virtual Machine and/or Virtual Machine Scale Set [VMSS], Disk, Network Interface, Network Security Group, Public IP Address, Virtual Network, Load Balancer, and others) that will underlie a Kubernetes cluster. All deployed VMs will be configured to run Kubernetes bootstrap scripts appropriate for the desired cluster configuration. The outcome of a successful `aks-engine deploy` operation is a fully operational Kubernetes cluster, ready for use immediately.
A more detailed walk-through of `aks-engine deploy` is in the [quickstart guide](../tutorials/quickstart.md#deploy).
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--api-model|yes|Relative path to the API model (cluster definition) that declares the desired cluster configuration.|
|--dns-prefix|no, if present in API model|Unique name for the cluster.|
|--auto-suffix|no|Automatically append a compressed timestamp to the dnsPrefix to ensure cluster name uniqueness.|
|--azure-env|no|The target Azure cloud (default "AzurePublicCloud") to deploy to.|
|--subscription-id|yes|The subscription id the cluster is deployed in.|
|--resource-group|yes|The resource group the cluster is deployed in.|
|--location|yes|The location to deploy to.|
|--force-overwrite|no|Automatically overwrite any existing files in the output directory (default is false).|
|--output-directory|no|Output directory (derived from FQDN if absent) to persist cluster configuration artifacts to.|
|--set|no|Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).|
|--ca-certificate-path|no|Path to the CA certificate to use for Kubernetes PKI assets.|
|--ca-private-key-path|no|Path to the CA private key to use for Kubernetes PKI assets.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--certificate-path|depends| The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--identity-system|no|Identity system (default is azure_ad)|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--private-key-path|no|Path to private key (used with --auth-method=client_certificate).|
|--language|no|Language to return error message in. Default value is "en-us".|
## Generate
The `aks-engine generate` command will generate artifacts that you can use to implement your own cluster create workflows. Like `aks-engine deploy`, you define an API model (cluster definition) as a JSON file, and then pass in a reference to it, as well as appropriate Azure credentials, to a command statement like this:
```sh
$ aks-engine generate --api-model ./my-cluster-definition.json \
--output-directory ./cluster_artifacts
```
The above command assumes that the API model at the relative filepath `./my-cluster-definition.json` contains a minimally populated cluster definition. At a minimum, the following is needed (a consolidated example follows the list below):
1. In order to grant the required service privileges to Kubernetes runtime processes, you need either:
Service Principal credentials for the relevant Kubernetes runtime processes:
```json
{
...
"properties": {
...
"servicePrincipalProfile": {
"clientId": "<service principal ID>",
"secret": "<service principal password>"
}
...
}
```
Or, system-assigned identity enabled:
```json
{
...
"properties": {
...
"orchestratorProfile": {
"kubernetesConfig": {
"useManagedIdentity": true
...
}
...
}
...
}
```
2. To uniquely identify the cluster, you need a cluster name:
```json
{
...
"properties": {
...
"masterProfile": {
"dnsPrefix": "<name of cluster>"
...
}
...
}
```
3. To enable interactive login to node VMs via ssh key exchange, you need to provide a public key:
```json
{
...
"properties": {
...
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": "<public key data>"
}
]
}
...
}
...
}
```
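Putting those pieces together, a minimal cluster definition might look like the following sketch, modeled on the repository's `examples/kubernetes.json` layout with placeholder values (here using the Service Principal variant):
```sh
$ cat > my-cluster-definition.json <<'EOF'
{
  "apiVersion": "vlabs",
  "properties": {
    "orchestratorProfile": {
      "orchestratorType": "Kubernetes"
    },
    "masterProfile": {
      "count": 1,
      "dnsPrefix": "<name of cluster>",
      "vmSize": "Standard_D2s_v3"
    },
    "agentPoolProfiles": [
      {
        "name": "agentpool1",
        "count": 2,
        "vmSize": "Standard_D2s_v3"
      }
    ],
    "linuxProfile": {
      "adminUsername": "azureuser",
      "ssh": {
        "publicKeys": [
          {
            "keyData": "<public key data>"
          }
        ]
      }
    },
    "servicePrincipalProfile": {
      "clientId": "<service principal ID>",
      "secret": "<service principal password>"
    }
  }
}
EOF
```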
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--api-model|yes|Relative path to the API model (cluster definition) that declares the desired cluster configuration.|
|--output-directory|no|Output directory (derived from FQDN if absent) to persist cluster configuration artifacts to.|
|--set|no|Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).|
|--ca-certificate-path|no|Path to the CA certificate to use for Kubernetes PKI assets.|
|--ca-private-key-path|no|Path to the CA private key to use for Kubernetes PKI assets.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--parameters-only|no|Only output parameters files.|
|--no-pretty-print|no|Skip pretty printing the output.|
As mentioned above, `aks-engine generate` expects all cluster definition data to be present in the API model JSON file. You may actually inject data into the API model at runtime by invoking the command and including that data in the `--set` argument interface. For example, this command will produce artifacts that can be used to deploy a fully functional Kubernetes cluster based on the AKS Engine defaults (the `examples/kubernetes.json` file will build a "default" single master, 2 node cluster):
```sh
$ bin/aks-engine generate --api-model ./examples/kubernetes.json \
--output-directory ./cluster_artifacts \
--set masterProfile.dnsPrefix=my-cluster,orchestratorProfile.kubernetesConfig.useManagedIdentity=true,linuxProfile.ssh.publicKeys[0].keyData=$(cat ~/.ssh/id_rsa.pub)
INFO[0000] new API model file has been generated during merge: /var/folders/jq/t_y8l4556rv__mzvjhkd61n00000gp/T/mergedApiModel831700038
WARN[0000] No "location" value was specified, AKS Engine will generate an ARM template configuration valid for regions in public cloud only
INFO[0000] Generating assets into ./cluster_artifacts...
WARN[0000] containerd will be upgraded to version 1.3.7
```
## Frequently Asked Questions
### Why would I run `aks-engine generate` vs `aks-engine deploy`?
Depending on any customization you want to make to either (1) the AKS Engine-generated ARM template, or (2) the particular way that the ARM template is deployed to Azure, you may want to use `aks-engine generate` to produce an ARM template specification, and then implement your own `az deployment group create`-equivalent workflow to actually bootstrap the cluster. Especially if you plan to bootstrap multiple clusters in multiple regions from a common cluster configuration, it may make sense to re-use a single ARM template across a set of ARM deployments. `aks-engine deploy` is only able to build one cluster at a time, so especially if you're bootstrapping multiple clusters in parallel using a common config, a workflow like this is probably optimal:
1. `aks-engine generate --api-model ./common-cluster-definition.json --output-directory /path/to/re-usable-arm-template-directory`
2. For every desired cluster+location, execute in parallel:
1. `az group create -n $RESOURCE_GROUP -l $LOCATION`; then
2. `az deployment group create --name $RESOURCE_GROUP --resource-group $RESOURCE_GROUP --template-file /path/to/re-usable-arm-template-directory/azuredeploy.json --parameters /path/to/re-usable-arm-template-directory/azuredeploy.parameters.json`
In the above example we use the name of the resource group as the name of the ARM deployment, following the guidance that only one cluster be built per resource group.
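A sketch of that parallel workflow, assuming a hypothetical list of target regions and one resource group per region:
```sh
# Deploy the same generated template to several regions in parallel (region list is illustrative)
$ for LOCATION in westus2 eastus2 northeurope; do
    RESOURCE_GROUP="my-cluster-${LOCATION}"
    (
      az group create -n "$RESOURCE_GROUP" -l "$LOCATION" && \
      az deployment group create --name "$RESOURCE_GROUP" \
        --resource-group "$RESOURCE_GROUP" \
        --template-file /path/to/re-usable-arm-template-directory/azuredeploy.json \
        --parameters /path/to/re-usable-arm-template-directory/azuredeploy.parameters.json
    ) &
  done; wait
```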
In summary, when creating single clusters, and especially when maintaining Kubernetes environments distinctly (i.e., not maintaining a fleet of clusters running a common config), relying upon `aks-engine deploy` as a full end-to-end convenience to bootstrap your clusters is appropriate. For more sophisticated cluster configuration re-use scenarios, and/or more sophisticated ARM deployment reconciliation (i.e., retry logic for certain failures), `aks-engine generate` + `az deployment group create` is the more appropriate choice.
### Can I re-run `aks-engine deploy` on an existing cluster to update the cluster configuration?
No. See [addpool](addpool.md), [update](update.md), [scale](scale.md), and [upgrade](upgrade.md) for documentation describing how to continue to use AKS Engine to maintain your cluster configuration over time.


@ -2,9 +2,9 @@
## Prerequisites
All documentation in these guides assumes you have already downloaded both the Azure CLI and `aks-engine`. Follow the [quickstart guide](../tutorials/quickstart.md) before continuing.
This guide assumes you already have deployed a cluster using `aks-engine`. For more details on how to do that see [deploy](../tutorials/quickstart.md#deploy).
## Retrieving Logs


@ -54,7 +54,7 @@ For `VERSION` environment variable, we recommend that you provide a value which
}
```
* Run `aks-engine deploy` [as normal](../tutorials/quickstart.md#deploy).
## Kubernetes 1.17+


@ -2,19 +2,21 @@
## Prerequisites
All documentation in these guides assumes you have already downloaded both the Azure `az` CLI tool and the `aks-engine` binary tool. Follow the [quickstart guide](../tutorials/quickstart.md) before continuing if you're creating a Kubernetes cluster using AKS Engine for the first time.
This guide assumes you already have a running cluster deployed using the `aks-engine` CLI. For more details on how to do that see [deploy](creating_new_clusters.md#deploy) or [generate](generate.md).
## Scale
The `aks-engine scale` command can increase or decrease the number of nodes in an existing agent pool in an AKS Engine-created Kubernetes cluster. The command takes a desired node count, which means that you don't have any control over the naming of any new nodes, if the desired count is greater than the current number of nodes in the target pool (though generally new nodes are named incrementally from the "last" node); and you don't have any control over which nodes will be removed, if the desired node count is less than the current number of nodes in the target pool. For clusters that are relatively "static", using `aks-engine scale` may be appropriate. For highly dynamic clusters that want to take advantage of real-time, cluster metrics-derived scaling, we recommend running `cluster-autoscaler` in your cluster, which we document [here](../../examples/addons/cluster-autoscaler/README.md).
Also note that for VMSS-backed node pools (the AKS Engine default node pool type), scale "in" operations will *not* cordon and drain nodes before they are removed. This is because for VMSS node pools `aks-engine scale` is simply a thin wrapper around the VMSS API, and the VMSS API doesn't have any awareness of the Kubernetes application layer in order to cordon and drain nodes prior to removing instances from the VMSS. For this reason, again, we recommend using `cluster-autoscaler` with VMSS node pools for clusters with regular, periodic scaling requirements in both directions (both "in" and "out").
The example below will assume you have a cluster deployed, and that the API model originally used to deploy that cluster is stored at `_output/<dnsPrefix>/apimodel.json`. It will also assume that there is a node pool named "agentpool1" in your cluster.
To scale the cluster you will run a command like:
```sh
$ aks-engine scale --subscription-id <subscription_id> \
    --resource-group mycluster --location <location> \
    --client-id '<service principal client ID>' \
@ -28,16 +30,208 @@ This command will re-use the `apimodel.json` file inside the output directory as
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--subscription-id|yes|The subscription id the cluster is deployed in.|
|--resource-group|yes|The resource group the cluster is deployed in.|
|--location|yes|The location the resource group is in.|
|--api-model|yes|Relative path to the generated API model for the cluster.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--certificate-path|depends| The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--node-pool|depends|Required if there is more than one node pool. Which node pool should be scaled.|
|--new-node-count|yes|Desired number of nodes in the node pool.|
|--apiserver|when scaling down|apiserver endpoint (required to cordon and drain nodes). This should be output as part of the create template or it can be found by looking at the public ip addresses in the resource group.|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--language|no|Language to return error message in. Default value is "en-us".|
## Frequently Asked Questions
### Is it possible to scale control plane VMs?
It is not possible to increase or decrease *the number* of VMs that run the control plane. However, you may increase or decrease the *size* of the VM by modifying the `"vmSize"` property of the `masterProfile` in your cluster API model, and then run `aks-engine upgrade --control-plane-only`. See [the upgrade documentation](upgrade.md) for more information.
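For example, moving the control plane to a larger VM size might look like this sketch (the SKU and intermediate filename are illustrative; confirm `--control-plane-only` is available in your `aks-engine` version):
```sh
# Bump the control plane VM size in the API model, then rebuild only the control plane VMs
$ jq '.properties.masterProfile.vmSize = "Standard_D8s_v3"' \
    < _output/<dnsPrefix>/apimodel.json > apimodel-resized.json
$ mv apimodel-resized.json _output/<dnsPrefix>/apimodel.json
$ aks-engine upgrade --control-plane-only \
    --api-model _output/<dnsPrefix>/apimodel.json \
    --subscription-id <subscription_id> \
    --resource-group <resource_group> \
    --location <location> \
    --upgrade-version <current or newer Kubernetes version>
```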
### What version of aks-engine should I use to run `aks-engine scale` operations?
As a general rule, we recommend that the latest released version of AKS Engine be used to scale out node pools. This is because the latest released version will have recent security updates and bug fixes to the OS layer, as well as critical system components like the container runtime. This may yield a heterogeneous node pool, but those differences should not introduce functional regressions; rather, they will ensure that a higher proportion of nodes in that pool are running the latest, validated bits. For example, here's an overview of a cluster originally built with 2 nodes in the pool "agentpool1" from `aks-engine` version `v0.52.1`, and then scaled out to 10 nodes using `aks-engine` v0.56.0:
```
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-agentpool1-10367588-vmss000000 Ready agent 8m23s v1.18.3 10.240.0.34 <none> Ubuntu 18.04.4 LTS 5.3.0-1022-azure docker://3.0.12+azure
k8s-agentpool1-10367588-vmss000001 Ready agent 8m23s v1.18.3 10.240.0.65 <none> Ubuntu 18.04.4 LTS 5.3.0-1022-azure docker://3.0.12+azure
k8s-agentpool1-10367588-vmss000002 Ready agent 2m15s v1.18.3 10.240.0.96 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000003 Ready agent 2m38s v1.18.3 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000004 Ready agent 2m50s v1.18.3 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000005 Ready agent 3m38s v1.18.3 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000006 Ready agent 3m34s v1.18.3 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000007 Ready agent 3m32s v1.18.3 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000008 Ready agent 3m20s v1.18.3 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000009 Ready agent 3m33s v1.18.3 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-master-10367588-0 Ready master 8m23s v1.18.3 10.255.255.5 <none> Ubuntu 18.04.4 LTS 5.3.0-1022-azure docker://3.0.12+azure
```
As you can see, there are 2 nodes (the original 2) running a moby build identified as `docker://3.0.12+azure`, while 8 nodes (the recently added ones) are running a moby build identified as `docker://19.3.12`. Both builds of moby are functionally equivalent in terms of being able to service a Kubernetes v1.18.3 node, but the latter is preferable as it includes more recent fixes (some critical) to the container runtime implementation. It is preferable to have 8 of 10 nodes running the latest bits, compared to all 10 running the older bits, despite the potential negative trade-off of losing strict homogeneity across the nodes in the pool.
The above scale operation using a newer version of the `aks-engine` CLI also has the side effect of updating the VMSS model that underlies that node pool, which means that any future scale out operation using the VMSS API (via the `az` CLI or Azure portal web UI) will yield nodes running the latest bits.
### How do I remove nodes from my VMSS node pool without incurring production downtime?
As stated above, when scaling "in" by running `aks-engine scale` against a VMSS-backed node pool, the deleted nodes will not be cordoned and drained prior to being deleted, which means any running workloads will be interrupted non-gracefully. For this reason, when manually scaling in, we recommend that you *not* use `aks-engine scale`, but instead manually re-balance your cluster by moving workloads off of the nodes you want to remove, and then manually delete those VMSS instances.
We'll use the example cluster above and remove the original 2 nodes running the older build of moby. First, we mark those nodes as unschedulable so that no new workloads are scheduled onto them during this maintenance:
```sh
$ for node in "k8s-agentpool1-10367588-vmss000000 k8s-agentpool1-10367588-vmss000001"; do kubectl cordon $node; done
node/k8s-agentpool1-10367588-vmss000000 cordoned
node/k8s-agentpool1-10367588-vmss000001 cordoned
```
We can then instruct the Kubernetes control plane to, as gracefully as possible, move workloads off of those nodes:
```sh
$ for node in "k8s-agentpool1-10367588-vmss000000 k8s-agentpool1-10367588-vmss000001"; do kubectl drain $node; done
node/k8s-agentpool1-10367588-vmss000000 already cordoned
node/k8s-agentpool1-10367588-vmss000001 already cordoned
error: unable to drain node "k8s-agentpool1-10367588-vmss000000", aborting command...
There are pending nodes to be drained:
k8s-agentpool1-10367588-vmss000000
k8s-agentpool1-10367588-vmss000001
error: cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/azure-cni-networkmonitor-wvrg7, kube-system/azure-ip-masq-agent-qqlvf, kube-system/blobfuse-flexvol-installer-9q45x, kube-system/csi-secrets-store-provider-azure-jsgkh, kube-system/csi-secrets-store-q5wnw, kube-system/kube-proxy-cgh7g
```
It's always best to do a vanilla `kubectl drain` first to see the set of scheduled pods that require a little more forceful removal, so that you can be extra sure that you actually want to do this. In our case, we're O.K. with leaving the DaemonSet-managed pods in place, so we proceed to add the `--ignore-daemonsets` option:
```sh
$ for node in "k8s-agentpool1-10367588-vmss000000 k8s-agentpool1-10367588-vmss000001"; do kubectl drain $node --ignore-daemonsets; done
node/k8s-agentpool1-10367588-vmss000000 already cordoned
node/k8s-agentpool1-10367588-vmss000001 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/azure-cni-networkmonitor-wvrg7, kube-system/azure-ip-masq-agent-qqlvf, kube-system/blobfuse-flexvol-installer-9q45x, kube-system/csi-secrets-store-provider-azure-jsgkh, kube-system/csi-secrets-store-q5wnw, kube-system/kube-proxy-cgh7g
evicting pod "metrics-server-bb7db87bc-xzxld"
pod/metrics-server-bb7db87bc-xzxld evicted
node/k8s-agentpool1-10367588-vmss000000 evicted
WARNING: ignoring DaemonSet-managed Pods: kube-system/azure-cni-networkmonitor-cvfqs, kube-system/azure-ip-masq-agent-p755d, kube-system/blobfuse-flexvol-installer-stc2x, kube-system/csi-secrets-store-fs9xr, kube-system/csi-secrets-store-provider-azure-7qhqt, kube-system/kube-proxy-bpdvl
evicting pod "coredns-autoscaler-5c7db64899-kp64h"
pod/coredns-autoscaler-5c7db64899-kp64h evicted
node/k8s-agentpool1-10367588-vmss000001 evicted
```
Now, delete the two VMSS instances:
```sh
$ az vmss delete-instances -g kubernetes-westus2-95121 -n k8s-agentpool1-10367588-vmss --instance-ids 0 1
$ echo $?
0
```
Following that, we can observe that the remaining 8 nodes are the ones we want.
```sh
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-agentpool1-10367588-vmss000002 Ready agent 25m v1.18.3 10.240.0.96 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000003 Ready agent 26m v1.18.3 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000004 Ready agent 26m v1.18.3 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000005 Ready agent 27m v1.18.3 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000006 Ready agent 27m v1.18.3 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000007 Ready agent 27m v1.18.3 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000008 Ready agent 26m v1.18.3 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000009 Ready agent 27m v1.18.3 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-master-10367588-0 Ready master 31m v1.18.3 10.255.255.5 <none> Ubuntu 18.04.4 LTS 5.3.0-1022-azure docker://3.0.12+azure
```
Now the node pool is once again homogeneous, and all future scale operations against the VMSS API will create nodes using the new model.
### My cluster is in a no-egress (airgap) environment, and using a newer version of AKS Engine to scale isn't working. What's wrong?
AKS Engine curates a VHD (publicly available OS image) for each released version which ensures that all required components are pre-installed onto the VM for all versions of Kubernetes that are supported for that particular AKS Engine release (as a rule, AKS Engine supports the latest 2 known-working patch versions of any given supported Kubernetes minor version at the time of release). Because those required components are already present on the VHD, so long as you're installing an AKS Engine-supported version of Kubernetes, your cluster operation will not have to reach out to the public internet (or even traverse outside your VNET to Azure) to bootstrap the Kubernetes runtime.
However, if you're running an operation like `aks-engine scale` using a newer version of the `aks-engine` CLI than the version used to build your cluster originally, it is very likely that Kubernetes version support will have evolved in the meantime. Using the above examples, we can observe that the original version of `aks-engine` (v0.52.1 in our example) delivered a `v1.18.3` version of Kubernetes 1.18, and that the newer version of `aks-engine` (v0.56.0 in our example) respected that (although it *did*, by design, update the container runtime, as well as various other OS-layer bits). tl;dr "We still have a `v1.18.3` node pool."
While the above outcome is fine for clusters built in VNETs with permissive egress, if your VNET does not permit general egress to the public internet, you may observe that a newer version of `aks-engine` is not able to successfully complete an operation like the above. To overcome this, we can try two things: (1) obtain the list of supported Kubernetes versions that the newer version of the `aks-engine` CLI uses, and then (2) manually update your API model to explicitly require that newer Kubernetes version. For example:
Let's get the list of supported Kubernetes versions in `v0.56.0` of `aks-engine`:
```sh
$ aks-engine get-versions
Version Upgrades
1.19.1
1.19.0 1.19.1
1.18.8 1.19.0, 1.19.1
1.18.6 1.18.8, 1.19.0, 1.19.1
1.17.11 1.18.6, 1.18.8
1.17.9 1.17.11, 1.18.6, 1.18.8
1.16.15 1.17.9, 1.17.11
1.16.14 1.16.15, 1.17.9, 1.17.11
1.15.12 1.16.14, 1.16.15
1.15.11 1.15.12, 1.16.14, 1.16.15
1.6.9 1.15.11, 1.15.12
```
We can see above that for Kubernetes 1.18, the `aks-engine` CLI being invoked now supports `v1.18.6` and `v1.18.8`. As we expect based on our observations, the API model requires `v1.18.3`:
```sh
$ grep orchestratorVersion _output/kubernetes-westus2-95121/apimodel.json
"orchestratorVersion": "1.18.3",
```
So, let's manually update that file to `"1.18.8"` instead (using vim or your preferred editor), to declare that we want the most recent, AKS Engine-supported 1.18 version of Kubernetes. After we do that:
```sh
$ grep orchestratorVersion _output/kubernetes-westus2-95121/apimodel.json
"orchestratorVersion": "1.18.8",
```
Now, let's try that scale operation again!
```sh
$ bin/aks-engine scale --subscription-id $AZURE_SUB_ID --client-id $AZURE_SP_ID --client-secret $AZURE_SP_PW --api-model _output/$RESOURCE_GROUP/apimodel.json --location westus2 --resource-group $RESOURCE_GROUP --apiserver $RESOURCE_GROUP.westus2.cloudapp.azure.com --node-pool agentpool1 --new-node-count 10 --auth-method client_secret --identity-system azure_ad
INFO[0004] found VMSS k8s-agentpool1-10367588-vmss in resource group kubernetes-westus2-95121 that correlates with node pool agentpool1
WARN[0004] Any new nodes will have Moby version 19.03.12
WARN[0004] containerd will be upgraded to version 1.3.7
INFO[0004] Removing singlePlacementGroup property from [variables('agentpool1VMNamePrefix')]
INFO[0004] Nodes in pool 'agentpool1' before scaling:
NODE STATUS VERSION OS KERNEL
k8s-agentpool1-10367588-vmss000002 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000003 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000004 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000005 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000006 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000007 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000008 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000009 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
INFO[0004] Starting ARM Deployment kubernetes-westus2-95121-1270661800 in resource group kubernetes-westus2-95121. This will take some time...
INFO[0174] Finished ARM Deployment (kubernetes-westus2-95121-1270661800). Succeeded
INFO[0174] Nodes in pool 'agentpool1' after scaling:
NODE STATUS VERSION OS KERNEL
k8s-agentpool1-10367588-vmss000002 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000003 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000004 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000005 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000006 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000007 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000008 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss000009 Ready v1.18.3 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss00000a NotReady v1.18.8 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
k8s-agentpool1-10367588-vmss00000b NotReady v1.18.8 Ubuntu 18.04.5 LTS 5.4.0-1025-azure
```
Shortly after, the new nodes are `Ready` and running `v1.18.8`:
```
$ k get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-agentpool1-10367588-vmss000002 Ready agent 49m v1.18.3 10.240.0.96 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000003 Ready agent 49m v1.18.3 10.240.0.127 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000004 Ready agent 50m v1.18.3 10.240.0.158 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000005 Ready agent 50m v1.18.3 10.240.0.189 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000006 Ready agent 50m v1.18.3 10.240.0.220 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000007 Ready agent 50m v1.18.3 10.240.0.251 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000008 Ready agent 50m v1.18.3 10.240.1.26 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss000009 Ready agent 50m v1.18.3 10.240.1.57 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss00000a Ready agent 65s v1.18.8 10.240.0.34 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-agentpool1-10367588-vmss00000b Ready agent 68s v1.18.8 10.240.0.65 <none> Ubuntu 18.04.5 LTS 5.4.0-1025-azure docker://19.3.12
k8s-master-10367588-0 Ready master 55m v1.18.3 10.255.255.5 <none> Ubuntu 18.04.4 LTS 5.3.0-1022-azure docker://3.0.12+azure
```
In summary, by updating the API model to require the latest AKS Engine-supported Kubernetes version, we produce an ARM template deployment that can be executed successfully without traversing outside the VNET. As before, we've lost strict homogeneity, but because Kubernetes guarantees functional compatibility within a minor release channel (no breaking changes with patch releases), we now have an operationally stable cluster running the latest validated bits.
docs/topics/update.md
# Updating Kubernetes Node Pools
## Prerequisites
All documentation in these guides assumes you have already downloaded both the Azure `az` CLI tool and the `aks-engine` binary tool. Follow the [quickstart guide](../tutorials/quickstart.md) before continuing if you're creating a Kubernetes cluster using AKS Engine for the first time.
This guide assumes you already have a running cluster deployed using the `aks-engine` CLI. For more details on how to do that see [deploy](creating_new_clusters.md#deploy) or [generate](generate.md).
## Update
The `aks-engine update` command can update the VMSS model of a node pool according to a modified configuration of the aks-engine-generated `apimodel.json`. When used in combination with a newer version of the `aks-engine` CLI compared to the version used to build the cluster originally, node pools can be regularly refreshed so that as they scale over time, new nodes always run the latest, validated bits, using your latest, validated node configuration.
This command can *only* be used with VMSS-backed node pools (the default AKS Engine node pool type is VMSS).
The example below will assume you have a cluster deployed, and that the API model originally used to deploy that cluster is stored at `_output/<dnsPrefix>/apimodel.json`. It will also assume that there is a node pool named "agentpool1" in your cluster.
To update the cluster you will run a command like:
```sh
$ aks-engine update --subscription-id <subscription_id> \
--resource-group mycluster --location <location> \
--client-id '<service principal client ID>' \
--client-secret '<service principal client secret>' \
--api-model _output/mycluster/apimodel.json \
--node-pool agentpool1
```
The above operation will complete rather quickly, as it is only updating the VMSS model; it is not actually modifying any existing VMSS instances.
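Any nodes subsequently added by VMSS scale-out operations will be built from the updated model. As a sketch (the resource group and VMSS name below are illustrative; VMSS-backed pools follow the `k8s-<poolname>-<suffix>-vmss` naming pattern shown elsewhere in these docs):

```sh
# Scale the pool out via the VMSS API; new instances pick up the updated VMSS model
az vmss scale \
  --resource-group mycluster \
  --name k8s-agentpool1-10367588-vmss \
  --new-capacity 5
```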
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--subscription-id|yes|The subscription id the cluster is deployed in.|
|--resource-group|yes|The resource group the cluster is deployed in.|
|--location|yes|The location the resource group is in.|
|--api-model|yes|Relative path to the generated API model for the cluster.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--certificate-path|depends| The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--node-pool|yes|Which node pool should be updated.|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--language|no|Language to return error message in. Default value is "en-us".|
## Frequently Asked Questions
### Why would I use update instead of upgrade to upgrade a VMSS node pool?
The `aks-engine upgrade` command actually replaces existing nodes with new nodes, one at a time. Such an approach is appropriate if you are confident that the outcome of such an operation will be successful. We recommend attaining that confidence by staging a series of full end-to-end operations that simulate the series of operations in your production environment. In other words:
1. Create a cluster with a specific configuration in a specific cloud environment + region using a specific version of `aks-engine`.
- All of the above must exactly match the original configuration + `aks-engine` version used to create your cluster initially.
2. Do something like the above for every `aks-engine` operation performed between the time your cluster was originally created and now.
3. Run `aks-engine upgrade` with your desired upgrade configuration.
Because `aks-engine upgrade` is a destructive operation and there is no definitive "undo" or "rollback", if #3 above fails for any reason you will have to re-stage the entire cluster + set of operations each time in order to continue experimenting in your staging environment, until you land upon a repeatedly working `aks-engine upgrade` scenario that you can confidently apply against your production cluster.
The above is a time consuming and imperfect workflow, and so `aks-engine update` is an alternative approach that allows more flexibility. For example:
- Because `aks-engine update` is merely a VMSS model update against a single node pool, and not a destructive, "whole cluster" operation, the viability of an updated node pool can be tested piecemeal, without affecting existing production traffic.
- In the event that the updated VMSS model produces undesirable new nodes, you may "undo" or "roll back" this model update change to the last known good VMSS model by running an `aks-engine update` operation using an older, known-working version of AKS Engine (for example, if you've never run `aks-engine update` before, you would use the version of AKS Engine you used to deploy your cluster originally) with an API model specification that has been tested as working.
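For instance, a rollback sketch might look like the following, assuming the older, known-working release of `aks-engine` is available locally as `./aks-engine-old` (a hypothetical filename) and a known-good copy of the API model has been preserved:

```sh
# Re-apply the last known-good VMSS model with the older aks-engine binary
./aks-engine-old update \
  --subscription-id <subscription_id> \
  --resource-group mycluster \
  --location <location> \
  --client-id '<service principal client ID>' \
  --client-secret '<service principal client secret>' \
  --api-model known-good/apimodel.json \
  --node-pool agentpool1
```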
## Prerequisites
All documentation in these guides assumes you have already downloaded both the Azure CLI and `aks-engine`. Follow the [quickstart guide](../tutorials/quickstart.md) before continuing.
This guide assumes you already have deployed a cluster using `aks-engine`. For more details on how to do that see [deploy](../tutorials/quickstart.md#deploy).
## Upgrade
In order to ensure that your `aks-engine upgrade` operation runs smoothly, there are a few things you should be aware of before getting started.
1) You will need access to the API Model (`apimodel.json`) that was generated by `aks-engine deploy` or `aks-engine generate` (by default this file is placed into a relative directory that looks like `_output/<clustername>/`).
2) `aks-engine upgrade` expects an API model that conforms to the current state of the cluster. In other words, the Azure resources inside the resource group deployed by `aks-engine` should be in the same state as when they were originally created by `aks-engine`. If you perform manual operations on your Azure IaaS resources (other than successful `aks-engine scale`, `aks-engine update`, or `aks-engine upgrade` operations) DO NOT use `aks-engine upgrade`, as the aks-engine-generated ARM template won't be reconcilable against the state of the Azure resources that reside in the resource group. Some examples of manual operations that will prevent upgrade from working successfully:
- renaming resources
- executing follow-up CustomScriptExtensions against VMs after a cluster has been created: a VM or VMSS instance may only have a single CustomScriptExtension attached to it; follow-up CustomScriptExtension operations will essentially "replace" the CustomScriptExtension defined by aks-engine at cluster creation time, and `aks-engine upgrade` will not be able to recognize the VM resource.
`aks-engine upgrade` relies on some resources (such as VMs) to be named in accordance with the original `aks-engine` deployment. In summary, the set of Azure resources in the resource group is mutually reconcilable by `aks-engine upgrade` only if those resources have been exclusively created and managed as the result of a series of successive ARM template deployments originating from AKS Engine commands that have run to completion successfully.
3) `aks-engine upgrade` allows upgrading the Kubernetes version to any AKS Engine-supported patch release in the current minor release channel that is greater than the current version on the cluster (e.g., from `1.18.8` to `1.18.9`), or to the next AKS Engine-supported minor version (e.g., from `1.18.8` to `1.19.1`). (Or, see [`aks-engine upgrade --force`](#force-upgrade) if you want to bypass AKS Engine "supported version requirements").  In practice, the next AKS Engine-supported minor version will commonly be a single minor version ahead of the current cluster version. However, if the cluster has not been upgraded in a significant amount of time, the "next" minor version may no longer be supported by aks-engine. In such a case, your long-lived cluster will be upgradable to the nearest minor version that `aks-engine` supports at the time of upgrade (e.g., from `1.14.7` to `1.16.15`).
To get the list of all available Kubernetes versions and upgrades, run the `get-versions` command:
To get the versions of Kubernetes that your particular cluster version is upgradable to, provide its current Kubernetes version in the `version` arg:
```bash
./bin/aks-engine get-versions --version 1.18.8
```
4) `aks-engine upgrade` relies upon a working connection to the cluster control plane during upgrade, both (1) to validate successful upgrade progress, and (2) to cordon and drain nodes before upgrading them, in order to minimize operational downtime of any running cluster workloads. If you are upgrading a **private cluster**, you must run `aks-engine upgrade` from a host VM that has network access to the control plane, for example a jumpbox VM that resides in the same VNET as the master VMs. For more information on private clusters [refer to this documentation](features.md#feat-private-cluster).
5) If using `aks-engine upgrade` in production, it is recommended to stage an upgrade test on a cluster that was built to the same specifications (built with the same cluster configuration + the same version of the `aks-engine` command line tool) as your production cluster before performing the upgrade, especially if the cluster configuration is "interesting", or in other words differs significantly from defaults. The reason for this is that AKS Engine supports many different cluster configurations and the extent of E2E testing that the AKS Engine team runs cannot practically cover every possible configuration. Therefore, it is recommended that you ensure in a staging environment that your specific cluster configuration is upgradable using `aks-engine upgrade` before attempting this potentially destructive operation on your production cluster.
6) `aks-engine upgrade` is backwards compatible. If you deployed with `aks-engine` version `0.27.x`, you can run upgrade with version `0.29.y`. In fact, it is recommended that you use the latest available `aks-engine` version when running an upgrade operation. This will ensure that you get the latest available software and bug fixes in your upgraded cluster.
7) `aks-engine upgrade` will automatically re-generate your cluster configuration to best pair with the desired new version of Kubernetes, and/or the version of `aks-engine` that is used to execute `aks-engine upgrade`. To use an example of both:
- When you upgrade to (for example) Kubernetes 1.18 from 1.17, AKS Engine will automatically change your control plane configuration (e.g., `coredns`, `metrics-server`, `kube-proxy`) so that the cluster component configurations have a close, known-working affinity with 1.18.
- When you perform an upgrade, even a Kubernetes patch release upgrade such as 1.18.8 to 1.18.9, using a newer version of `aks-engine`, a newer version of `etcd` (for example) may have been validated and configured as the default since the version of `aks-engine` used to build the cluster was released. So, for example, without any explicit user direction, the newly upgraded cluster will now be running etcd v3.2.26 instead of v3.2.25. _This is by design._
In summary, using `aks-engine upgrade` means you will freshen and re-pave the entire stack that underlies Kubernetes to reflect the best-known, recent implementation of Azure IaaS + OS + OS config + Kubernetes config.
### Parameters
|Parameter|Required|Description|
|-----------------|---|---|
|--api-model|yes|Relative path to the API model (cluster definition) that declares the desired cluster configuration.|
|--kubeconfig|no|Path to kubeconfig; if not provided, it will be generated on the fly from the API model data.|
|--upgrade-version|yes|Version of Kubernetes to upgrade to.|
|--force|no|Force upgrading the cluster to desired version, regardless of version support. Allows same-version upgrades and downgrades.|
|--control-plane-only|no|Upgrade control plane VMs only, do not upgrade node pools.|
|--cordon-drain-timeout|no|How long to wait for each vm to be cordoned in minutes (default -1, i.e., no timeout).|
|--vm-timeout|no|How long to wait for each vm to be upgraded in minutes (default -1, i.e., no timeout).|
|--upgrade-windows-vhd|no|Upgrade image reference of all Windows nodes to a new AKS Engine-validated image, if available (default is true).|
|--azure-env|no|The target Azure cloud (default "AzurePublicCloud") to deploy to.|
|--subscription-id|yes|The subscription id the cluster is deployed in.|
|--resource-group|yes|The resource group the cluster is deployed in.|
|--location|yes|The location the cluster is deployed in.|
|--client-id|depends| The Service Principal Client ID. This is required if the auth-method is set to service_principal/client_certificate|
|--client-secret|depends| The Service Principal Client secret. This is required if the auth-method is set to service_principal|
|--certificate-path|depends| The path to the file which contains the client certificate. This is required if the auth-method is set to client_certificate|
|--identity-system|no|Identity system (default is azure_ad)|
|--auth-method|no|The authentication method used. Default value is `client_secret`. Other supported values are: `cli`, `client_certificate`, and `device`.|
|--private-key-path|no|Path to private key (used with --auth-method=client_certificate).|
|--language|no|Language to return error message in. Default value is "en-us".|
### Under the hood
During the upgrade, *aks-engine* successively visits virtual machines that constitute the cluster (first the master nodes, then the agent nodes) and performs the following operations:
Control plane nodes:
- cordon the node and drain existing workloads
- delete the VM
The upgrade operation takes an optional `--force` argument:
```
force upgrading the cluster to desired version. Allows same version upgrades and downgrades.
```
In some situations, you might want to bypass the AKS Engine validation of your API model versions and cluster nodes versions. This is at your own risk and you should assess the potential harm of using this flag.
The `--force` parameter instructs the upgrade process to:
> Note: If you pass in a version that AKS Engine literally cannot install (e.g., a version of Kubernetes that does not exist), you may break your cluster.
For each node, the cluster will follow the same process described in the section above: [Under the hood](#under-the-hood)
## Frequently Asked Questions
### When should I use `aks-engine upgrade --control-plane-only`?
We actually recommend that you *only* use `aks-engine upgrade --control-plane-only`. There are a few reasons:
- The `aks-engine upgrade` workflow has been implemented in such a way that assumes the underlying nodes are pets, and not cattle. Each node is carefully accounted for during the operation, and every effort is made to "put the cluster back together" as if the nodes simply went away for a few minutes, but then came back. (This is in fact not what's happening under the hood, as the original VMs are in fact destroyed, and replaced with entirely new VMs; only the data disks are actually preserved.) Such an approach is appropriate for control plane VMs, because they are actually defined by AKS Engine as more or less static resources. However, individual worker nodes are not statically defined — the nodes participating in a cluster are designed to be ephemeral in response to changing operational realities.
- `aks-engine upgrade` does its best to minimize operational cluster downtime, but there will be some amount of interruption due to the fact that VMs are in fact deleted, then added, behind a distributed control plane (we're assuming you're running 3 or 5 control plane VMs). Because a small amount of disruption is unavoidable given the architectural constraints of `aks-engine upgrade`, it is more suitable to absorb that disruption in the control plane, which is probably not user-impacting (unless your users are Kubernetes cluster administrators!). You may be able to afford a small maintenance window to update your control plane, while your existing production workloads continue to serve traffic reliably. Of course production traffic is not static, and any temporary control plane unavailability will disrupt the dynamic attributes of your cluster that ultimately serve user traffic. We do recommend upgrading the control plane during an appropriate time window when it is preferable for your cluster to be put into a "static" mode.
- A Kubernetes cluster is likely to run a variety of production workloads, each with its own requirements for downtime maintenance. Running a cluster-wide operation like `aks-engine upgrade` forces you to schedule a maintenance window for your control plane and all production workloads simultaneously.
- More flexible node pool-specific tooling is available to upgrade various parts of your production-serving nodes. See the [addpool](addpool.md), [update](update.md), and [scale](scale.md) documentation to help you develop cluster workflows for managing node pools distinct from the control plane.
### What should I upgrade first, my control plane nodes, or my worker nodes?
tl;dr *Upgrade your control plane first!*
If following our guidance you employ `aks-engine upgrade --control-plane-only` to upgrade your control plane distinctly from your worker nodes, and a combination of `aks-engine addpool` and `aks-engine update` to upgrade worker nodes, the natural question is: which should I do first?
The Kubernetes project documents that the control plane may be up to 2 minor versions newer than kubelet, but not vice versa. What this means is that you should not run a newer version of Kubernetes on a node than is running on the control plane. Relevant documentation:
- https://kubernetes.io/docs/setup/release/version-skew-policy/
[Another example from the kubeadm community project](https://v1-18.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) outlines its upgrade process, which specifies upgrading the control plane first.
### Can I use `aks-engine upgrade --control-plane-only` to change the control plane configuration irrespective of updating the Kubernetes version?
Yes, but with caveats. Essentially you may use the `aks-engine upgrade --control-plane-only` functionality to replace your control plane VMs, one-at-a-time, with newer VMs rendered from updated API model configuration. You should always stage such changes, however, by building a staging cluster (reproducing at a minimum the version of `aks-engine` used to build your production cluster, and the API model JSON used as input; in a best-case scenario it will be in the same location as well). Here are a few useful possibilities that will work:
- Updating the VM SKU by changing the `properties.masterProfile.vmSize` value
- *Certain* configurable/tuneable kubelet properties in `properties.masterProfile.kubernetesConfig.kubeletConfig`, e.g.:
- `"--feature-gates"`
- `"--node-status-update-frequency"`
- `"--pod-max-pids"`
- `"--register-with-taints"`
- `"--image-gc-high-threshold"` or `"--image-gc-low-threshold"`
Generally, don't change any listening ports or filepaths, as those may have static dependencies elsewhere.
- Again, *certain* configurable/tuneable kubelet properties in:
- `properties.orchestratorProfile.kubernetesConfig.controllerManagerConfig`
- `properties.orchestratorProfile.kubernetesConfig.cloudControllerManagerConfig`
- `properties.orchestratorProfile.kubernetesConfig.apiServerConfig`
- `properties.orchestratorProfile.kubernetesConfig.schedulerConfig`
- Control plane VM runtime kernel configuration via `properties.masterProfile.kubernetesConfig.sysctldConfig`
You *may not* change the following values; doing so may break your cluster!
- DO NOT CHANGE the number of VMs in your control plane via `masterProfile.count`
- DO NOT CHANGE the static IP address range of your control plane via `masterProfile.firstConsecutiveStaticIP`
These types of configuration changes are advanced; only attempt them if you're a confident, expert Kubernetes cluster administrator!
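As an illustrative sketch only (the property paths follow the API model locations listed above; the specific values are hypothetical), a control plane kubelet tuning change might look like this fragment of the API model:

```json
{
  "properties": {
    "masterProfile": {
      "vmSize": "Standard_D4s_v3",
      "kubernetesConfig": {
        "kubeletConfig": {
          "--node-status-update-frequency": "10s",
          "--image-gc-high-threshold": "80"
        }
      }
    }
  }
}
```

After editing the API model, the change is rolled out with `aks-engine upgrade --control-plane-only`, as described above.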
New to AKS Engine? Well, you came to the right place.
- [AKS Engine CLI Overview](cli-overview.md)
- [Quickstart Guide](quickstart.md) ([Chinese](quickstart.zh-CN.md))
- [Using a custom virtual network with AKS Engine](custom-vnet.md)
- [Using the Container Monitoring Add-on](containermonitoringaddon.md)
# AKS Engine CLI Overview
AKS Engine is designed to be used as a CLI tool (`aks-engine`). This document outlines the functionality that `aks-engine` provides to create and maintain a Kubernetes cluster on Azure.
## `aks-engine` commands
To get a quick overview of the commands available via the `aks-engine` CLI tool, just run `aks-engine` with no arguments (or include the `--help` argument):
```sh
$ aks-engine
Usage:
aks-engine [flags]
aks-engine [command]
Available Commands:
addpool Add a node pool to an existing AKS Engine-created Kubernetes cluster
completion Generates bash completion scripts
deploy Deploy an Azure Resource Manager template
generate Generate an Azure Resource Manager template
get-logs Collect logs and current cluster nodes configuration.
get-versions Display info about supported Kubernetes versions
help Help about any command
rotate-certs (experimental) Rotate certificates on an existing AKS Engine-created Kubernetes cluster
scale Scale an existing AKS Engine-created Kubernetes cluster
update Update an existing AKS Engine-created VMSS node pool
upgrade Upgrade an existing AKS Engine-created Kubernetes cluster
version Print the version of aks-engine
Flags:
--debug enable verbose debug logs
-h, --help help for aks-engine
--show-default-model Dump the default API model to stdout
Use "aks-engine [command] --help" for more information about a command.
```
## Operational Cluster Commands
These commands are provided by AKS Engine in order to create and maintain Kubernetes clusters. Note: there is no `aks-engine` command to delete a cluster; to delete a Kubernetes cluster created by AKS Engine, you must delete the resource group that contains the cluster resources. If the resource group can't be deleted because it contains other, non-Kubernetes-related Azure resources, then you must manually delete the Virtual Machine and/or Virtual Machine Scale Set (VMSS), Disk, Network Interface, Network Security Group, Public IP Address, Virtual Network, Load Balancer, and all other resources specified in the aks-engine-generated ARM template. Because manually deleting resources is tedious and requires following serial dependencies in the correct order, it is recommended that you dedicate a resource group to the Azure resources that AKS Engine will create to run your Kubernetes cluster. If you're running more than one cluster, we recommend a dedicated resource group per cluster.
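If the resource group is dedicated to the cluster, deletion is a single Azure CLI call (the resource group name below is a placeholder):

```sh
# Deletes the resource group and every resource in it, including the cluster
az group delete --name mycluster-rg --yes --no-wait
```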
### `aks-engine deploy`
The `aks-engine deploy` command will create a new cluster from scratch, using an API model (cluster definition) file as input to define the desired cluster configuration and shape, in the subscription, region, and resource group you provide, using credentials that you provide. Use this command to create a new cluster.
```sh
$ aks-engine deploy --help
Deploy an Azure Resource Manager template, parameters file and other assets for a cluster
Usage:
aks-engine deploy [flags]
Flags:
-m, --api-model string path to your cluster definition file
--auth-method client_secret auth method (default:client_secret, `cli`, `client_certificate`, `device`) (default "client_secret")
--auto-suffix automatically append a compressed timestamp to the dnsPrefix to ensure unique cluster name automatically
--azure-env string the target Azure cloud (default "AzurePublicCloud")
--ca-certificate-path string path to the CA certificate to use for Kubernetes PKI assets
--ca-private-key-path string path to the CA private key to use for Kubernetes PKI assets
--certificate-path string path to client certificate (used with --auth-method=client_certificate)
--client-id string client id (used with --auth-method=[client_secret|client_certificate])
--client-secret string client secret (used with --auth-method=client_secret)
-p, --dns-prefix string dns prefix (unique name for the cluster)
-f, --force-overwrite automatically overwrite existing files in the output directory
-h, --help help for deploy
--identity-system azure_ad identity system (default:azure_ad, `adfs`) (default "azure_ad")
--language string language to return error messages in (default "en-us")
-l, --location string location to deploy to (required)
-o, --output-directory string output directory (derived from FQDN if absent)
--private-key-path string path to private key (used with --auth-method=client_certificate)
-g, --resource-group string resource group to deploy to (will use the DNS prefix from the apimodel if not specified)
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
-s, --subscription-id string azure subscription id (required)
Global Flags:
--debug enable verbose debug logs
```
Detailed documentation on `aks-engine deploy` can be found [here](../topics/creating_new_clusters.md#deploy).
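As a quick illustration (the cluster definition path, names, and credentials below are placeholders), a typical invocation looks something like:

```sh
aks-engine deploy \
  --api-model ./kubernetes.json \
  --dns-prefix mycluster \
  --location westus2 \
  --resource-group mycluster \
  --subscription-id $AZURE_SUB_ID \
  --client-id $AZURE_SP_ID \
  --client-secret $AZURE_SP_PW
```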
### `aks-engine scale`
The `aks-engine scale` command will scale (in or out) a specific node pool participating in a Kubernetes cluster created by AKS Engine. Use this command to manually scale a node pool to a specific number of nodes.
```sh
$ aks-engine scale --help
Scale an existing AKS Engine-created Kubernetes cluster by specifying a new desired number of nodes in a node pool
Usage:
aks-engine scale [flags]
Flags:
-m, --api-model string path to the generated apimodel.json file
--apiserver string apiserver endpoint (required to cordon and drain nodes)
--auth-method client_secret auth method (default:client_secret, `cli`, `client_certificate`, `device`) (default "client_secret")
--azure-env string the target Azure cloud (default "AzurePublicCloud")
--certificate-path string path to client certificate (used with --auth-method=client_certificate)
--client-id string client id (used with --auth-method=[client_secret|client_certificate])
--client-secret string client secret (used with --auth-method=client_secret)
-h, --help help for scale
--identity-system azure_ad identity system (default:azure_ad, `adfs`) (default "azure_ad")
--language string language to return error messages in (default "en-us")
-l, --location string location the cluster is deployed in
-c, --new-node-count int desired number of nodes
--node-pool string node pool to scale
--private-key-path string path to private key (used with --auth-method=client_certificate)
-g, --resource-group string the resource group where the cluster is deployed
-s, --subscription-id string azure subscription id (required)
Global Flags:
--debug enable verbose debug logs
```
The `scale` command has limitations for scaling in (reducing the number of nodes in a node pool):
- It accepts a new, desired node count; it does not accept a list of specific nodes to remove from the pool.
- For VMSS-backed node pools, the removed nodes will not be cordoned and drained prior to being removed, which means any running workloads on nodes-to-be-removed will be disrupted without warning, and temporary operational impact is to be expected.
We generally recommend that you manage node pool scaling dynamically using the `cluster-autoscaler` project. More documentation about `cluster-autoscaler` is [here](../../examples/addons/cluster-autoscaler/README.md), including how to automatically install and configure it at cluster creation time as an AKS Engine addon.
Detailed documentation on `aks-engine scale` can be found [here](../topics/scale.md).
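As a quick illustration (names and credentials below are placeholders), scaling the pool `agentpool1` to 5 nodes looks something like:

```sh
aks-engine scale \
  --api-model _output/mycluster/apimodel.json \
  --location westus2 \
  --resource-group mycluster \
  --subscription-id $AZURE_SUB_ID \
  --client-id $AZURE_SP_ID \
  --client-secret $AZURE_SP_PW \
  --node-pool agentpool1 \
  --new-node-count 5 \
  --apiserver mycluster.westus2.cloudapp.azure.com
```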
### `aks-engine update`
The `aks-engine update` command will update the VMSS model of a node pool according to a modified configuration of the aks-engine-generated `apimodel.json`. The updated node configuration will not take effect on any existing nodes, but will be applied to all future, new nodes created by VMSS scale out operations. Use this command to update the node configuration (such as the OS configuration, VM SKU, or Kubernetes kubelet configuration) of an existing VMSS node pool.
```sh
$ aks-engine update --help
Update an existing AKS Engine-created VMSS node pool in a Kubernetes cluster by updating its VMSS model
Usage:
aks-engine update [flags]
Flags:
-m, --api-model string path to the generated apimodel.json file
--auth-method client_secret auth method (default:client_secret, `cli`, `client_certificate`, `device`) (default "client_secret")
--azure-env string the target Azure cloud (default "AzurePublicCloud")
--certificate-path string path to client certificate (used with --auth-method=client_certificate)
--client-id string client id (used with --auth-method=[client_secret|client_certificate])
--client-secret string client secret (used with --auth-method=client_secret)
-h, --help help for update
--identity-system azure_ad identity system (default:azure_ad, `adfs`) (default "azure_ad")
--language string language to return error messages in (default "en-us")
-l, --location string location the cluster is deployed in
--node-pool string node pool to scale
--private-key-path string path to private key (used with --auth-method=client_certificate)
-g, --resource-group string the resource group where the cluster is deployed
-s, --subscription-id string azure subscription id (required)
Global Flags:
--debug enable verbose debug logs
```
Detailed documentation on `aks-engine update` can be found [here](../topics/update.md).
### `aks-engine addpool`
The `aks-engine addpool` command will add a new node pool to an existing AKS Engine-created cluster. Using a JSON file to define the new node pool's configuration, and referencing the aks-engine-generated `apimodel.json`, you can add new nodes to your cluster. Use this command to add a specific number of new nodes using a discrete configuration compared to the existing nodes participating in your cluster.
```sh
$ aks-engine addpool --help
Add a node pool to an existing AKS Engine-created Kubernetes cluster by referencing a new agentpoolProfile spec
Usage:
aks-engine addpool [flags]
Flags:
-m, --api-model string path to the generated apimodel.json file
--auth-method client_secret auth method (default:client_secret, `cli`, `client_certificate`, `device`) (default "client_secret")
--azure-env string the target Azure cloud (default "AzurePublicCloud")
--certificate-path string path to client certificate (used with --auth-method=client_certificate)
--client-id string client id (used with --auth-method=[client_secret|client_certificate])
--client-secret string client secret (used with --auth-method=client_secret)
-h, --help help for addpool
--identity-system azure_ad identity system (default:azure_ad, `adfs`) (default "azure_ad")
--language string language to return error messages in (default "en-us")
-l, --location string location the cluster is deployed in
-p, --node-pool string path to a JSON file that defines the new node pool spec
--private-key-path string path to private key (used with --auth-method=client_certificate)
-g, --resource-group string the resource group where the cluster is deployed
-s, --subscription-id string azure subscription id (required)
Global Flags:
--debug enable verbose debug logs
```
Detailed documentation on `aks-engine addpool` can be found [here](../topics/addpool.md).
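As a sketch, a minimal node pool definition file passed via `--node-pool` might look like the following (the field names mirror the `agentPoolProfiles` entries in the API model; the values are illustrative, so adjust them to your needs):

```json
{
  "name": "newpool1",
  "count": 3,
  "vmSize": "Standard_D2s_v3",
  "availabilityProfile": "VirtualMachineScaleSets"
}
```

You would then run `aks-engine addpool` with `--api-model _output/<dnsPrefix>/apimodel.json`, `--node-pool <path to the JSON file above>`, plus the usual location, resource group, and credential flags.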
### `aks-engine upgrade`
The `aks-engine upgrade` command orchestrates a Kubernetes version upgrade across your existing cluster nodes. Use this command to upgrade the Kubernetes version running your control plane, and optionally on all your nodes as well.
```sh
$ aks-engine upgrade --help
Upgrade an existing AKS Engine-created Kubernetes cluster, one node at a time
Usage:
aks-engine upgrade [flags]
Flags:
-m, --api-model string path to the generated apimodel.json file
--auth-method client_secret auth method (default:client_secret, `cli`, `client_certificate`, `device`) (default "client_secret")
--azure-env string the target Azure cloud (default "AzurePublicCloud")
--certificate-path string path to client certificate (used with --auth-method=client_certificate)
--client-id string client id (used with --auth-method=[client_secret|client_certificate])
--client-secret string client secret (used with --auth-method=client_secret)
--control-plane-only upgrade control plane VMs only, do not upgrade node pools
--cordon-drain-timeout int how long to wait for each vm to be cordoned in minutes (default -1)
-f, --force force upgrading the cluster to desired version. Allows same version upgrades and downgrades.
-h, --help help for upgrade
--identity-system azure_ad identity system (default:azure_ad, `adfs`) (default "azure_ad")
-b, --kubeconfig string the path of the kubeconfig file
--language string language to return error messages in (default "en-us")
-l, --location string location the cluster is deployed in (required)
--private-key-path string path to private key (used with --auth-method=client_certificate)
-g, --resource-group string the resource group where the cluster is deployed (required)
-s, --subscription-id string azure subscription id (required)
-k, --upgrade-version string desired kubernetes version (required)
--upgrade-windows-vhd upgrade image reference of the Windows nodes (default true)
--vm-timeout int how long to wait for each vm to be upgraded in minutes (default -1)
Global Flags:
--debug enable verbose debug logs
```
Detailed documentation on `aks-engine upgrade` can be found [here](../topics/upgrade.md).
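As a quick illustration (names, credentials, and the target version below are placeholders), upgrading an entire cluster looks something like:

```sh
aks-engine upgrade \
  --api-model _output/mycluster/apimodel.json \
  --location westus2 \
  --resource-group mycluster \
  --subscription-id $AZURE_SUB_ID \
  --client-id $AZURE_SP_ID \
  --client-secret $AZURE_SP_PW \
  --upgrade-version 1.19.1
```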
## Generate an ARM Template
AKS Engine also provides a command to generate a reusable ARM template only, without creating any actual Azure resources.
### `aks-engine generate`
The `aks-engine generate` command is similar to `aks-engine deploy`: it uses an API model (cluster definition) file as input to define the desired cluster configuration and shape of a new Kubernetes cluster. Unlike `deploy`, `aks-engine generate` does not actually submit any operational requests to Azure, but is instead used to generate a reusable ARM template which may be deployed at a later time. Use this command as a part of a workflow that creates one or more Kubernetes clusters via an ARM group deployment that takes an ARM template as input (e.g., `az deployment group create` using the standard `az` Azure CLI).
```sh
$ aks-engine generate --help
Generates an Azure Resource Manager template, parameters file and other assets for a cluster
Usage:
aks-engine generate [flags]
Flags:
-m, --api-model string path to your cluster definition file
--ca-certificate-path string path to the CA certificate to use for Kubernetes PKI assets
--ca-private-key-path string path to the CA private key to use for Kubernetes PKI assets
--client-id string client id
--client-secret string client secret
-h, --help help for generate
--no-pretty-print skip pretty printing the output
-o, --output-directory string output directory (derived from FQDN if absent)
--parameters-only only output parameters files
--set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
Global Flags:
--debug enable verbose debug logs
```
Detailed documentation on `aks-engine generate` can be found [here](../topics/creating_new_clusters.md#generate).
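For example, a sketch of that workflow (file paths and names are illustrative) generates the template locally and submits it later with the standard Azure CLI:
```sh
# Illustrative sketch: generate ARM artifacts, then deploy them with the az CLI.
# $CLUSTER_NAME stands in for the dnsPrefix defined in your cluster definition.
$ aks-engine generate --api-model ./kubernetes.json
$ az deployment group create --resource-group $RESOURCE_GROUP \
    --template-file _output/$CLUSTER_NAME/azuredeploy.json \
    --parameters _output/$CLUSTER_NAME/azuredeploy.parameters.json
```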
### `aks-engine rotate-certs`
The `aks-engine rotate-certs` command is currently experimental and not recommended for use on production clusters.
### `aks-engine get-logs`
The `aks-engine get-logs` command conveniently collects host VM logs from your Linux node VMs for local troubleshooting. *This command does not support Windows nodes*. The command assumes that your node VMs have an SSH daemon listening on port 22, that all nodes share a common SSH keypair for interactive login, and that a public endpoint exists on one of the control plane VMs to accommodate SSH agent key forwarding.
```sh
$ aks-engine get-logs --help
Usage:
aks-engine get-logs [flags]
Flags:
-m, --api-model string path to the generated apimodel.json file (required)
--control-plane-only get logs from control plane VMs only
-h, --help help for get-logs
--linux-script string path to the log collection script to execute on the cluster's Linux nodes (required)
--linux-ssh-private-key string path to a valid private SSH key to access the cluster's Linux nodes (required)
-l, --location string Azure location where the cluster is deployed (required)
-o, --output-directory string collected logs destination directory, derived from --api-model if missing
--ssh-host string FQDN, or IP address, of an SSH listener that can reach all nodes in the cluster (required)
Global Flags:
--debug enable verbose debug logs
```
The `aks-engine` codebase contains a working log retrieval script in `scripts/collect-logs.sh`, so you can use it to quickly gather logs from your node VMs:
```sh
$ git clone https://github.com/Azure/aks-engine.git && cd aks-engine
Cloning into 'aks-engine'...
remote: Enumerating objects: 44, done.
remote: Counting objects: 100% (44/44), done.
remote: Compressing objects: 100% (42/42), done.
remote: Total 92107 (delta 13), reused 15 (delta 1), pack-reused 92063
Receiving objects: 100% (92107/92107), 92.86 MiB | 7.27 MiB/s, done.
Resolving deltas: 100% (64711/64711), done.
$ export LATEST_AKS_ENGINE_RELEASE=v0.56.0
$ git checkout $LATEST_AKS_ENGINE_RELEASE
Note: checking out 'v0.56.0'.
You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by performing another checkout.
If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -b with the checkout command again. Example:
git checkout -b <new-branch-name>
HEAD is now at 666073d49 chore: updating Windows VHD with new cached artifacts (#3843)
$ bin/aks-engine get-logs --api-model _output/$CLUSTER_NAME/apimodel.json --location $LOCATION --linux-ssh-private-key _output/$CLUSTER_NAME-ssh --linux-script ./scripts/collect-logs.sh --ssh-host $CLUSTER_NAME.$LOCATION.cloudapp.azure.com
...
INFO[0062] Logs downloaded to _output/<name of cluster>/_logs
```
The example above assumes that the `$CLUSTER_NAME` environment variable is assigned the value of the cluster name (`properties.masterProfile.dnsPrefix` in the cluster API model), and that `$LOCATION` is assigned the location of the resource group in which your cluster was created.


@ -144,7 +144,7 @@ Once you are ready with the cluster definition file, you can either use AKS engi
### Deploy using AKS Engine
Follow the [instructions on how to deploy](quickstart.md#deploy) using the cluster definition (API model) file you prepared.
### Generate the cluster Azure Resource Manager template


@ -1,52 +0,0 @@
# Day 2 Operations
NOTE: The steps listed here were contributed by a community member, and are not officially supported. Please use them at your own risk.
The steps listed on this page describe a way to modify a running Kubernetes cluster deployed with `aks-engine` on Azure. These steps have only been tested with changes targeting actual Azure resources. Changes made to the Kubernetes configuration are not tested yet.
## `generate` and `deploy`
These are the common steps (unless described otherwise) you'll have to run after modifying an existing `apimodel.json` file.
* Modify the apimodel.json file located in the `_output/<clustername>` folder
* Run `aks-engine generate --api-model _output/<clustername>/apimodel.json`. This will update the `azuredeploy*` files needed for the new ARM deployment. These files are also located in the `_output` folder.
* Apply the changes by manually starting an ARM deployment. From within the `_output/<clustername>` directory, run
az deployment group create --template-file azuredeploy.json --parameters azuredeploy.parameters.json --resource-group "<my-resource-group>"
To use the `az` CLI tools you have to log in. More info can be found here: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest
_Note: I use `az deployment group` instead of `aks-engine deploy` because the latter seems to assume you are deploying a new cluster, and as a result overwrites the private SSH keys located in the `_output` folder._
* Grab a coffee
* Profit!
## Common scenarios (tested)
### Adding a node pool
Add (or copy) an entry in the `agentPoolProfiles` array.
### Removing a node pool
* Delete the related entry from the `agentPoolProfiles` section in the `_output/<clustername>/apimodel.json` file
* [Drain](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) nodes from inside Kubernetes
* `generate` and `deploy` (see above)
* Delete the VMs and related resources (disk, NIC, availability set) from the Azure portal (see the sketch after this list)
* Remove the pool from the original `apimodel.json` file
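The following sketch shows what draining and deleting one node of a removed availability set-backed pool could look like; the node and VM names are hypothetical, so confirm the real names with `kubectl get nodes` and `az vm list` before deleting anything.
```sh
# Illustrative sketch only: drain and remove one node of the deleted pool.
# Deleting the VM does not remove its NIC, disk, or availability set; clean
# those up separately (e.g. in the Azure portal) as described above.
kubectl drain k8s-oldpool-22116803-0 --ignore-daemonsets --delete-local-data
kubectl delete node k8s-oldpool-22116803-0
az vm delete -g <resource group of cluster> -n k8s-oldpool-22116803-0 --yes
```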
### Resizing a node pool
Use the `aks-engine scale` command
aks-engine scale --location westeurope --subscription-id "xxx" --resource-group "<my-resource-group>" \
--api-model ./somedir/apimodel.json --node-pool <nodepool name> --new-node-count <desired number of nodes> --apiserver <apiserver endpoint FQDN or IP address>
**Remember to also update your original apimodel.json file (used for the first deployment), or else you will end up with the original number of VMs after using the `generate` command described above.**
### Resize VMs in an existing agent pool
* Modify the `vmSize` in the `agentPoolProfiles` section
* `generate` and `deploy` (see above)
**Important: The default ARM deployment won't drain your Kubernetes nodes properly before 'rebooting' them. Please [drain](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) them manually before deploying the change**


@ -1,176 +0,0 @@
# Deploy a Kubernetes Cluster
## Install Prerequisites
All the commands in this guide require both the Azure CLI and `aks-engine`. Follow the [installation instructions to download aks-engine before continuing](quickstart.md#install) or [compile from source](quickstart.md#build-aks-engine-from-source).
For installation instructions see [the Azure CLI GitHub repository](https://github.com/Azure/azure-cli#installation) for the latest release.
## Overview
`aks-engine` reads a cluster definition which describes the size, shape, and configuration of your cluster. This guide takes the default configuration of one master and two Linux agents. If you would like to change the configuration, edit `examples/kubernetes.json` before continuing.
The `aks-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#aks-engine-the-long-way).
**NOTE:** AKS Engine creates a _cluster_; it _doesn't_ create an Azure Container Service resource. So clusters that you create using the `aks-engine` command (or ARM templates generated by the `aks-engine` command) won't show up as AKS resources, for example when you run `az acs list`. Think of `aks-engine` as the, er, engine which AKS uses to create clusters: you can use the same engine yourself, but AKS won't know about the results.
After the cluster is deployed the upgrade and [scale](../topics/scale.md) commands can be used to make updates to your cluster.
## Gather Information
* The subscription in which you would like to provision the cluster. This is a uuid which can be found with `az account list -o table`.
* Proper access rights within the subscription, especially the right to create and assign service principals to applications (see AKS Engine the Long Way, Step #2)
* A valid service principal with all the required create/manage permissions. Instructions to create a new service principal can be found [here](../topics/service-principals.md).
* A `dnsPrefix` which forms part of the hostname for your cluster (e.g. staging, prodwest, blueberry). The DNS prefix must be unique, so pick a random name.
* A location to provision the cluster e.g. `westus2`.
```sh
$ az account list -o table
Name CloudName SubscriptionId State IsDefault
----------------------------------------------- ----------- ------------------------------------ ------- -----------
Contoso Subscription AzureCloud 51ac25de-afdg-9201-d923-8d8e8e8e8e8e Enabled True
```
## Deploy
For this example, the subscription id is `51ac25de-afdg-9201-d923-8d8e8e8e8e8e`, the DNS prefix is `contoso-apple`, and location is `westus2`.
Run `aks-engine deploy` with the appropriate arguments:
```sh
$ aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
--client-id '<service principal client ID>' \
--client-secret '<service principal client secret>' \
--dns-prefix contoso-apple \
--location westus2 \
--api-model examples/kubernetes.json
INFO[0000] new API model file has been generated during merge: /tmp/mergedApiModel619868596
WARN[0002] apimodel: missing masterProfile.dnsPrefix will use "contoso-apple"
INFO[0025] Starting ARM Deployment contoso-apple-1423145182 in resource group contoso-apple. This will take some time...
INFO[0256] Finished ARM Deployment (contoso-apple-1423145182). Succeeded
```
`aks-engine` will output Azure Resource Manager (ARM) templates, SSH keys, and a kubeconfig file in `_output/contoso-apple-59769a59` directory:
* `_output/contoso-apple-59769a59/azureuser_rsa`
* `_output/contoso-apple-59769a59/kubeconfig/kubeconfig.westus2.json`
`aks-engine` generates kubeconfig files for each possible region. Access the new cluster by using the kubeconfig generated for the cluster's location. This example used `westus2`, so the kubeconfig is `_output/<clustername>/kubeconfig/kubeconfig.westus2.json`:
```sh
$ KUBECONFIG=_output/contoso-apple-59769a59/kubeconfig/kubeconfig.westus2.json kubectl cluster-info
Kubernetes master is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com
CoreDNS is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Metrics-server is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
```
Administrative note: By default, the directory where `aks-engine` stores cluster configuration (`_output/contoso-apple` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`. To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `aks-engine deploy` command. On a related note, include an `--auto-suffix` option to append a randomly generated suffix to the dns-prefix to form the resource group name, for example if your workflow requires a common prefix across multiple cluster deployments. Using the `--auto-suffix` pattern appends a compressed timestamp to ensure a unique cluster name (and thus ensure that each deployment's configuration artifacts will be stored locally under a discrete `_output/<resource-group-name>/` directory).
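To make that concrete, here is a sketch of both patterns, reusing the example values from above; the service principal values are placeholders.
```sh
# Re-deploy into the same resource group, overwriting the existing _output directory:
$ aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
    --client-id '<service principal client ID>' \
    --client-secret '<service principal client secret>' \
    --dns-prefix contoso-apple \
    --location westus2 \
    --api-model examples/kubernetes.json \
    --force-overwrite

# Or let aks-engine append a compressed timestamp to the dnsPrefix instead:
$ aks-engine deploy --subscription-id 51ac25de-afdg-9201-d923-8d8e8e8e8e8e \
    --client-id '<service principal client ID>' \
    --client-secret '<service principal client secret>' \
    --dns-prefix contoso-apple \
    --location westus2 \
    --api-model examples/kubernetes.json \
    --auto-suffix
```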
**Note**: If the cluster is using an existing VNET please see the [Custom VNET](custom-vnet.md) feature documentation for additional steps that must be completed after cluster provisioning.
The deploy command lets you override any values under the properties tag (even in arrays) from the cluster definition file without having to update the file. You can use the `--set` flag to do that. For example:
```bash
aks-engine deploy --resource-group "your-resource-group" \
--location "westeurope" \
--subscription-id "your-subscription-id" \
--client-id '<your service principal client ID>' \
--client-secret '<your service principal client secret>' \
--api-model "./apimodel.json" \
--set masterProfile.dnsPrefix="your-dns-prefix-override" \
--set agentPoolProfiles[0].name="your-agentpool-0-name-override" \
--set agentPoolProfiles[0].count=1 \
--set linuxProfile.ssh.publicKeys[0].keyData="ssh-rsa PUBLICKEY azureuser@linuxvm" \
--set servicePrincipalProfile.clientId="spn-client-id" \
--set servicePrincipalProfile.secret="spn-client-secret"
```
<a href="#the-long-way"></a>
## AKS Engine the Long Way
### Step 1: Generate an SSH Key
In addition to using Kubernetes APIs to interact with the clusters, cluster operators may access the master and agent machines using SSH.
If you don't have an SSH key [cluster operators may generate a new one](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent/).
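If you need to create one, a minimal sketch looks like this; the file path and comment are your choice, and the public half of the pair is what you will later paste into the cluster definition's `keyData` field.
```sh
# Generate a new 4096-bit RSA keypair; ~/.ssh/id_rsa.pub holds the public key.
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -C "azureuser"
```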
### Step 2: Create a Service Principal
Kubernetes clusters have integrated support for various cloud providers as core functionality. On Azure, `aks-engine` uses a Service Principal to interact with Azure Resource Manager (ARM). Follow the [instructions](../topics/service-principals.md) to create a new service principal and grant it the necessary IAM role to create Azure resources.
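As a quick sketch (the subscription scope below reuses the example subscription id from this guide; the linked instructions are the authoritative reference), a Contributor-scoped service principal can be created with the Azure CLI:
```sh
# Create a service principal scoped to the target subscription. The appId and
# password fields in the output map to clientId and secret in the cluster definition.
az ad sp create-for-rbac --role="Contributor" \
    --scopes="/subscriptions/51ac25de-afdg-9201-d923-8d8e8e8e8e8e"
```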
### Step 3: Edit your Cluster Definition
AKS Engine consumes a cluster definition which outlines the desired shape, size, and configuration of Kubernetes. There are a number of features that can be enabled through the cluster definition: check the `examples` directory for a number of... examples.
Edit the [simple Kubernetes cluster definition](../../examples/kubernetes.json) and fill out the required values:
* `dnsPrefix`: must be a region-unique name and will form part of the hostname (e.g. myprod1, staging, leapingllama) - be unique!
* `keyData`: must contain the public portion of an SSH key - this will be associated with the `adminUsername` value found in the same section of the cluster definition (e.g. 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABA....')
* `clientId`: this is the service principal's appId uuid or name from step 2
* `secret`: this is the service principal's password or randomly-generated password from step 2
Optional: attach to an existing virtual network (VNET). Details [here](custom-vnet.md)
Note: you can then use the `--set` option of the generate command to override values from the cluster definition file directly in the command line (cf. [Step 4](deploy.md#step-4-generate-the-templates))
### Step 4: Generate the Templates
The generate command takes a cluster definition and outputs a number of templates which describe your Kubernetes cluster. By default, `generate` will create a new directory named after your cluster nested in the `_output` directory. If my dnsPrefix was `larry`, my cluster templates would be found in `_output/larry-`.
Run `aks-engine generate examples/kubernetes.json`
The generate command lets you override values from the cluster definition file without having to update the file. You can use the `--set` flag to do that:
```sh
aks-engine generate --set linuxProfile.adminUsername=myNewUsername,masterProfile.count=3 clusterdefinition.json
```
The `--set` flag only supports JSON properties under `properties`. You can also work with arrays, like the following:
```sh
aks-engine generate --set agentPoolProfiles[0].count=5,agentPoolProfiles[1].name=myPoolName clusterdefinition.json
```
### Step 5: Submit your Templates to Azure Resource Manager (ARM)
[Deploy the output azuredeploy.json and azuredeploy.parameters.json](deploy.md#deployment-usage)
* To enable the optional network policy enforcement using calico, you have to set the parameter during this step according to this [guide](../topics/features.md#optional-enable-network-policy-enforcement-using-calico)
* To enable the optional network policy enforcement using cilium, you have to set the parameter during this step according to this [guide](../topics/features.md#optional-enable-network-policy-enforcement-using-cilium)
* To enable the optional network policy enforcement using antrea, you have to set the parameter during this step according to this [guide](../topics/features.md#optional-enable-network-policy-enforcement-using-antrea)
**Note**: If the cluster is using an existing VNET please see the [Custom VNET](custom-vnet.md) feature documentation for additional steps that must be completed after cluster provisioning.
## Checking VM tags
### First, we get the list of master and agent VMs in the cluster
```sh
az vm list -g <resource group of cluster> -o table
Name ResourceGroup Location
------------------------ ------------------------------- -------------
k8s-agentpool1-22116803-1 XXXXXXXXXXXX southeastasia
k8s-master-22116803-0 XXXXXXXXXXXX southeastasia
```
### Once we have the VM names, we can check the tags associated with any of the VMs using the command below
```sh
az vm show -g <resource group of cluster> -n <name of Master or agent VM> --query tags
```
Sample JSON output of this command is shown below. This command can also be used to check the `aks-engine` version which was used to create the cluster.
```json
{
"aksEngineVersion": "v0.35.1",
"creationSource": "aksengine-k8s-master-22116803-0",
"orchestrator": "Kubernetes:1.12.8",
"poolName": "master",
"resourceNameSuffix": "22116803"
}
```


@ -1,6 +1,6 @@
# Quickstart Guide
AKS Engine (`aks-engine`) generates ARM (Azure Resource Manager) templates, and also deploys them via ARM to Microsoft Azure cloud environments. The input to the `aks-engine` command line tool is a cluster definition JSON file (referred to throughout the docs interchangeably as either "API model", "cluster config", or "cluster definition") which describes the desired cluster configuration, including enabled or disabled features, for both the control plane running on "master" VMs and one or more node pools.
## Prerequisites
@ -11,24 +11,24 @@ The following prerequisites are required:
<a href="#install-aks-engine"></a> <a href="#install-aks-engine"></a>
## Install the `aks-engine` binary ## Install the `aks-engine` command line tool
Binary downloads for the latest version of AKS Engine are available [on Github](https://github.com/Azure/aks-engine/releases/latest). Download the package for your operating system, and extract the `aks-engine` binary (and optionally integrate it to your `$PATH` for more convenient CLI usage). Binary downloads for the latest version of AKS Engine are available [on Github](https://github.com/Azure/aks-engine/releases/latest). Download the package for your operating system, and extract the `aks-engine` binary (and optionally integrate it to your `$PATH` for more convenient CLI usage).
You can also choose to install the `aks-engine` binary using [gofish][gofish-about]. To do so, execute the command `gofish install aks-engine`. You can install gofish following the [instructions][gofish-install] for your OS. You can also choose to install `aks-engine` using [gofish][gofish-about]. To do so, execute the command `gofish install aks-engine`. You can install gofish following the [instructions][gofish-install] for your OS.
On macOS, you can install the `aks-engine` binary with [Homebrew][homebrew]. Run the command `brew install Azure/aks-engine/aks-engine` to do so. You can install Homebrew following these [instructions][homebrew-install]. On macOS, you can install `aks-engine` with [Homebrew][homebrew]. Run the command `brew install Azure/aks-engine/aks-engine` to do so. You can install Homebrew following these [instructions][homebrew-install].
On Windows, you can install `aks-engine.exe` via [Chocolatey][choco] by executing the command `choco install aks-engine`. You can install Chocolatey following these [instructions][choco-install]. You can also install `aks-engine.exe` via [Scoop][scoop] by executing the command `scoop install aks-engine`. You can install Scoop following these [instructions][scoop-install]. On Windows, you can install `aks-engine.exe` via [Chocolatey][choco] by executing the command `choco install aks-engine`. You can install Chocolatey following these [instructions][choco-install]. You can also install `aks-engine.exe` via [Scoop][scoop] by executing the command `scoop install aks-engine`. You can install Scoop following these [instructions][scoop-install].
On Linux, if you prefer, you can install the `aks-engine` binary via install script doing: On Linux, if you prefer, you can install `aks-engine` via install script doing:
```bash ```bash
$ curl -o get-akse.sh https://raw.githubusercontent.com/Azure/aks-engine/master/scripts/get-akse.sh $ curl -o get-akse.sh https://raw.githubusercontent.com/Azure/aks-engine/master/scripts/get-akse.sh
$ chmod 700 get-akse.sh $ chmod 700 get-akse.sh
$ ./get-akse.sh $ ./get-akse.sh
``` ```
If you would prefer to build the `aks-engine` binary from source, or if you're interested in contributing to AKS Engine, see [the developer guide][developer-guide] for more information. If you would prefer to build `aks-engine` from source, or if you're interested in contributing to AKS Engine, see [the developer guide][developer-guide] for more information.
## Completion ## Completion
@ -40,28 +40,27 @@ source <(aks-engine completion)
## Deploy your First Cluster
`aks-engine` reads a cluster definition which describes the size, shape, and configuration of your cluster. This guide takes the default configuration of a control plane with one master VM, and a single node pool with two Linux nodes, exemplified [here](/examples/kubernetes.json). If you would like to change the configuration, edit `examples/kubernetes.json` before continuing.
The `aks-engine deploy` command automates creation of a Service Principal, Resource Group and SSH key for your cluster. If operators need more control or are interested in the individual steps see the ["Long Way" section below](#aks-engine-the-long-way).
**NOTE:** AKS Engine creates a _cluster_; it _doesn't_ create an Azure Kubernetes Service (AKS) resource. Clusters that you create using the `aks-engine` command (or ARM templates generated by the `aks-engine` command) won't show up as AKS resources, for example when you run `az aks list`. The resultant resource group + IaaS will be entirely under your own control and management, and unknown to AKS or any other Azure service.
After the cluster is deployed, the [scale][], [addpool][], [update][], and [upgrade][] commands may be used to make updates to your cluster, with some conditions (the [scale][], [addpool][], [update][], and [upgrade][] docs will enumerate these conditions).
### Gather Information
* The subscription in which you would like to provision the cluster. This is a UUID which can be found with `az account list -o table`.
```sh
$ az account list -o table
Name                                             CloudName    SubscriptionId                        State    IsDefault
-----------------------------------------------  -----------  ------------------------------------  -------  -----------
Contoso Subscription                             AzureCloud   51ac25de-afdg-9201-d923-8d8e8e8e8e8e  Enabled  True
```
* Proper access rights within the subscription; especially the right to create and assign [service principals][sp] to applications
* A `dnsPrefix` which forms part of the hostname for your cluster (e.g. staging, prodwest, blueberry). In the [example](/examples/kubernetes.json) we're using we are not building a private cluster (declared by assigning a `true` value to `properties.orchestratorProfile.kubernetesConfig.privateCluster.enabled` in your API model: see [this example](/examples/kubernetes-config/kubernetes-private-cluster.json)), and so we have to consider that the value of `dnsPrefix` *must* produce a unique fully-qualified domain name DNS record composed of <value of `dnsPrefix`>.<value of `location`>.cloudapp.azure.com. Depending on the uniqueness of your `dnsPrefix`, it may be a good idea to pre-check the availability of the resultant DNS record prior to deployment. (Also see the `--auto-suffix` option below if having to do this pre-check is onerous, and you don't care about having a randomly named cluster.)
* **NOTE:** The `location` value may be omitted in your cluster definition JSON file if you are deploying to Azure Public Cloud; it will be automatically inferred during ARM template deployment as equal to the location of the resource group at the time of resource group creation. Also **NOTE:** that the ".cloudapp.azure.com" FQDN suffix example above also assumes an Azure Public Cloud deployment. When you provide a `location` value that maps to a non-public cloud, the FQDN suffix will be concatenated appropriately for that supported cloud environment, e.g., ".cloudapp.chinacloudapi.cn" for mooncake (Azure China Cloud); or ".cloudapp.usgovcloudapi.net" for usgov (Azure Government Cloud)
* Choose a location to provision the cluster e.g. `westus2`.
### Deploy
@ -91,7 +90,7 @@ $ az group create --name contoso-apple --location westus2
}
```
Again, because in this example we are deploying to Azure Public Cloud, we may omit the `location` property from our API model; although strictly speaking we could add `westus2` (the region where we just created our `contoso-apple` resource group) to our [example](/examples/kubernetes.json) if we want to be more explicit:
```
{
@ -114,7 +113,7 @@ $ az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/51ac25d
}
```
Make a note of the `appId` and the `password` fields, as we will be providing them as the values to `client-id` and `client-secret` in the next step, respectively.
Finally, run `aks-engine deploy` with the appropriate arguments:
@ -135,15 +134,38 @@ INFO[0025] Starting ARM Deployment contoso-apple-1423145182 in resource group co
INFO[0256] Finished ARM Deployment (contoso-apple-1423145182). Succeeded
```
Note that we also used the `--set` CLI argument twice to inject the service principal `appId` and `password` into the API model:
```
--set servicePrincipalProfile.clientId="47a62f0b-917c-4def-aa85-9b010455e591" \
--set servicePrincipalProfile.secret="26054d2b-799b-448e-962a-783d0d6f976b"
```
The `--set` argument allows runtime overrides of the values in the input `--api-model` file. In this case, the example API model under `examples/kubernetes.json` doesn't include any real service principal secrets, so we need to either include our desired secrets using the `--set` mechanism described above, or manually fill in these empty string values in the API model:
```
...
"servicePrincipalProfile": {
"clientId": "",
"secret": ""
}
...
```
`aks-engine` will generate ARM templates, SSH keys, and a kubeconfig (a specification that may be used as input to the `kubectl` command to establish a privileged connection to the Kubernetes apiserver; see [here](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) for more documentation), and then persist those as local files under the `_output/contoso-apple` directory:
```sh
$ ls _output/contoso-apple/
apimodel.json azuredeploy.parameters.json client.crt etcdpeer0.crt kubeconfig
apiserver.crt azureuser_rsa client.key etcdpeer0.key kubectlClient.crt
apiserver.key ca.crt etcdclient.crt etcdserver.crt kubectlClient.key
azuredeploy.json ca.key etcdclient.key etcdserver.key
```
Access the new cluster by using the kubeconfig generated for the cluster's location. This example used `westus2`, so the kubeconfig is located at `_output/contoso-apple/kubeconfig/kubeconfig.westus2.json`:
```sh
$ KUBECONFIG=_output/contoso-apple/kubeconfig/kubeconfig.westus2.json kubectl cluster-info
Kubernetes master is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com
CoreDNS is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Metrics-server is running at https://contoso-apple-59769a59.westus2.cloudapp.azure.com/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
@ -151,6 +173,8 @@ Metrics-server is running at https://contoso-apple-59769a59.westus2.cloudapp.azu
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
```
The files saved to the `_output/contoso-apple/` directory (using our example) are critical to keep safe for any future cluster operations using the `aks-engine` CLI. Store them somewhere safe and reliable!
Administrative note: By default, the directory where aks-engine stores cluster configuration (`_output/contoso-apple` above) won't be overwritten as a result of subsequent attempts to deploy a cluster using the same `--dns-prefix`. To re-use the same resource group name repeatedly, include the `--force-overwrite` command line option with your `aks-engine deploy` command. On a related note, include an `--auto-suffix` option to append a randomly generated suffix to the dns-prefix to form the resource group name, for example if your workflow requires a common prefix across multiple cluster deployments. Using the `--auto-suffix` pattern appends a compressed timestamp to ensure a unique cluster name (and thus ensure that each deployment's configuration artifacts will be stored locally under a discrete `_output/<resource-group-name>/` directory).
**Note**: If the cluster is using an existing VNET, please see the [Custom VNET][custom-vnet] feature documentation for additional steps that must be completed after cluster provisioning.


@ -6,4 +6,4 @@ AKS Engine enables you to create a customized Kubernetes cluster on Microsoft Az
The example JSON API model file in this directory shows you how to configure up to 4 attached disks. Disks can range from 1 to 1024 GB in size.
1. **kubernetes.json** - deploying and using [Kubernetes](../../docs/tutorials/quickstart.md#deploy)


@ -6,7 +6,7 @@ AKS Engine enables you to create a customized Kubernetes cluster on Microsoft Az
The example shows you how to configure installing a cert from keyvault. These certs are assumed to be in the secrets portion of your keyvault:
1. **kubernetes.json** - deploying and using [Kubernetes](../../docs/tutorials/quickstart.md#deploy)
On windows machines certificates will be installed under the machine in the specified store.
On linux machines the certificates will be installed in the folder /var/lib/waagent/. There will be two files


@ -2,7 +2,7 @@
## Overview
These cluster definition examples show how to create customized [Kubernetes](../../docs/tutorials/quickstart.md#deploy) clusters on Microsoft Azure.
1. [**kubernetes-clustersubnet.json**](kubernetes-clustersubnet.json) - Configuring a custom cluster IP subnet.
2. [**kubernetes-maxpods.json**](kubernetes-maxpods.json) - Configuring a custom maximum limit on the number of pods per node.


@ -28,7 +28,7 @@ This template will deploy the [Kubernetes Datastore backed version of Calico](ht
If deploying on a K8s 1.8 or later cluster, then egress policies are also supported!
To understand how to deploy this template, please read the baseline [Kubernetes](../../docs/tutorials/quickstart.md#deploy) document, and use the appropriate **kubernetes-calico-[azure|kubenet].json** example file in this folder as an API model reference.
### Post installation


@ -424,6 +424,9 @@ func (a *Properties) ValidateOrchestratorProfile(isUpdate bool) error {
func (a *Properties) validateMasterProfile(isUpdate bool) error {
m := a.MasterProfile
if m.Count == 1 {
log.Warnf("Running only 1 control plane VM not recommended for production clusters, use 3 or 5 for control plane redundancy")
}
if a.OrchestratorProfile.OrchestratorType == Kubernetes {
if m.IsVirtualMachineScaleSets() && m.VnetSubnetID != "" && m.FirstConsecutiveStaticIP != "" {
return errors.New("when masterProfile's availabilityProfile is VirtualMachineScaleSets and a vnetSubnetID is specified, the firstConsecutiveStaticIP should be empty and will be determined by an offset from the first IP in the vnetCidr")


@ -3370,6 +3370,21 @@ func ExampleProperties_validateLocation() {
// level=warning msg="No \"location\" value was specified, AKS Engine will generate an ARM template configuration valid for regions in public cloud only"
}
func ExampleProperties_validateMasterProfile() {
log.SetOutput(os.Stdout)
log.SetFormatter(&log.TextFormatter{
DisableColors: true,
DisableTimestamp: true,
})
cs := getK8sDefaultContainerService(false)
cs.Properties.MasterProfile.Count = 1
if err := cs.Properties.validateMasterProfile(false); err != nil {
log.Errorf("shouldn't error with 1 control plane VM, got %s", err.Error())
}
// Output:
// level=warning msg="Running only 1 control plane VM not recommended for production clusters, use 3 or 5 for control plane redundancy"
}
func ExampleProperties_validateZones() {
log.SetOutput(os.Stdout)
log.SetFormatter(&log.TextFormatter{