chore: remove support for Kubernetes v1.18.x (#4676)

Matt Boersma 2021-10-06 10:50:16 -06:00 committed by GitHub
Parent 1baab4907c
Commit f4e2d249f0
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
18 changed files: 28 additions and 109 deletions

.github/workflows/create-release-branch.yaml (vendored): 13 changes

@@ -40,19 +40,6 @@ jobs:
sudo chmod +x /usr/local/bin/k
- name: Build aks-engine binary
run: make build-binary
- name: Validate 1.18 no-egress scenario
env:
ORCHESTRATOR_RELEASE: "1.18"
CLUSTER_DEFINITION: "examples/no_outbound.json"
SUBSCRIPTION_ID: ${{ secrets.TEST_AZURE_SUB_ID }}
CLIENT_ID: ${{ secrets.TEST_AZURE_SP_ID }}
CLIENT_SECRET: ${{ secrets.TEST_AZURE_SP_PW }}
LOCATION: "westus2"
TENANT_ID: ${{ secrets.TEST_AZURE_TENANT_ID }}
SKIP_LOGS_COLLECTION: true
SKIP_TEST: true
AZURE_CORE_ONLY_SHOW_ERRORS: True
run: make test-kubernetes
- name: Validate 1.19 no-egress scenario
env:
ORCHESTRATOR_RELEASE: "1.19"

.github/workflows/release.yaml (vendored): 13 changes

@@ -45,19 +45,6 @@ jobs:
- name: print git status after build
run: |
git status
- name: Validate 1.18 no-egress scenario
env:
ORCHESTRATOR_RELEASE: "1.18"
CLUSTER_DEFINITION: "examples/no_outbound.json"
SUBSCRIPTION_ID: ${{ secrets.TEST_AZURE_SUB_ID }}
CLIENT_ID: ${{ secrets.TEST_AZURE_SP_ID }}
CLIENT_SECRET: ${{ secrets.TEST_AZURE_SP_PW }}
LOCATION: "westus2"
TENANT_ID: ${{ secrets.TEST_AZURE_TENANT_ID }}
SKIP_LOGS_COLLECTION: true
SKIP_TEST: true
AZURE_CORE_ONLY_SHOW_ERRORS: True
run: make test-kubernetes
- name: Validate 1.19 no-egress scenario
env:
ORCHESTRATOR_RELEASE: "1.19"


@@ -63,16 +63,6 @@ jobs:
- script: make test
displayName: Run unit tests
- template: e2e-job-template.yaml
parameters:
name: 'k8s_1_18_docker_e2e'
k8sRelease: '1.18'
apimodel: 'examples/e2e-tests/kubernetes/release/default/definition.json'
createVNET: true
enableKMSEncryption: false
containerRuntime: 'docker'
runSSHTests: true
- template: e2e-job-template.yaml
parameters:
name: 'k8s_1_19_docker_e2e'
@@ -123,16 +113,6 @@ jobs:
containerRuntime: 'docker'
runSSHTests: true
- template: e2e-job-template.yaml
parameters:
name: 'k8s_1_18_containerd_e2e'
k8sRelease: '1.18'
apimodel: 'examples/e2e-tests/kubernetes/release/default/definition-no-vnet.json'
createVNET: false
enableKMSEncryption: false
containerRuntime: 'containerd'
runSSHTests: true
- template: e2e-job-template.yaml
parameters:
name: 'k8s_1_19_containerd_e2e'

Jenkinsfile (vendored): 2 changes

@@ -6,7 +6,7 @@ defaultEnv = [
VALIDATE_CPU_LOAD: false,
] + params
def k8sVersions = ["1.18", "1.19", "1.20", "1.21", "1.22", "1.23"]
def k8sVersions = ["1.19", "1.20", "1.21", "1.22", "1.23"]
def latestReleasedVersion = "1.22"
def tasks = [:]
def testConfigs = []


@@ -562,11 +562,6 @@ func TestExampleAPIModels(t *testing.T) {
apiModelPath: "../examples/kubernetes-msi-userassigned/kube-vmss.json",
setArgs: defaultSet,
},
{
name: "1.18 example",
apiModelPath: "../examples/kubernetes-releases/kubernetes1.18.json",
setArgs: defaultSet,
},
{
name: "1.19 example",
apiModelPath: "../examples/kubernetes-releases/kubernetes1.19.json",


@@ -206,7 +206,7 @@ $ make test-kubernetes
In practice, running E2E tests locally requires lots of environmental context in order to tell the E2E runner what kind of cluster configuration you want to test, which tests to run or skip, what level of timeout tolerance to permit, and many other runtime-configurable options that express the exact test criteria you intend to validate. A real-world E2E invocation may look like this instead:
```sh
$ ORCHESTRATOR_RELEASE=1.18 CLUSTER_DEFINITION=examples/kubernetes.json SUBSCRIPTION_ID=$TEST_AZURE_SUB_ID CLIENT_ID=$TEST_AZURE_SP_ID CLIENT_SECRET=$TEST_AZURE_SP_PW TENANT_ID=$TEST_AZURE_TENANT_ID LOCATION=$TEST_AZURE_REGION CLEANUP_ON_EXIT=false make test-kubernetes
$ ORCHESTRATOR_RELEASE=1.22 CLUSTER_DEFINITION=examples/kubernetes.json SUBSCRIPTION_ID=$TEST_AZURE_SUB_ID CLIENT_ID=$TEST_AZURE_SP_ID CLIENT_SECRET=$TEST_AZURE_SP_PW TENANT_ID=$TEST_AZURE_TENANT_ID LOCATION=$TEST_AZURE_REGION CLEANUP_ON_EXIT=false make test-kubernetes
```
Thorough guidance around effectively running E2E tests to validate source code changes can be found [here](running-tests.md).
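
For illustration only (this wrapper is not part of the aks-engine repository): a minimal Go sketch of the pattern the invocation above relies on, checking that the required environment variables are set before shelling out to `make test-kubernetes`. The variable names mirror the example; everything else here is an assumption.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Environment keys taken from the example invocation above.
	required := []string{
		"ORCHESTRATOR_RELEASE", "CLUSTER_DEFINITION", "SUBSCRIPTION_ID",
		"CLIENT_ID", "CLIENT_SECRET", "TENANT_ID", "LOCATION",
	}
	for _, key := range required {
		if os.Getenv(key) == "" {
			fmt.Fprintf(os.Stderr, "missing required environment variable %s\n", key)
			os.Exit(1)
		}
	}
	// Delegate to the same make target; the E2E configuration is passed
	// through the environment unchanged.
	cmd := exec.Command("make", "test-kubernetes")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	cmd.Env = os.Environ()
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```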


@@ -20,7 +20,8 @@
"--location=${input:location}",
"--client-id=${env:CLIENT_ID}",
"--client-secret=${env:CLIENT_SECRET}",
"--set", "linuxProfile.ssh.publicKeys[0].keyData=${env:AKSE_PUB_KEY}"
"--set",
"linuxProfile.ssh.publicKeys[0].keyData=${env:AKSE_PUB_KEY}"
],
"showLog": true
},
@@ -35,7 +36,8 @@
"generate",
"--debug",
"--api-model=${workspaceRoot}/examples/kubernetes.json",
"--set", "linuxProfile.ssh.publicKeys[0].keyData=${env:AKSE_PUB_KEY}"
"--set",
"linuxProfile.ssh.publicKeys[0].keyData=${env:AKSE_PUB_KEY}"
],
"showLog": true
},
@@ -56,7 +58,7 @@
"--new-node-count=${input:newNodeCount}",
"--client-id=${env:CLIENT_ID}",
"--client-secret=${env:CLIENT_SECRET}"
]
]
},
{
"name": "upgrade",
@@ -93,7 +95,7 @@
"id": "kubernetesVersion",
"type": "promptString",
"description": "Kubernetes version:",
"default": "1.18.1"
"default": "1.22.2"
},
{
"id": "location",


@@ -9,7 +9,7 @@ As mentioned briefly in the [developer guide](developer-guide.md), a `make` targ
$ make build
# run e2e tests
$ ORCHESTRATOR_RELEASE=1.18 \
$ ORCHESTRATOR_RELEASE=1.22 \
CLUSTER_DEFINITION=examples/kubernetes.json \
SUBSCRIPTION_ID=$TEST_AZURE_SUB_ID \
CLIENT_ID=$TEST_AZURE_SP_ID \
@@ -21,7 +21,7 @@ $ ORCHESTRATOR_RELEASE=1.18 \
make test-kubernetes
```
The above, simple example describes an E2E test invocation against a base cluster configuration defined by the API model at `examples/kubernetes.json`, overriding any specific Kubernetes version therein to validate the most recent, supported v1.18 release; using Azure service principal authentication defined in the various `$TEST_AZURE_`* environment variables; deployed to the region defined by the environment variable `$AZURE_REGION`; and finally, we tell the E2E test runner not to delete the cluster resources (i.e., the resource group) following the completion of the tests.
The above, simple example describes an E2E test invocation against a base cluster configuration defined by the API model at `examples/kubernetes.json`, overriding any specific Kubernetes version therein to validate the most recent, supported v1.22 release; using Azure service principal authentication defined in the various `$TEST_AZURE_`* environment variables; deployed to the region defined by the environment variable `$AZURE_REGION`; and finally, we tell the E2E test runner not to delete the cluster resources (i.e., the resource group) following the completion of the tests.
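
A hedged sketch (not the actual E2E runner code) of the kind of guard this change implies: rejecting an `ORCHESTRATOR_RELEASE` that aks-engine no longer supports before a long-running deployment starts. The release list mirrors the Jenkinsfile matrix in this commit; the function name is made up.

```go
package main

import (
	"fmt"
	"os"
)

// Releases exercised by CI after this change (see the Jenkinsfile hunk above).
var supportedReleases = map[string]bool{
	"1.19": true, "1.20": true, "1.21": true, "1.22": true, "1.23": true,
}

func validateRelease(release string) error {
	if !supportedReleases[release] {
		return fmt.Errorf("ORCHESTRATOR_RELEASE %q is not a supported release", release)
	}
	return nil
}

func main() {
	if err := validateRelease(os.Getenv("ORCHESTRATOR_RELEASE")); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
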
Example output from such an invocation is [here](e2e-output-example.log). If your test run succeeded, you'll see this in your console stdout at the conclusion of the test run:


@@ -76,10 +76,10 @@ After that, you will have to upload a tarball (`_output/release-tars/kubernetes-
### AKS Engine API Model
* Open the AKS Engine API Model (e.g. a file from the examples directory). Set `orchestratorRelease` to 1.18 or higher so various defaults and configuration are properly applied to the ARM template and artifacts. Additionally, add the following to the `kubernetesConfig` section:
* Open the AKS Engine API Model (e.g. a file from the examples directory). Set `orchestratorRelease` to 1.19 or higher so various defaults and configuration are properly applied to the ARM template and artifacts. Additionally, add the following to the `kubernetesConfig` section:
```
"orchestratorRelease": "1.18",
"orchestratorRelease": "1.19",
"kubernetesConfig": {
...
"customKubeAPIServerImage": "<your-docker-registry>/kube-apiserver-amd64:<your-custom-tag>",


@@ -25,18 +25,18 @@ In order to ensure that your `aks-engine upgrade` operation runs smoothly, there
`aks-engine upgrade` relies on some resources (such as VMs) to be named in accordance with the original `aks-engine` deployment. In summary, the set of Azure resources in the resource group are mutually reconcilable by `aks-engine upgrade` only if they have been exclusively created and managed as the result of a series of successive ARM template deployments originating from various AKS Engine commands that have run to completion successfully.
3) `aks-engine upgrade` allows upgrading the Kubernetes version to any AKS Engine-supported patch release in the current minor release channel that is greater than the current version on the cluster (e.g., from `1.18.8` to `1.18.9`), or to the next aks-engine-supported minor version (e.g., from `1.18.8` to `1.19.1`). (Or, see [`aks-engine upgrade --force`](#force-upgrade) if you want to bypass AKS Engine "supported version requirements"). In practice, the next AKS Engine-supported minor version will commonly be a single minor version ahead of the current cluster version. However, if the cluster has not been upgraded in a significant amount of time, the "next" minor version may have no longer be supported by aks-engine. In such a case, your long-lived cluster will be upgradable to the nearest, supported minor version that `aks-engine` supports at the time of upgrade (e.g., from `1.14.7` to `1.16.15`).
3) `aks-engine upgrade` allows upgrading the Kubernetes version to any AKS Engine-supported patch release in the current minor release channel that is greater than the current version on the cluster (e.g., from `1.21.4` to `1.21.5`), or to the next aks-engine-supported minor version (e.g., from `1.21.5` to `1.22.2`). (Or, see [`aks-engine upgrade --force`](#force-upgrade) if you want to bypass AKS Engine "supported version requirements"). In practice, the next AKS Engine-supported minor version will commonly be a single minor version ahead of the current cluster version. However, if the cluster has not been upgraded in a significant amount of time, the "next" minor version may no longer be supported by aks-engine. In such a case, your long-lived cluster will be upgradable to the nearest minor version that `aks-engine` supports at the time of upgrade (e.g., from `1.17.18` to `1.19.15`). A simplified sketch of this version rule appears below, after the summary paragraph.
To get the list of all available Kubernetes versions and upgrades, run the `get-versions` command:
```bash
./bin/aks-engine get-versions
aks-engine get-versions
```
To get the versions of Kubernetes that your particular cluster version is upgradable to, provide its current Kubernetes version in the `version` arg:
```bash
./bin/aks-engine get-versions --version 1.18.8
aks-engine get-versions --version 1.19.14
```
4) `aks-engine upgrade` relies upon a working connection to the cluster control plane during upgrade, both (1) to validate successful upgrade progress, and (2) to cordon and drain nodes before upgrading them, in order to minimize operational downtime of any running cluster workloads. If you are upgrading a **private cluster**, you must run `aks-engine upgrade` from a host VM that has network access to the control plane, for example a jumpbox VM that resides in the same VNET as the master VMs. For more information on private clusters [refer to this documentation](features.md#feat-private-cluster).
@@ -47,8 +47,8 @@ In order to ensure that your `aks-engine upgrade` operation runs smoothly, there
7) `aks-engine upgrade` will automatically re-generate your cluster configuration to best pair with the desired new version of Kubernetes, and/or the version of `aks-engine` that is used to execute `aks-engine upgrade`. To use an example of both:
- When you upgrade to (for example) Kubernetes 1.18 from 1.17, AKS Engine will automatically change your control plane configuration (e.g., `coredns`, `metrics-server`, `kube-proxy`) so that the cluster component configurations have a close, known-working affinity with 1.18.
- When you perform an upgrade, even if it is a Kubernetes patch release upgrade such as 1.18.8 to 1.18.9, but you use a newer version of `aks-engine`, a newer version of `etcd` (for example) may have been validated and configured as default since the version of `aks-engine` used to build the cluster was released. So, for example, without any explicit user direction, the newly upgraded cluster will now be running etcd v3.2.26 instead of v3.2.25. _This is by design._
- When you upgrade to (for example) Kubernetes 1.21 from 1.20, AKS Engine will automatically change your control plane configuration (e.g., `coredns`, `metrics-server`, `kube-proxy`) so that the cluster component configurations have a close, known-working affinity with 1.21.
- When you perform an upgrade, even if it is a Kubernetes patch release upgrade such as 1.21.4 to 1.21.5, but you use a newer version of `aks-engine`, a newer version of `etcd` (for example) may have been validated and configured as default since the version of `aks-engine` used to build the cluster was released. So, for example, without any explicit user direction, the newly upgraded cluster will now be running etcd v3.2.26 instead of v3.2.25. _This is by design._
In summary, using `aks-engine upgrade` means you will freshen and re-pave the entire stack that underlies Kubernetes to reflect the best-known, recent implementation of Azure IaaS + OS + OS config + Kubernetes config.
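
To make the version rule in item 3 concrete, here is a simplified Go sketch. It assumes plain `major.minor.patch` version strings and collapses "the next aks-engine-supported minor version" to "current minor + 1"; the real validation in aks-engine is more involved and also honors `--force`.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse splits a "1.<minor>.<patch>" version string (pre-release suffixes are not handled).
func parse(v string) (minor, patch int, err error) {
	parts := strings.Split(v, ".")
	if len(parts) != 3 || parts[0] != "1" {
		return 0, 0, fmt.Errorf("unexpected version %q", v)
	}
	if minor, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	if patch, err = strconv.Atoi(parts[2]); err != nil {
		return 0, 0, err
	}
	return minor, patch, nil
}

// upgradeAllowed reports whether target is a newer patch in the same minor
// channel, or any patch of the next minor release.
func upgradeAllowed(current, target string) (bool, error) {
	cm, cp, err := parse(current)
	if err != nil {
		return false, err
	}
	tm, tp, err := parse(target)
	if err != nil {
		return false, err
	}
	switch {
	case tm == cm:
		return tp > cp, nil // e.g., 1.21.4 -> 1.21.5
	case tm == cm+1:
		return true, nil // e.g., 1.21.5 -> 1.22.2
	default:
		return false, nil
	}
}

func main() {
	ok, _ := upgradeAllowed("1.21.4", "1.21.5")
	fmt.Println(ok) // true
}
```
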
@@ -129,7 +129,7 @@ If you use Key Vault for secrets, you must specify a local [kubeconfig file](htt
--api-model _output/mycluster/apimodel.json \
--location westus \
--resource-group test-upgrade \
--upgrade-version 1.18.7 \
--upgrade-version 1.21.5 \
--kubeconfig ./path/to/kubeconfig.json
```


@@ -5,7 +5,7 @@
"enableIPv6Only": true
},
"orchestratorProfile": {
"orchestratorRelease": "1.18",
"orchestratorRelease": "1.19",
"kubernetesConfig": {
"loadBalancerSku": "Standard",
"excludeMasterFromStandardLB": true,


@@ -1,30 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorRelease": "1.18"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v3"
},
"agentPoolProfiles": [
{
"name": "agentpool1",
"count": 3,
"vmSize": "Standard_D2_v3"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
}
}
}


@@ -57,9 +57,9 @@ const (
const (
// KubernetesDefaultRelease is the default Kubernetes release
KubernetesDefaultRelease string = "1.18"
KubernetesDefaultRelease string = "1.19"
// KubernetesDefaultReleaseWindows is the default Kubernetes release for Windows
KubernetesDefaultReleaseWindows string = "1.18"
KubernetesDefaultReleaseWindows string = "1.19"
// KubernetesDefaultReleaseAzureStack is the default Kubernetes release on Azure Stack
KubernetesDefaultReleaseAzureStack string = "1.19"
// KubernetesDefaultReleaseWindowsAzureStack is the default Kubernetes release for Windows on Azure Stack

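The constants above are the single source of the fallback release. Below is a tiny illustrative sketch of how a caller might use them when the API model does not pin a version; the package and function are hypothetical, and the constant values mirror the new defaults in this diff.

```go
package sketch

const (
	// Values mirror the new defaults introduced by this commit.
	KubernetesDefaultRelease        = "1.19"
	KubernetesDefaultReleaseWindows = "1.19"
)

// resolveRelease falls back to the package defaults when the API model
// leaves the orchestrator release empty.
func resolveRelease(requested string, windows bool) string {
	if requested != "" {
		return requested
	}
	if windows {
		return KubernetesDefaultReleaseWindows
	}
	return KubernetesDefaultRelease
}
```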

@@ -224,7 +224,7 @@ var AllKubernetesSupportedVersions = map[string]bool{
"1.18.17": false,
"1.18.18": false,
"1.18.19": false,
"1.18.20": true,
"1.18.20": false,
"1.19.0-alpha.1": false,
"1.19.0-alpha.2": false,
"1.19.0-alpha.3": false,

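Because `AllKubernetesSupportedVersions` maps each known version to a supported flag, retiring 1.18.20 is just a `true` to `false` flip. A minimal sketch of how a caller can derive the currently supported set from such a map follows; the helper name and the stand-in map are made up.

```go
package main

import (
	"fmt"
	"sort"
)

func supportedVersions(all map[string]bool) []string {
	var out []string
	for version, supported := range all {
		if supported {
			out = append(out, version)
		}
	}
	sort.Strings(out) // lexicographic order is good enough for a sketch
	return out
}

func main() {
	// Tiny stand-in for the real map shown in the hunk above.
	all := map[string]bool{"1.18.20": false, "1.19.15": true, "1.22.2": true}
	fmt.Println(supportedVersions(all)) // [1.19.15 1.22.2]
}
```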

@@ -84,8 +84,8 @@ func TestControllerManagerConfigDefaultFeatureGates(t *testing.T) {
cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false)
cs.setControllerManagerConfig()
cm := cs.Properties.OrchestratorProfile.KubernetesConfig.ControllerManagerConfig
if cm["--feature-gates"] != "LocalStorageCapacityIsolation=true,ServiceNodeExclusion=true" {
t.Fatalf("got unexpected '--feature-gates' Controller Manager config value for \"--feature-gates\": \"LocalStorageCapacityIsolation=true,ServiceNodeExclusion=true\": %s",
if cm["--feature-gates"] != "LocalStorageCapacityIsolation=true" {
t.Fatalf("got unexpected '--feature-gates' Controller Manager config value for \"--feature-gates\": \"LocalStorageCapacityIsolation=true\": %s",
cm["--feature-gates"])
}
@@ -104,8 +104,8 @@ func TestControllerManagerConfigDefaultFeatureGates(t *testing.T) {
cm = cs.Properties.OrchestratorProfile.KubernetesConfig.ControllerManagerConfig
cm["--feature-gates"] = "TaintBasedEvictions=true"
cs.setControllerManagerConfig()
if cm["--feature-gates"] != "LocalStorageCapacityIsolation=true,ServiceNodeExclusion=true,TaintBasedEvictions=true" {
t.Fatalf("got unexpected '--feature-gates' Controller Manager config value for \"--feature-gates\": \"LocalStorageCapacityIsolation=true,ServiceNodeExclusion=true\": %s",
if cm["--feature-gates"] != "LocalStorageCapacityIsolation=true,TaintBasedEvictions=true" {
t.Fatalf("got unexpected '--feature-gates' Controller Manager config value for \"--feature-gates\": \"LocalStorageCapacityIsolation=true,TaintBasedEvictions=true\": %s",
cm["--feature-gates"])
}
}

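The updated expectations above show that `ServiceNodeExclusion=true` is no longer part of the default `--feature-gates` string for the default test cluster version, while user-supplied gates are still merged with the defaults. A hypothetical helper illustrating that merge-and-sort behavior (not the actual `setControllerManagerConfig` implementation):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// mergeFeatureGates combines two comma-separated feature-gate lists,
// de-duplicates them, and renders the result in sorted order.
func mergeFeatureGates(defaults, user string) string {
	set := map[string]bool{}
	for _, kv := range strings.Split(defaults+","+user, ",") {
		if kv = strings.TrimSpace(kv); kv != "" {
			set[kv] = true
		}
	}
	var out []string
	for kv := range set {
		out = append(out, kv)
	}
	sort.Strings(out)
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(mergeFeatureGates("LocalStorageCapacityIsolation=true", "TaintBasedEvictions=true"))
	// LocalStorageCapacityIsolation=true,TaintBasedEvictions=true
}
```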

@@ -3661,12 +3661,12 @@ func TestCloudProviderBackoff(t *testing.T) {
},
},
{
name: "Kubernetes 1.18.2",
name: "Kubernetes 1.19.15",
cs: ContainerService{
Properties: &Properties{
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.18.2",
OrchestratorVersion: "1.19.15",
},
MasterProfile: &MasterProfile{},
},


@@ -119,7 +119,6 @@ function Get-FilesToCacheOnVHD {
"c:\akse-cache\win-k8s\" = @(
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.19.15-azs/windowszip/v1.19.15-azs-1int.zip",
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.20.11-azs/windowszip/v1.20.11-azs-1int.zip",
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.18.20/windowszip/v1.18.20-1int.zip",
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.19.15/windowszip/v1.19.15-1int.zip",
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.20.11/windowszip/v1.20.11-1int.zip",
"https://kubernetesartifacts.azureedge.net/kubernetes/v1.21.5/windowszip/v1.21.5-1int.zip",


@@ -233,7 +233,6 @@ K8S_VERSIONS="
1.20.11-azs
1.19.15
1.19.15-azs
1.18.20
"
for KUBERNETES_VERSION in ${K8S_VERSIONS}; do
for component in kube-apiserver kube-controller-manager kube-proxy kube-scheduler; do
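
The provisioning scripts above iterate a fixed version list. A small Go sketch of the same loop, reusing only the windowszip URL layout that appears verbatim in the PowerShell cache list; URL layouts for the other components are not shown in this hunk, so they are omitted here.

```go
package main

import "fmt"

func main() {
	versions := []string{"1.19.15", "1.20.11", "1.21.5"}
	for _, v := range versions {
		fmt.Printf("https://kubernetesartifacts.azureedge.net/kubernetes/v%s/windowszip/v%s-1int.zip\n", v, v)
	}
}
```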