Availability zone support for k8s (#3453)

Sertaç Özercan 2018-08-30 13:46:49 -07:00 committed by Jack Francis
Parent 93a02674a7
Commit 9897327966
25 changed files with 614 additions and 6 deletions

View file

@ -490,6 +490,8 @@ A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for
| ---------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| availabilityProfile | no | Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`. |
| count | yes | Describes the node count |
| availabilityZones | no | To protect your cluster from datacenter-level failures, you can provide availability zones for each of your agent pools. Only applies to Kubernetes clusters version 1.12+. Supported values are arrays of strings, each representing a supported availability zone in a region for your subscription, e.g. `"availabilityZones": ["1","2"]` indicates that zones 1 and 2 can be used. To get the supported zones for a region in your subscription, run `az vm list-skus --location centralus --query "[?name=='Standard_DS2_v2'].[locationInfo, restrictions]" -o table`. You should see values like `'zones': ['2', '3', '1']` in the first column. If `NotAvailableForSubscription` appears in the output, create an Azure support ticket to enable zones for that region. Note: for availability zones, only the Standard load balancer is supported. ([Availability zone example](../examples/e2e-tests/kubernetes/zones)). |
| singlePlacementGroup | no | Supported values are `true` (default) and `false`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. `true`: the VMSS uses a single placement group and supports 0-100 VMs. `false`: the VMSS spans multiple placement groups and supports 0-1,000 VMs. For more information, check out [virtual machine scale sets placement groups](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups). |
| scaleSetPriority | no | Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority). |
| scaleSetEvictionPolicy | no | Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`. |
| diskSizesGB | no | Describes an array of up to 4 attached disk sizes. Valid disk size values are between 1 and 1024 |

View file

@ -0,0 +1,39 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Kubernetes",
"orchestratorRelease": "1.12"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_DS2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool",
"count": 4,
"vmSize": "Standard_DS2_v2",
"availabilityZones": [
"1",
"2"
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"servicePrincipalProfile": {
"clientId": "",
"secret": ""
}
}
}

View file

@ -48,6 +48,14 @@
},
"type": "string"
},
{{if HasAvailabilityZones .}}
"{{.Name}}AvailabilityZones": {
"metadata": {
"description": "Agent availability zones"
},
"type": "array"
},
{{end}}
"{{.Name}}osImageName": {
"defaultValue": "",
"metadata": {

View file

@ -26,6 +26,9 @@
"poolName" : "{{.Name}}"
},
"location": "[variables('location')]",
{{ if HasAvailabilityZones .}}
"zones": "[parameters('{{.Name}}AvailabilityZones')]",
{{ end }}
"name": "[variables('{{.Name}}VMNamePrefix')]",
{{if UseManagedIdentity}}
{{if UserAssignedIDEnabled}}
@ -47,6 +50,7 @@
"name": "[variables('{{.Name}}VMSize')]"
},
"properties": {
"singlePlacementGroup": {{UseSinglePlacementGroup .}},
"overprovision": false,
"upgradePolicy": {
"mode": "Manual"

View file

@ -281,7 +281,13 @@ function ensureK8sControlPlane() {
return
fi
wait_for_file 600 1 $KUBECTL || exit $ERR_FILE_WATCH_TIMEOUT
retrycmd_if_failure 900 1 20 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT
# workaround for a 1.12 bug (https://github.com/Azure/acs-engine/issues/3681); remove once fixed upstream
if [[ "${KUBERNETES_VERSION}" = 1.12.* ]]; then
ensureKubelet
retrycmd_if_failure 900 1 20 $KUBECTL 2>/dev/null cluster-info || { ensureKubelet && retrycmd_if_failure 900 1 20 $KUBECTL 2>/dev/null cluster-info; } || exit $ERR_K8S_RUNNING_TIMEOUT
else
retrycmd_if_failure 900 1 20 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT
fi
ensurePodSecurityPolicy
}

View file

@ -26,6 +26,9 @@
"poolName" : "{{.Name}}"
},
"location": "[variables('location')]",
{{ if HasAvailabilityZones .}}
"zones": "[parameters('{{.Name}}AvailabilityZones')]",
{{ end }}
"name": "[variables('{{.Name}}VMNamePrefix')]",
{{if UseManagedIdentity}}
"identity": {
@ -38,6 +41,7 @@
"name": "[variables('{{.Name}}VMSize')]"
},
"properties": {
"singlePlacementGroup": {{UseSinglePlacementGroup .}},
"overprovision": false,
"upgradePolicy": {
"mode": "Manual"

View file

@ -241,6 +241,7 @@ func setPropertiesDefaults(cs *api.ContainerService, isUpgrade, isScale bool) (b
setStorageDefaults(properties)
setExtensionDefaults(properties)
setVMSSDefaults(properties)
certsGenerated, e := setDefaultCerts(properties)
if e != nil {
@ -559,6 +560,28 @@ func setMasterNetworkDefaults(a *api.Properties, isUpgrade bool) {
}
}
// setVMSSDefaults sets defaults for VMSS-based agent pools (placement groups and zone-driven load balancer settings)
func setVMSSDefaults(a *api.Properties) {
for _, profile := range a.AgentPoolProfiles {
if profile.AvailabilityProfile == api.VirtualMachineScaleSets {
if profile.Count > 100 {
profile.SinglePlacementGroup = helpers.PointerToBool(false)
}
if profile.SinglePlacementGroup == nil {
profile.SinglePlacementGroup = helpers.PointerToBool(api.DefaultSinglePlacementGroup)
}
// compare the dereferenced value: helpers.PointerToBool allocates a fresh bool, so pointer comparison would always be false
if !*profile.SinglePlacementGroup {
profile.StorageProfile = api.ManagedDisks
}
if profile.HasAvailabilityZones() {
a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard"
a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB)
}
}
}
}
// setAgentNetworkDefaults for agents
func setAgentNetworkDefaults(a *api.Properties, isUpgrade, isScale bool) {
// configure the subnets if not in custom VNET

View file

@ -588,6 +588,48 @@ func TestIsAzureCNINetworkmonitorAddon(t *testing.T) {
}
}
// TestSetVMSSDefaults covers tests for setVMSSDefaults
func TestSetVMSSDefaults(t *testing.T) {
mockCS := getMockBaseContainerService("1.10.3")
properties := mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.AgentPoolProfiles[0].Count = 4
setPropertiesDefaults(&mockCS, false, false)
if !properties.AgentPoolProfiles[0].IsVirtualMachineScaleSets() {
t.Fatalf("AgentPoolProfile[0].AvailabilityProfile did not have the expected configuration, got %s, expected %s",
properties.AgentPoolProfiles[0].AvailabilityProfile, api.VirtualMachineScaleSets)
}
if *properties.AgentPoolProfiles[0].SinglePlacementGroup != api.DefaultSinglePlacementGroup {
t.Fatalf("AgentPoolProfile[0].SinglePlacementGroup default did not have the expected configuration, got %t, expected %t",
*properties.AgentPoolProfiles[0].SinglePlacementGroup, api.DefaultSinglePlacementGroup)
}
if properties.AgentPoolProfiles[0].HasAvailabilityZones() {
if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" {
t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard")
}
if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB {
t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t",
*properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB)
}
}
properties.AgentPoolProfiles[0].Count = 110
setPropertiesDefaults(&mockCS, false, false)
if *properties.AgentPoolProfiles[0].SinglePlacementGroup != false {
t.Fatalf("AgentPoolProfile[0].SinglePlacementGroup did not have the expected configuration, got %t, expected %t",
*properties.AgentPoolProfiles[0].SinglePlacementGroup, false)
}
if *properties.AgentPoolProfiles[0].SinglePlacementGroup == false && properties.AgentPoolProfiles[0].StorageProfile != api.ManagedDisks {
t.Fatalf("AgentPoolProfile[0].StorageProfile did not have the expected configuration, got %s, expected %s",
properties.AgentPoolProfiles[0].StorageProfile, api.ManagedDisks)
}
}
func getMockAddon(name string) api.KubernetesAddon {
return api.KubernetesAddon{
Name: name,

View file

@ -173,6 +173,9 @@ func getParameters(cs *api.ContainerService, generatorCode string, acsengineVers
for _, agentProfile := range properties.AgentPoolProfiles {
addValue(parametersMap, fmt.Sprintf("%sCount", agentProfile.Name), agentProfile.Count)
addValue(parametersMap, fmt.Sprintf("%sVMSize", agentProfile.Name), agentProfile.VMSize)
if agentProfile.HasAvailabilityZones() {
addValue(parametersMap, fmt.Sprintf("%sAvailabilityZones", agentProfile.Name), agentProfile.AvailabilityZones)
}
if agentProfile.IsCustomVNET() {
addValue(parametersMap, fmt.Sprintf("%sVnetSubnetID", agentProfile.Name), agentProfile.VnetSubnetID)
} else {

View file

@ -700,6 +700,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
"IsNSeriesSKU": func(profile *api.AgentPoolProfile) bool {
return isNSeriesSKU(profile)
},
"UseSinglePlacementGroup": func(profile *api.AgentPoolProfile) bool {
return *profile.SinglePlacementGroup
},
"HasAvailabilityZones": func(profile *api.AgentPoolProfile) bool {
return profile.HasAvailabilityZones()
},
"HasLinuxSecrets": func() bool {
return cs.Properties.LinuxProfile.HasSecrets()
},
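These entries are consumed by Go's `text/template` when the ARM template fragments shown earlier are rendered. A minimal sketch, with a stub type standing in for the real `api.AgentPoolProfile`, of how `{{if HasAvailabilityZones .}}` gates the `zones` property:

```go
package main

import (
	"os"
	"text/template"
)

// agentPoolProfile is a stub standing in for api.AgentPoolProfile in this sketch.
type agentPoolProfile struct {
	Name              string
	AvailabilityZones []string
}

func main() {
	funcs := template.FuncMap{
		"HasAvailabilityZones": func(p *agentPoolProfile) bool {
			return len(p.AvailabilityZones) > 0
		},
	}
	// Same shape as the VMSS template fragments above.
	tmpl := template.Must(template.New("vmss").Funcs(funcs).Parse(
		`{{if HasAvailabilityZones .}}"zones": "[parameters('{{.Name}}AvailabilityZones')]",{{end}}` + "\n"))
	pool := &agentPoolProfile{Name: "agentpool", AvailabilityZones: []string{"1", "2"}}
	if err := tmpl.Execute(os.Stdout, pool); err != nil {
		panic(err)
	}
	// prints: "zones": "[parameters('agentpoolAvailabilityZones')]",
}
```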

View file

@ -156,6 +156,9 @@ const (
NetworkPluginKubenet = "kubenet"
// NetworkPluginAzure is the string expression for Azure CNI plugin.
NetworkPluginAzure = "azure"
// DefaultSinglePlacementGroup determines the acs-engine provided default for supporting large VMSS
// (true = single placement group 0-100 VMs, false = multiple placement group 0-1000 VMs)
DefaultSinglePlacementGroup = true
)
const (

View file

@ -998,6 +998,8 @@ func convertAgentPoolProfileToVLabs(api *AgentPoolProfile, p *vlabs.AgentPoolPro
p.FQDN = api.FQDN
p.CustomNodeLabels = map[string]string{}
p.AcceleratedNetworkingEnabled = api.AcceleratedNetworkingEnabled
p.AvailabilityZones = api.AvailabilityZones
p.SinglePlacementGroup = api.SinglePlacementGroup
for k, v := range api.CustomNodeLabels {
p.CustomNodeLabels[k] = v

View file

@ -1011,6 +1011,8 @@ func convertVLabsAgentPoolProfile(vlabs *vlabs.AgentPoolProfile, api *AgentPoolP
api.IPAddressCount = vlabs.IPAddressCount
api.FQDN = vlabs.FQDN
api.AcceleratedNetworkingEnabled = vlabs.AcceleratedNetworkingEnabled
api.AvailabilityZones = vlabs.AvailabilityZones
api.SinglePlacementGroup = vlabs.SinglePlacementGroup
api.CustomNodeLabels = map[string]string{}
for k, v := range vlabs.CustomNodeLabels {

View file

@ -466,6 +466,8 @@ type AgentPoolProfile struct {
MaxCount *int `json:"maxCount,omitempty"`
MinCount *int `json:"minCount,omitempty"`
EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"`
AvailabilityZones []string `json:"availabilityZones,omitempty"`
SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
}
// AgentPoolProfileRole represents an agent role
@ -787,6 +789,11 @@ func (a *AgentPoolProfile) HasDisks() bool {
return len(a.DiskSizesGB) > 0
}
// HasAvailabilityZones returns true if the agent pool has availability zones
func (a *AgentPoolProfile) HasAvailabilityZones() bool {
return len(a.AvailabilityZones) > 0
}
// HasSecrets returns true if the customer specified secrets to install
func (w *WindowsProfile) HasSecrets() bool {
return len(w.Secrets) > 0

View file

@ -438,6 +438,8 @@ type AgentPoolProfile struct {
CustomNodeLabels map[string]string `json:"customNodeLabels,omitempty"`
PreProvisionExtension *Extension `json:"preProvisionExtension"`
Extensions []Extension `json:"extensions"`
SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
AvailabilityZones []string `json:"availabilityZones,omitempty"`
}
// AgentPoolProfileRole represents an agent role
@ -498,6 +500,16 @@ func (p *Properties) HasWindows() bool {
return false
}
// HasAvailabilityZones returns true if the cluster contains pools with zones
func (p *Properties) HasAvailabilityZones() bool {
for _, agentPoolProfile := range p.AgentPoolProfiles {
if agentPoolProfile.HasAvailabilityZones() {
return true
}
}
return false
}
// IsCustomVNET returns true if the customer brought their own VNET
func (m *MasterProfile) IsCustomVNET() bool {
return len(m.VnetSubnetID) > 0
@ -598,6 +610,11 @@ func (a *AgentPoolProfile) SetSubnet(subnet string) {
a.subnet = subnet
}
// HasAvailabilityZones returns true if the agent pool has availability zones
func (a *AgentPoolProfile) HasAvailabilityZones() bool {
return len(a.AvailabilityZones) > 0
}
// HasSearchDomain returns true if the customer specified a custom search domain
func (l *LinuxProfile) HasSearchDomain() bool {
if l.CustomSearchDomain != nil {

View file

@ -177,3 +177,17 @@ func TestAgentPoolProfile(t *testing.T) {
t.Fatalf("unexpectedly detected AgentPoolProfile.AvailabilitySets != VirtualMachineScaleSets after unmarshal")
}
}
func TestContainerServiceProperties(t *testing.T) {
// Agent pool with availability zones
ContainerServicePropertiesText := `{"orchestratorProfile": {"orchestratorType": "Kubernetes","orchestratorRelease": "1.11"}, "agentPoolProfiles":[{ "name": "linuxpool1", "osType" : "Linux", "count": 1, "vmSize": "Standard_D2_v2",
"availabilityProfile": "VirtualMachineScaleSets", "availabilityZones": ["1","2"]}]}`
prop := &Properties{}
if e := json.Unmarshal([]byte(ContainerServicePropertiesText), prop); e != nil {
t.Fatalf("unexpectedly detected unmarshal failure for ContainerServiceProperties, %+v", e)
}
if !prop.HasAvailabilityZones() {
t.Fatalf("unexpectedly detected ContainerServiceProperties HasAvailabilityZones returns false after unmarshal")
}
}

View file

@ -183,6 +183,23 @@ func (a *Properties) validateOrchestratorProfile(isUpdate bool) error {
return errors.Errorf("the following OrchestratorProfile configuration is not supported: OrchestratorType: \"%s\", OrchestratorRelease: \"%s\", OrchestratorVersion: \"%s\". Please use one of the following versions: %v", o.OrchestratorType, o.OrchestratorRelease, o.OrchestratorVersion, common.GetAllSupportedKubernetesVersions(false, false))
}
sv, err := semver.Make(version)
if err != nil {
return errors.Errorf("could not validate version %s", version)
}
if a.HasAvailabilityZones() {
// TODO: update this to 1.12 after it's released
minVersion, err := semver.Make("1.12.0-beta.0")
if err != nil {
return errors.New("could not validate version")
}
if sv.LT(minVersion) {
return errors.New("availabilityZone is only available in Kubernetes version 1.12 or greater")
}
}
if o.KubernetesConfig != nil {
err := o.KubernetesConfig.Validate(version, a.HasWindows())
if err != nil {
@ -192,10 +209,6 @@ func (a *Properties) validateOrchestratorProfile(isUpdate bool) error {
if err != nil {
return errors.New("could not validate version")
}
sv, err := semver.Make(version)
if err != nil {
return errors.Errorf("could not validate version %s", version)
}
if o.KubernetesConfig.EnableAggregatedAPIs {
if sv.LT(minVersion) {
@ -401,6 +414,22 @@ func (a *Properties) validateAgentPoolProfiles() error {
if a.AgentPoolProfiles[i].AvailabilityProfile != a.AgentPoolProfiles[0].AvailabilityProfile {
return errors.New("mixed mode availability profiles are not allowed. Please set either VirtualMachineScaleSets or AvailabilitySet in availabilityProfile for all agent pools")
}
if a.AgentPoolProfiles[i].AvailabilityProfile == AvailabilitySet {
if a.AgentPoolProfiles[i].HasAvailabilityZones() {
return errors.New("Availability Zones are not supported with an AvailabilitySet. Please either remove availabilityProfile or set availabilityProfile to VirtualMachineScaleSets")
}
}
if a.AgentPoolProfiles[i].HasAvailabilityZones() {
if a.AgentPoolProfiles[i].Count < len(a.AgentPoolProfiles[i].AvailabilityZones)*2 {
return errors.New("the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more")
}
}
if a.AgentPoolProfiles[i].SinglePlacementGroup != nil && a.AgentPoolProfiles[i].AvailabilityProfile == AvailabilitySet {
return errors.New("singlePlacementGroup is only supported with VirtualMachineScaleSets")
}
}
if a.OrchestratorProfile.OrchestratorType == OpenShift {
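The zone-balance validation above reduces to a single inequality: with z zones, the pool needs at least 2z nodes. A minimal sketch of the same rule (`zoneBalanced` is illustrative, not the real validator):

```go
package main

import "fmt"

// zoneBalanced mirrors the check above: each of the provided zones should
// receive at least 2 nodes, so the pool count must be >= 2 * len(zones).
func zoneBalanced(count int, zones []string) bool {
	return count >= 2*len(zones)
}

func main() {
	fmt.Println(zoneBalanced(4, []string{"1", "2"})) // true: 2 nodes per zone
	fmt.Println(zoneBalanced(2, []string{"1", "2"})) // false: would fail validation
}
```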

View file

@ -915,9 +915,9 @@ func TestValidateKubernetesLabelKey(t *testing.T) {
}
func Test_AadProfile_Validate(t *testing.T) {
properties := getK8sDefaultProperties(false)
t.Run("Valid aadProfile should pass", func(t *testing.T) {
t.Parallel()
properties := getK8sDefaultProperties(false)
for _, aadProfile := range []*AADProfile{
{
ClientAppID: "92444486-5bc3-4291-818b-d53ae480991b",
@ -938,6 +938,7 @@ func Test_AadProfile_Validate(t *testing.T) {
t.Run("Invalid aadProfiles should NOT pass", func(t *testing.T) {
t.Parallel()
properties := getK8sDefaultProperties(false)
for _, aadProfile := range []*AADProfile{
{
ClientAppID: "1",
@ -968,6 +969,7 @@ func Test_AadProfile_Validate(t *testing.T) {
t.Run("aadProfiles should not be supported non-Kubernetes orchestrators", func(t *testing.T) {
t.Parallel()
properties := getK8sDefaultProperties(false)
properties.OrchestratorProfile = &OrchestratorProfile{
OrchestratorType: OpenShift,
}
@ -1439,6 +1441,99 @@ func TestProperties_ValidateAddon(t *testing.T) {
t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error())
}
}
func TestProperties_ValidateZones(t *testing.T) {
tests := []struct {
name string
orchestratorVersion string
agentProfiles []*AgentPoolProfile
expectedErr string
}{
{
name: "Agent profile with zones version",
orchestratorVersion: "1.11.0",
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
VMSize: "Standard_DS2_v2",
Count: 4,
AvailabilityProfile: VirtualMachineScaleSets,
AvailabilityZones: []string{"1", "2"},
},
},
expectedErr: "availabilityZone is only available in Kubernetes version 1.12 or greater",
},
{
name: "Agent profile with zones node count",
orchestratorVersion: "1.12.0-beta.0",
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
VMSize: "Standard_DS2_v2",
Count: 2,
AvailabilityProfile: VirtualMachineScaleSets,
AvailabilityZones: []string{"1", "2"},
},
},
expectedErr: "the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more",
},
{
name: "Agent profile with zones vmss",
orchestratorVersion: "1.12.0-beta.0",
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
VMSize: "Standard_DS2_v2",
Count: 4,
AvailabilityProfile: AvailabilitySet,
AvailabilityZones: []string{"1", "2"},
},
},
expectedErr: "Availability Zones are not supported with an AvailabilitySet. Please either remove availabilityProfile or set availabilityProfile to VirtualMachineScaleSets",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
p := getK8sDefaultProperties(true)
p.AgentPoolProfiles = test.agentProfiles
p.OrchestratorProfile.OrchestratorVersion = test.orchestratorVersion
var err error
if test.orchestratorVersion == "1.11.0" {
err = p.validateOrchestratorProfile(false)
} else {
err = p.Validate(true)
}
expectedMsg := test.expectedErr
if err.Error() != expectedMsg {
t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error())
}
})
}
}
func TestProperties_ValidateSinglePlacementGroup(t *testing.T) {
p := getK8sDefaultProperties(true)
p.AgentPoolProfiles = []*AgentPoolProfile{
{
Name: "agentpool",
VMSize: "Standard_DS2_v2",
Count: 2,
AvailabilityProfile: AvailabilitySet,
SinglePlacementGroup: helpers.PointerToBool(false),
},
}
p.OrchestratorProfile.OrchestratorVersion = "1.12.0-beta.0"
err := p.Validate(true)
expectedMsg := "singlePlacementGroup is only supported with VirtualMachineScaleSets"
if err.Error() != expectedMsg {
t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error())
}
}
func TestProperties_ValidateVNET(t *testing.T) {
validVNetSubnetID := "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"

View file

@ -243,6 +243,20 @@ func (e *Engine) HasNetworkPolicy(name string) bool {
return false
}
// HasAllZonesAgentPools will return true if all of the agent pools have zones
func (e *Engine) HasAllZonesAgentPools() bool {
count := 0
for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
if ap.HasAvailabilityZones() {
count++
}
}
return count == len(e.ExpandedDefinition.Properties.AgentPoolProfiles)
}
// Write will write the cluster definition to disk
func (e *Engine) Write() error {
json, err := helpers.JSONMarshal(e.ClusterDefinition, false)

View file

@ -8,6 +8,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/Azure/acs-engine/pkg/api/common"
@ -17,6 +18,7 @@ import (
"github.com/Azure/acs-engine/test/e2e/kubernetes/job"
"github.com/Azure/acs-engine/test/e2e/kubernetes/networkpolicy"
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolume"
"github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolumeclaims"
"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
"github.com/Azure/acs-engine/test/e2e/kubernetes/service"
@ -847,6 +849,90 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
})
Describe("with all zoned agent pools", func() {
It("should be labeled with zones for each node", func() {
if eng.HasAllZonesAgentPools() {
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Nodes {
role := node.Metadata.Labels["kubernetes.io/role"]
if role == "agent" {
By("Ensuring that we get zones for each agent node")
zones := node.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
// zoned nodes carry labels of the form <region>-<zone>, e.g. centralus-1
contains := strings.Contains(zones, "-")
Expect(contains).To(Equal(true))
}
}
} else {
Skip("Availability zones was not configured for this Cluster Definition")
}
})
It("should create pv with zone labels and node affinity", func() {
if eng.HasAllZonesAgentPools() {
By("Creating a persistent volume claim")
pvcName := "azure-managed-disk" // should be the same as in pvc-premium.yaml
pvc, err := persistentvolumeclaims.CreatePersistentVolumeClaimsFromFile(filepath.Join(WorkloadDir, "pvc-premium.yaml"), pvcName, "default")
Expect(err).NotTo(HaveOccurred())
ready, err := pvc.WaitOnReady("default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
pvList, err := persistentvolume.Get()
Expect(err).NotTo(HaveOccurred())
pvZone := ""
for _, pv := range pvList.PersistentVolumes {
By("Ensuring that we get zones for the pv")
// zone is chosen by round-robin across all zones
pvZone = pv.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
fmt.Printf("pvZone: %s\n", pvZone)
contains := strings.Contains(pvZone, "-")
Expect(contains).To(Equal(true))
// VolumeScheduling feature gate is set to true by default starting with v1.10
for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
if expression.Key == "failure-domain.beta.kubernetes.io/zone" {
By("Ensuring that we get nodeAffinity for each pv")
value := expression.Values[0]
fmt.Printf("NodeAffinity value: %s\n", value)
contains := strings.Contains(value, "-")
Expect(contains).To(Equal(true))
}
}
}
By("Launching a pod using the volume claim")
podName := "zone-pv-pod" // should be the same as in pod-pvc.yaml
testPod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "pod-pvc.yaml"), podName, "default")
Expect(err).NotTo(HaveOccurred())
ready, err = testPod.WaitOnReady(5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Checking that the pod can access volume")
valid, err := testPod.ValidatePVC("/mnt/azure", 10, 10*time.Second)
Expect(valid).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
By("Ensuring that attached volume pv has the same zone as the zone of the node")
nodeName := testPod.Spec.NodeName
nodeList, err := node.GetByPrefix(nodeName)
Expect(err).NotTo(HaveOccurred())
nodeZone := nodeList[0].Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
fmt.Printf("pvZone: %s\n", pvZone)
fmt.Printf("nodeZone: %s\n", nodeZone)
Expect(nodeZone).To(Equal(pvZone))
By("Cleaning up after ourselves")
err = testPod.Delete()
Expect(err).NotTo(HaveOccurred())
err = pvc.Delete()
Expect(err).NotTo(HaveOccurred())
} else {
Skip("Availability zones was not configured for this Cluster Definition")
}
})
})
Describe("after the cluster has been up for awhile", func() {
It("dns-liveness pod should not have any restarts", func() {
if !eng.HasNetworkPolicy("calico") {

View file

@ -0,0 +1,124 @@
package persistentvolume
import (
"context"
"encoding/json"
"log"
"os/exec"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)
// PersistentVolume is used to parse data from kubectl get pv
type PersistentVolume struct {
Metadata Metadata `json:"metadata"`
Spec Spec `json:"spec"`
Status Status `json:"status"`
}
// Metadata holds information like name, create time, and namespace
type Metadata struct {
CreatedAt time.Time `json:"creationTimestamp"`
Labels map[string]string `json:"labels"`
Name string `json:"name"`
}
// Spec holds information like storageClassName, nodeAffinity
type Spec struct {
StorageClassName string `json:"storageClassName"`
NodeAffinity NodeAffinity `json:"nodeAffinity"`
}
// NodeAffinity holds information like required nodeselector
type NodeAffinity struct {
Required *NodeSelector `json:"required"`
}
// NodeSelector represents the union of the results of one or more label queries
type NodeSelector struct {
// Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
}
// NodeSelectorTerm represents node selector requirements
type NodeSelectorTerm struct {
MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty"`
MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty"`
}
// NodeSelectorRequirement is a selector that contains values, a key, and an operator
type NodeSelectorRequirement struct {
Key string `json:"key"`
Values []string `json:"values,omitempty"`
}
// Status holds information like phase
type Status struct {
Phase string `json:"phase"`
}
// List is used to parse out PersistentVolume from a list
type List struct {
PersistentVolumes []PersistentVolume `json:"items"`
}
// Get returns the current pvs for a given kubeconfig
func Get() (*List, error) {
cmd := exec.Command("kubectl", "get", "pv", "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl get pv':%s", string(out))
return nil, err
}
pvl := List{}
err = json.Unmarshal(out, &pvl)
if err != nil {
log.Printf("Error unmarshalling pvs json:%s", err)
}
return &pvl, nil
}
// WaitOnReady will block until all pvs are in ready state
func WaitOnReady(pvCount int, sleep, duration time.Duration) bool {
readyCh := make(chan bool, 1)
errCh := make(chan error)
ctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
go func() {
for {
select {
case <-ctx.Done():
errCh <- errors.Errorf("Timeout exceeded (%s) while waiting for PVs to become Bound", duration.String())
default:
if AreAllReady(pvCount) {
readyCh <- true
}
time.Sleep(sleep)
}
}
}()
for {
select {
case <-errCh:
return false
case ready := <-readyCh:
return ready
}
}
}
// AreAllReady returns true only if the expected number of PVs exist and all of them are Bound
func AreAllReady(pvCount int) bool {
list, _ := Get()
if list == nil || len(list.PersistentVolumes) != pvCount {
return false
}
for _, pv := range list.PersistentVolumes {
if pv.Status.Phase != "Bound" {
return false
}
}
return true
}
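A hedged usage sketch of this package, assuming a kubeconfig already pointing at the cluster and the import path shown in the e2e tests above: wait for one PV to become Bound, then print its zone label.

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolume"
)

func main() {
	// Block until exactly 1 PV reports phase Bound, polling every 5s for up to 10m.
	if !persistentvolume.WaitOnReady(1, 5*time.Second, 10*time.Minute) {
		fmt.Println("PV never became Bound")
		return
	}
	pvs, err := persistentvolume.Get()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, pv := range pvs.PersistentVolumes {
		// Zoned PVs carry labels of the form <region>-<zone>, e.g. centralus-1.
		fmt.Println(pv.Metadata.Name, pv.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"])
	}
}
```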

View file

@ -70,6 +70,18 @@ func Get(pvcName, namespace string) (*PersistentVolumeClaims, error) {
return &pvc, nil
}
// Delete will delete a PersistentVolumeClaim in a given namespace
func (pvc *PersistentVolumeClaims) Delete() error {
cmd := exec.Command("kubectl", "delete", "pvc", "-n", pvc.Metadata.NameSpace, pvc.Metadata.Name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete PVC %s in namespace %s:%s\n", pvc.Metadata.Name, pvc.Metadata.NameSpace, string(out))
return err
}
return nil
}
// WaitOnReady will block until the PersistentVolumeClaim is available
func (pvc *PersistentVolumeClaims) WaitOnReady(namespace string, sleep, duration time.Duration) (bool, error) {
readyCh := make(chan bool, 1)

View file

@ -42,6 +42,7 @@ type Metadata struct {
// Spec holds information like containers
type Spec struct {
Containers []Container `json:"containers"`
NodeName string `json:"nodeName"`
}
// Container holds information like image and ports
@ -659,6 +660,45 @@ func (p *Pod) ValidateAzureFile(mountPath string, sleep, duration time.Duration)
}
}
// ValidatePVC will keep retrying to check whether the Azure disk is mounted in the Pod
func (p *Pod) ValidatePVC(mountPath string, sleep, duration time.Duration) (bool, error) {
readyCh := make(chan bool, 1)
errCh := make(chan error)
ctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
go func() {
for {
select {
case <-ctx.Done():
errCh <- errors.Errorf("Timeout exceeded (%s) while waiting for Pod (%s) to check azure disk mounted", duration.String(), p.Metadata.Name)
default:
out, err := p.Exec("--", "mkdir", mountPath+"/"+testDir)
if err == nil {
out, err := p.Exec("--", "ls", mountPath)
if err == nil && strings.Contains(string(out), testDir) {
readyCh <- true
} else {
log.Printf("Error:%s\n", err)
log.Printf("Out:%s\n", out)
}
} else {
log.Printf("Error:%s\n", err)
log.Printf("Out:%s\n", out)
}
time.Sleep(sleep)
}
}
}()
for {
select {
case err := <-errCh:
return false, err
case ready := <-readyCh:
return ready, nil
}
}
}
// ValidateResources checks that an addon has the expected memory/cpu limits and requests
func (c *Container) ValidateResources(a api.KubernetesContainerSpec) error {
expectedCPURequests := a.CPURequests

View file

@ -0,0 +1,15 @@
kind: Pod
apiVersion: v1
metadata:
name: zone-pv-pod
spec:
containers:
- name: myfrontend
image: nginx
volumeMounts:
- mountPath: "/mnt/azure"
name: volume
volumes:
- name: volume
persistentVolumeClaim:
claimName: azure-managed-disk

View file

@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: azure-managed-disk
spec:
accessModes:
- ReadWriteOnce
storageClassName: managed-premium
resources:
requests:
storage: 5Gi