Mirror of https://github.com/Azure/aks-engine.git

feat: proximity placement group support (#3056)

This commit is contained in:
Parent: b8aae45be2
Commit: 101555e013
@@ -709,6 +709,7 @@ Below is a list of sysctl configuration that aks-engine will configure by default
| ultraSSDEnabled | no | Enable UltraSSD feature for each node VM. More details about [Ultra disk](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-types#ultra-ssd-preview). |
| customVMTags | no | Specifies a list of custom tags to be added to the master VMs or Scale Sets. Each tag is a key/value pair (ie: `"myTagKey": "myTagValue"`). |
| sysctldConfig | no | Configure Linux kernel parameters via /etc/sysctl.d/. See `sysctldConfig` [below](#feat-sysctld-config) |
| proximityPlacementGroupID | no | Specifies the resource id of the Proximity Placement Group (PPG) to be used for master VMs. Please find more details about PPG in this [Azure blog](https://azure.microsoft.com/en-us/blog/introducing-proximity-placement-groups). Note that the PPG should be created in advance. The following [Azure CLI documentation](https://docs.microsoft.com/en-us/cli/azure/ppg?view=azure-cli-latest#az-ppg-create) explains how to create a PPG. |

### agentPoolProfiles
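To make the masterProfile entry above concrete, here is a minimal, hypothetical Go sketch that sets the new field on an api.MasterProfile, the same struct this change extends. The subscription, resource group, and PPG names are placeholders, and the placement group must already exist before deployment:

package main

import (
	"fmt"

	"github.com/Azure/aks-engine/pkg/api"
)

func main() {
	// Illustrative only: placeholder subscription, resource group, and PPG names.
	master := &api.MasterProfile{
		Count:     3,
		DNSPrefix: "mycluster",
		VMSize:    "Standard_DS2_v2",
		ProximityPlacementGroupID: "/subscriptions/11111111-0000-1111-0000-111111111111" +
			"/resourceGroups/my-rg/providers/Microsoft.Compute/proximityPlacementGroups/my-master-ppg",
	}
	fmt.Println(master.ProximityPlacementGroupID)
}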
@@ -746,8 +747,8 @@ A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for
| ultraSSDEnabled | no | Enable UltraSSD feature for each node VM. More details about [Ultra disk](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-types#ultra-ssd-preview). |
| extensions | no | Specifies list of extensions to enable for the agent profile. More details about [agentPoolProfiles extensions](extensions.md#agentpoolprofiles) |
| preProvisionExtension | no | Specifies an extension to be run before the cluster is brought up. More details about [agentPoolProfiles extensions](extensions.md#agentpoolprofiles) |
| sysctldConfig | no | Configure Linux kernel parameters via /etc/sysctl.d/. See `sysctldConfig` [below](#feat-sysctld-config) |
| sysctldConfig | no | Configure Linux kernel parameters via /etc/sysctl.d/. See `sysctldConfig` [below](#feat-sysctld-config) |
| proximityPlacementGroupID | no | Specifies the resource id of the Proximity Placement Group (PPG) to be used for this agentpool. Please find more details about PPG in this [Azure blog](https://azure.microsoft.com/en-us/blog/introducing-proximity-placement-groups). Note that the PPG should be created in advance. The following [Azure CLI documentation](https://docs.microsoft.com/en-us/cli/azure/ppg?view=azure-cli-latest#az-ppg-create) explains how to create a PPG. |

### linuxProfile

`linuxProfile` provides the linux configuration for each linux node in the cluster
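The agentPoolProfiles entry above accepts the same resource ID format, and a pool may point at the same PPG as the masters so that all cluster VMs are colocated. Continuing the hypothetical sketch from the masterProfile section, again with placeholder names:

	// Illustrative only: an agent pool reusing the same (placeholder) PPG as the
	// master profile, so masters and agents land in one placement group.
	pool := &api.AgentPoolProfile{
		Name:                "agentpool1",
		Count:               3,
		VMSize:              "Standard_DS2_v2",
		AvailabilityProfile: api.VirtualMachineScaleSets,
		ProximityPlacementGroupID: "/subscriptions/11111111-0000-1111-0000-111111111111" +
			"/resourceGroups/my-rg/providers/Microsoft.Compute/proximityPlacementGroups/my-master-ppg",
	}
	fmt.Println(pool.ProximityPlacementGroupID)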
@@ -524,6 +524,7 @@ func convertMasterProfileToVLabs(api *MasterProfile, vlabsProfile *vlabs.MasterP
	vlabsProfile.AuditDEnabled = api.AuditDEnabled
	vlabsProfile.UltraSSDEnabled = api.UltraSSDEnabled
	vlabsProfile.EncryptionAtHost = api.EncryptionAtHost
	vlabsProfile.ProximityPlacementGroupID = api.ProximityPlacementGroupID
	convertCustomFilesToVlabs(api, vlabsProfile)
	vlabsProfile.SysctlDConfig = map[string]string{}
	for key, val := range api.SysctlDConfig {
@@ -576,6 +577,7 @@ func convertAgentPoolProfileToVLabs(api *AgentPoolProfile, p *vlabs.AgentPoolPro
	p.UltraSSDEnabled = api.UltraSSDEnabled
	p.DiskEncryptionSetID = api.DiskEncryptionSetID
	p.EncryptionAtHost = api.EncryptionAtHost
	p.ProximityPlacementGroupID = api.ProximityPlacementGroupID

	for k, v := range api.CustomNodeLabels {
		p.CustomNodeLabels[k] = v
@@ -674,6 +674,25 @@ func TestTelemetryDefaultToVLabs(t *testing.T) {
	}
}

func TestPPGToVLabs(t *testing.T) {
	ppgResourceID1 := "ppgResourceID1"
	ppgResourceID2 := "ppgResourceID2"
	cs := getDefaultContainerService()
	cs.Properties.MasterProfile.ProximityPlacementGroupID = ppgResourceID1
	cs.Properties.AgentPoolProfiles[0].ProximityPlacementGroupID = ppgResourceID2
	vlabsCS := ConvertContainerServiceToVLabs(cs)
	if vlabsCS == nil {
		t.Errorf("expected the converted containerService struct to be non-nil")
	}
	if vlabsCS.Properties.MasterProfile.ProximityPlacementGroupID != ppgResourceID1 {
		t.Errorf("expected the master profile proximity placement group to be %s", ppgResourceID1)
	}

	if vlabsCS.Properties.AgentPoolProfiles[0].ProximityPlacementGroupID != ppgResourceID2 {
		t.Errorf("expected the agent pool profile proximity placement group to be %s", ppgResourceID2)
	}
}

func TestPlatformFaultDomainCountToVLabs(t *testing.T) {
	cs := getDefaultContainerService()
	cs.Properties.MasterProfile.PlatformFaultDomainCount = to.IntPtr(3)
@@ -581,6 +581,7 @@ func convertVLabsMasterProfile(vlabs *vlabs.MasterProfile, api *MasterProfile) {
	api.UltraSSDEnabled = vlabs.UltraSSDEnabled
	api.EncryptionAtHost = vlabs.EncryptionAtHost
	api.AuditDEnabled = vlabs.AuditDEnabled
	api.ProximityPlacementGroupID = vlabs.ProximityPlacementGroupID
	convertCustomFilesToAPI(vlabs, api)
	api.SysctlDConfig = map[string]string{}
	for key, val := range vlabs.SysctlDConfig {
@@ -622,6 +623,7 @@ func convertVLabsAgentPoolProfile(vlabs *vlabs.AgentPoolProfile, api *AgentPoolP
	api.DiskEncryptionSetID = vlabs.DiskEncryptionSetID
	api.UltraSSDEnabled = vlabs.UltraSSDEnabled
	api.EncryptionAtHost = vlabs.EncryptionAtHost
	api.ProximityPlacementGroupID = vlabs.ProximityPlacementGroupID

	api.CustomNodeLabels = map[string]string{}
	for k, v := range vlabs.CustomNodeLabels {
@@ -579,8 +579,9 @@ type MasterProfile struct {
	// Not used during PUT, returned as part of GET
	FQDN string `json:"fqdn,omitempty"`
	// True: uses cosmos etcd endpoint instead of installing etcd on masters
	CosmosEtcd *bool `json:"cosmosEtcd,omitempty"`
	SysctlDConfig map[string]string `json:"sysctldConfig,omitempty"`
	CosmosEtcd *bool `json:"cosmosEtcd,omitempty"`
	SysctlDConfig map[string]string `json:"sysctldConfig,omitempty"`
	ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
}

// ImageReference represents a reference to an Image resource in Azure.
@@ -660,6 +661,7 @@ type AgentPoolProfile struct {
	SysctlDConfig map[string]string `json:"sysctldConfig,omitempty"`
	UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"`
	EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"`
	ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
}

// AgentPoolProfileRole represents an agent role
@@ -451,7 +451,8 @@ type MasterProfile struct {
	FQDN string `json:"fqdn,omitempty"`

	// True: uses cosmos etcd endpoint instead of installing etcd on masters
	CosmosEtcd *bool `json:"cosmosEtcd,omitempty"`
	CosmosEtcd *bool `json:"cosmosEtcd,omitempty"`
	ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
}

// ImageReference represents a reference to an Image resource in Azure.
@@ -525,6 +526,7 @@ type AgentPoolProfile struct {
	EnableVMSSNodePublicIP *bool `json:"enableVMSSNodePublicIP,omitempty"`
	LoadBalancerBackendAddressPoolIDs []string `json:"loadBalancerBackendAddressPoolIDs,omitempty"`
	SysctlDConfig map[string]string `json:"sysctldConfig,omitempty"`
	ProximityPlacementGroupID string `json:"proximityPlacementGroupID,omitempty"`
}

// AgentPoolProfileRole represents an agent role
@@ -24,11 +24,12 @@ import (
)

var (
	validate *validator.Validate
	keyvaultIDRegex *regexp.Regexp
	labelValueRegex *regexp.Regexp
	labelKeyRegex *regexp.Regexp
	diskEncryptionSetIDRegex *regexp.Regexp
	validate *validator.Validate
	keyvaultIDRegex *regexp.Regexp
	labelValueRegex *regexp.Regexp
	labelKeyRegex *regexp.Regexp
	diskEncryptionSetIDRegex *regexp.Regexp
	proximityPlacementGroupIDRegex *regexp.Regexp
	// Any version has to be available in a container image from mcr.microsoft.com/oss/etcd-io/etcd:v[Version]
	etcdValidVersions = [...]string{"2.2.5", "2.3.0", "2.3.1", "2.3.2", "2.3.3", "2.3.4", "2.3.5", "2.3.6", "2.3.7", "2.3.8",
		"3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.0.4", "3.0.5", "3.0.6", "3.0.7", "3.0.8", "3.0.9", "3.0.10", "3.0.11", "3.0.12", "3.0.13", "3.0.14", "3.0.15", "3.0.16", "3.0.17",
@@ -114,6 +115,7 @@ func init() {
	labelValueRegex = regexp.MustCompile(labelValueFormat)
	labelKeyRegex = regexp.MustCompile(labelKeyFormat)
	diskEncryptionSetIDRegex = regexp.MustCompile(`^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/diskEncryptionSets/[^/\s]+$`)
	proximityPlacementGroupIDRegex = regexp.MustCompile(`^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/proximityPlacementGroups/[^/\s]+$`)
}

// Validate implements APIObject
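To make the accepted format concrete, here is a small standalone sketch (with made-up IDs) showing which values the new proximityPlacementGroupIDRegex accepts and rejects:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern that init() above compiles, built standalone for illustration.
	ppgIDRegex := regexp.MustCompile(`^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/proximityPlacementGroups/[^/\s]+$`)

	full := "/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/my-rg/providers/Microsoft.Compute/proximityPlacementGroups/my-ppg"
	bare := "my-ppg"
	truncated := "/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/my-rg/providers/Microsoft.Compute/proximityPlacementGroups"

	fmt.Println(ppgIDRegex.MatchString(full))      // true: complete ARM resource ID
	fmt.Println(ppgIDRegex.MatchString(bare))      // false: bare PPG name
	fmt.Println(ppgIDRegex.MatchString(truncated)) // false: missing the PPG name segment
}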
@@ -416,6 +418,10 @@ func (a *Properties) validateMasterProfile(isUpdate bool) error {
		return errors.New("singlePlacementGroup is only supported with VirtualMachineScaleSets")
	}

	if e := validateProximityPlacementGroupID(m.ProximityPlacementGroupID); e != nil {
		return e
	}

	distroValues := DistroValues
	if isUpdate {
		distroValues = append(distroValues, AKSDockerEngine, AKS1604Deprecated, AKS1804Deprecated)
@@ -556,6 +562,10 @@ func (a *Properties) validateAgentPoolProfiles(isUpdate bool) error {
		if agentPoolProfile.IsEphemeral() {
			log.Warnf("Ephemeral disks are enabled for Agent Pool %s. This feature in AKS-Engine is experimental, and data could be lost in some cases.", agentPoolProfile.Name)
		}

		if e := validateProximityPlacementGroupID(agentPoolProfile.ProximityPlacementGroupID); e != nil {
			return e
		}
	}

	return nil
@@ -1229,6 +1239,15 @@ func (a *AgentPoolProfile) validateLoadBalancerBackendAddressPoolIDs() error {
	return nil
}

func validateProximityPlacementGroupID(ppgID string) error {
	if ppgID != "" {
		if !proximityPlacementGroupIDRegex.MatchString(ppgID) {
			return errors.Errorf("ProximityPlacementGroupID(%s) is of incorrect format, correct format: %s", ppgID, proximityPlacementGroupIDRegex.String())
		}
	}
	return nil
}

func validateKeyVaultSecrets(secrets []KeyVaultSecrets, requireCertificateStore bool) error {
	for _, s := range secrets {
		if len(s.VaultCertificates) == 0 {
@@ -3303,6 +3303,121 @@ func TestProperties_ValidateSinglePlacementGroup(t *testing.T) {
	}
}

func TestProperties_ValidatePPGID(t *testing.T) {

	tests := []struct {
		name              string
		masterProfile     *MasterProfile
		agentPoolProfiles []*AgentPoolProfile
		expectedMsg       string
	}{
		{
			name: "Master profile VMAs with faulty PPG",
			masterProfile: &MasterProfile{
				Count:                     1,
				DNSPrefix:                 "foo",
				VMSize:                    "Standard_DS2_v2",
				AvailabilityProfile:       AvailabilitySet,
				ProximityPlacementGroupID: "faultyPPG",
			},
			expectedMsg: `ProximityPlacementGroupID(faultyPPG) is of incorrect format, correct format: ^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/proximityPlacementGroups/[^/\s]+$`,
		},
		{
			name: "Agent profile VMSS with faulty PPG",
			masterProfile: &MasterProfile{
				Count:               1,
				DNSPrefix:           "foo",
				VMSize:              "Standard_DS2_v2",
				AvailabilityProfile: AvailabilitySet,
			},
			agentPoolProfiles: []*AgentPoolProfile{
				{
					Name:                      "agentpool",
					VMSize:                    "Standard_DS2_v2",
					Count:                     4,
					AvailabilityProfile:       VirtualMachineScaleSets,
					ProximityPlacementGroupID: "faultyPPG",
				},
			},
			expectedMsg: `ProximityPlacementGroupID(faultyPPG) is of incorrect format, correct format: ^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/proximityPlacementGroups/[^/\s]+$`,
		},
		{
			name: "Faulty PPG",
			masterProfile: &MasterProfile{
				Count:               1,
				DNSPrefix:           "foo",
				VMSize:              "Standard_DS2_v2",
				AvailabilityProfile: AvailabilitySet,
			},
			agentPoolProfiles: []*AgentPoolProfile{
				{
					Name:                      "agentpool",
					VMSize:                    "Standard_DS2_v2",
					Count:                     4,
					AvailabilityProfile:       VirtualMachineScaleSets,
					ProximityPlacementGroupID: "/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/test-nodepool-ppg-rg/providers/Microsoft.Compute/proximityPlacementGroups",
				},
			},
			expectedMsg: `ProximityPlacementGroupID(/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/test-nodepool-ppg-rg/providers/Microsoft.Compute/proximityPlacementGroups) is of incorrect format, correct format: ^/subscriptions/\S+/resourceGroups/\S+/providers/Microsoft.Compute/proximityPlacementGroups/[^/\s]+$`,
		},
		{
			name: "Correct PPGs in both master and nodepool",
			masterProfile: &MasterProfile{
				Count:                     1,
				DNSPrefix:                 "foo",
				VMSize:                    "Standard_DS2_v2",
				AvailabilityProfile:       AvailabilitySet,
				ProximityPlacementGroupID: "/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/test-master-ppg-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-master-ppg",
			},
			agentPoolProfiles: []*AgentPoolProfile{
				{
					Name:                      "agentpool",
					VMSize:                    "Standard_DS2_v2",
					Count:                     4,
					AvailabilityProfile:       VirtualMachineScaleSets,
					ProximityPlacementGroupID: "/subscriptions/11111111-0000-1111-0000-111111111111/resourceGroups/test-nodepool-ppg-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-nodepool-ppg",
				},
			},
			expectedMsg: ``,
		},
		{
			name: "Without PPG settings",
			masterProfile: &MasterProfile{
				Count:               1,
				DNSPrefix:           "foo",
				VMSize:              "Standard_DS2_v2",
				AvailabilityProfile: AvailabilitySet,
			},
			agentPoolProfiles: []*AgentPoolProfile{
				{
					Name:                "agentpool",
					VMSize:              "Standard_DS2_v2",
					Count:               4,
					AvailabilityProfile: VirtualMachineScaleSets,
				},
			},
			expectedMsg: ``,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			cs := getK8sDefaultContainerService(true)
			cs.Properties.OrchestratorProfile.OrchestratorRelease = "1.15"
			cs.Properties.MasterProfile = test.masterProfile
			cs.Properties.AgentPoolProfiles = test.agentPoolProfiles
			err := cs.Validate(true)
			if err != nil {
				if err.Error() != test.expectedMsg {
					t.Errorf("expected error message : %s, but got %s", test.expectedMsg, err.Error())
				}
			}
		})
	}
}

func TestProperties_ValidateVNET(t *testing.T) {
	validVNetSubnetID := "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"
	validVNetSubnetID2 := "/subscriptions/SUB_ID2/resourceGroups/RG_NAME2/providers/Microsoft.Network/virtualNetworks/VNET_NAME2/subnets/SUBNET_NAME"
@@ -242,6 +242,10 @@ func TestGetAvailabilitySet(t *testing.T) {
	if *vmas.PlatformFaultDomainCount != expected {
		t.Fatalf("expected PlatformFaultDomainCount of %d but got %v", expected, *vmas.PlatformFaultDomainCount)
	}

	if vmas.ProximityPlacementGroup != nil && vmas.ProximityPlacementGroup.ID != nil {
		t.Fatalf("expected ProximityPlacementGroup of %q but got %v", "", *vmas.ProximityPlacementGroup.ID)
	}
	if *vmas.PlatformUpdateDomainCount != expected {
		t.Fatalf("expected PlatformUpdateDomainCount of %d but got %v", expected, *vmas.PlatformUpdateDomainCount)
	}
@@ -245,6 +245,11 @@ func TestGetAvailabilitySet(t *testing.T) {
	if *vmas.PlatformUpdateDomainCount != expected {
		t.Fatalf("expected PlatformUpdateDomainCount of %d but got %v", expected, *vmas.PlatformUpdateDomainCount)
	}

	if vmas.ProximityPlacementGroup != nil && vmas.ProximityPlacementGroup.ID != nil {
		t.Fatalf("expected ProximityPlacementGroup of %q but got %v", "", *vmas.ProximityPlacementGroup.ID)
	}

	l := "eastus"
	if *vmas.Location != l {
		t.Fatalf("expected Location of %s but got %v", l, *vmas.Location)
@@ -74,6 +74,9 @@ func TestMarshalJSONAvailabilitySetARM(t *testing.T) {
			AvailabilitySetProperties: &compute.AvailabilitySetProperties{
				PlatformFaultDomainCount:  to.Int32Ptr(3),
				PlatformUpdateDomainCount: to.Int32Ptr(3),
				ProximityPlacementGroup: &compute.SubResource{
					ID: to.StringPtr("ProximityPlacementGroupResourceID"),
				},
			},
		},
	},
@@ -81,7 +84,10 @@ func TestMarshalJSONAvailabilitySetARM(t *testing.T) {
	"apiVersion": "[variables('apiVersionCompute')]",
	"properties": {
		"platformFaultDomainCount": 3,
		"platformUpdateDomainCount": 3
		"platformUpdateDomainCount": 3,
		"proximityPlacementGroup": {
			"id": "ProximityPlacementGroupResourceID"
		}
	},
	"sku": {
		"name": "Aligned"
@@ -105,6 +111,9 @@ func TestMarshalJSONAvailabilitySetARM(t *testing.T) {
			},
			AvailabilitySetProperties: &compute.AvailabilitySetProperties{
				PlatformUpdateDomainCount: to.Int32Ptr(3),
				ProximityPlacementGroup: &compute.SubResource{
					ID: to.StringPtr("ProximityPlacementGroupResourceID"),
				},
			},
		},
	},
@@ -112,7 +121,10 @@ func TestMarshalJSONAvailabilitySetARM(t *testing.T) {
	"apiVersion": "[variables('apiVersionCompute')]",
	"properties": {
		"platformFaultDomainCount": "[if(contains(split('canadacentral,centralus,eastus,eastus2,northcentralus,northeurope,southcentralus,westeurope,westus',','),variables('location')),3,if(equals('centraluseuap',variables('location')),1,2))]",
		"platformUpdateDomainCount": 3
		"platformUpdateDomainCount": 3,
		"proximityPlacementGroup": {
			"id": "ProximityPlacementGroupResourceID"
		}
	},
	"sku": {
		"name": "Aligned"
@@ -34,6 +34,11 @@ func CreateAvailabilitySet(cs *api.ContainerService, isManagedDisks bool) Availa
		p := int32(*cs.Properties.MasterProfile.PlatformUpdateDomainCount)
		avSet.PlatformUpdateDomainCount = to.Int32Ptr(p)
	}
	if cs.Properties.MasterProfile.ProximityPlacementGroupID != "" {
		avSet.ProximityPlacementGroup = &compute.SubResource{
			ID: to.StringPtr(cs.Properties.MasterProfile.ProximityPlacementGroupID),
		}
	}
	avSet.Sku = &compute.Sku{
		Name: to.StringPtr("Aligned"),
	}
@@ -70,6 +75,12 @@ func createAgentAvailabilitySets(profile *api.AgentPoolProfile) AvailabilitySetA
		p := int32(*profile.PlatformUpdateDomainCount)
		avSet.PlatformUpdateDomainCount = to.Int32Ptr(p)
	}
	if profile.ProximityPlacementGroupID != "" {
		avSet.ProximityPlacementGroup = &compute.SubResource{
			ID: to.StringPtr(profile.ProximityPlacementGroupID),
		}
	}

	avSet.Sku = &compute.Sku{
		Name: to.StringPtr("Aligned"),
	}
@@ -34,6 +34,7 @@ const (
	windowsConfigurationFieldName = "windowsConfiguration"
	platformFaultDomainCountFieldName = "platformFaultDomainCount"
	singlePlacementGroupFieldName = "singlePlacementGroup"
	proximityPlacementGroupFieldName = "proximityPlacementGroup"

	// ARM resource Types
	nsgResourceType = "Microsoft.Network/networkSecurityGroups"
@@ -138,6 +139,7 @@ func (t *Transformer) RemoveImmutableResourceProperties(logger *logrus.Entry, te
		if resource.Type() == vmssResourceType {
			resource.RemoveProperty(logger, platformFaultDomainCountFieldName)
			resource.RemoveProperty(logger, singlePlacementGroupFieldName)
			resource.RemoveProperty(logger, proximityPlacementGroupFieldName)
		}
	}
}
@@ -194,6 +194,9 @@ func TestNormalizeForK8sVMASScalingUp_ShouldRemoveVMAS(t *testing.T) {
			"managed": "true",
			"platformFaultDomainCount": 2,
			"platformUpdateDomainCount": 3,
			"proximityPlacementGroup": map[string]interface{}{
				"id": "toremove",
			},
		},
		"type": "Microsoft.Compute/availabilitySets",
	}
@@ -231,6 +234,7 @@ func TestRemoveImmutableFields(t *testing.T) {
			"overprovision": false,
			"platformFaultDomainCount": 3,
			"singlePlacementGroup": true,
			"proximityPlacementGroup": "resourceID",
		},
		"type": "Microsoft.Compute/virtualMachineScaleSets",
	}
@@ -250,6 +254,7 @@ func TestRemoveImmutableFields(t *testing.T) {
	Expect(transformedVmss["properties"]).ToNot(HaveKey("platformFaultDomainCount"))
	Expect(transformedVmss["properties"]).ToNot(HaveKey("singlePlacementGroup"))
	Expect(transformedVmas["properties"]).ToNot(HaveKey("singlePlacementGroup"))
	Expect(transformedVmas["properties"]).ToNot(HaveKey("proximityPlacementGroup"))
}

func ValidateTemplate(templateMap map[string]interface{}, expectedFileContents []byte, testFileName string) {
@@ -110,6 +110,11 @@ func CreateMasterVMSS(cs *api.ContainerService) VirtualMachineScaleSetARM {
	if masterProfile.PlatformFaultDomainCount != nil {
		vmProperties.PlatformFaultDomainCount = to.Int32Ptr(int32(*masterProfile.PlatformFaultDomainCount))
	}
	if masterProfile.ProximityPlacementGroupID != "" {
		vmProperties.ProximityPlacementGroup = &compute.SubResource{
			ID: to.StringPtr(masterProfile.ProximityPlacementGroupID),
		}
	}
	vmProperties.SinglePlacementGroup = masterProfile.SinglePlacementGroup
	vmProperties.Overprovision = to.BoolPtr(false)
	vmProperties.UpgradePolicy = &compute.UpgradePolicy{
@@ -441,6 +446,12 @@ func CreateAgentVMSS(cs *api.ContainerService, profile *api.AgentPoolProfile) Vi
		vmssProperties.PlatformFaultDomainCount = to.Int32Ptr(int32(*profile.PlatformFaultDomainCount))
	}

	if profile.ProximityPlacementGroupID != "" {
		vmssProperties.ProximityPlacementGroup = &compute.SubResource{
			ID: to.StringPtr(profile.ProximityPlacementGroupID),
		}
	}

	if to.Bool(profile.VMSSOverProvisioningEnabled) {
		vmssProperties.DoNotRunExtensionsOnOverprovisionedVMs = to.BoolPtr(true)
	}
File diff suppressed because one or more lines are too long
@@ -640,6 +640,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() {
	Expect(cs.Properties.MasterProfile.PlatformFaultDomainCount).To(BeNil())
	for _, pool := range cs.Properties.AgentPoolProfiles {
		Expect(pool.PlatformFaultDomainCount).To(BeNil())
		Expect(pool.ProximityPlacementGroupID).To(BeEmpty())
	}
})
})
@@ -703,6 +704,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() {
	Expect(cs.Properties.MasterProfile.PlatformFaultDomainCount).To(BeNil())
	for _, pool := range cs.Properties.AgentPoolProfiles {
		Expect(pool.PlatformFaultDomainCount).To(BeNil())
		Expect(pool.ProximityPlacementGroupID).To(BeEmpty())
	}
})
@@ -308,6 +308,11 @@ func Build(cfg *config.Config, masterSubnetID string, agentSubnetIDs []string, i
			str = strings.Replace(str, "RESOURCE_GROUP", config.InfraResourceGroup, 1)
			pool.DiskEncryptionSetID = str
		}
		if pool.ProximityPlacementGroupID != "" {
			str := strings.Replace(pool.ProximityPlacementGroupID, "SUB_ID", config.SubscriptionID, 1)
			str = strings.Replace(str, "RESOURCE_GROUP", config.InfraResourceGroup, 1)
			pool.ProximityPlacementGroupID = str
		}
	}

	if config.Distro != "" {
@@ -1,7 +1,7 @@
{
  "env": {
    "REGION_OPTIONS": "northeurope",
    "SKIP_TESTS": "true"
    "SKIP_TESTS": "false"
  },
  "options": {
    "allowedOrchestratorVersions": ["latestReleasedVersion"]
@@ -24,7 +24,8 @@
      "count": 2,
      "vmSize": "Standard_D2s_v3",
      "availabilityProfile": "VirtualMachineScaleSets",
      "diskEncryptionSetID": "/subscriptions/SUB_ID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.Compute/diskEncryptionSets/des-northeurope"
      "diskEncryptionSetID": "/subscriptions/SUB_ID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.Compute/diskEncryptionSets/des-northeurope",
      "proximityPlacementGroupID": "/subscriptions/SUB_ID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.Compute/proximityPlacementGroups/ppg-northeurope"
    }
  ],
  "linuxProfile": {