* Fix prow set up

* e2e changes

* removing openshift artifacts

* accelerated networking rationalization, with tests

* remove additional sed statements for ip-masq addons

* Update go-dev tools image for go 1.11.2

* remove unused azconst methods

* add support for PB6 vm skus

* update azure_const unit test

* update tiller versions in the recent versions of kubernetes

* VSTS VHD pipeline hosted ubuntu pool

* azureconst cruft

* scale: persist scale down in api model

* Add support for Kubernetes 1.11.5

* Fix docker-engine install in VHD pipeline

* remove IsOpenShift from E2E

* replace premature aks-engine reference

* make validate-headers doesn’t exist, revert rename
Jack Francis 2018-11-29 11:46:29 -08:00 committed by GitHub
Parent 7df12dd1ea
Commit ab0fd8ddfc
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
34 changed files: 445 additions and 1568 deletions

View file

@ -3,7 +3,7 @@ version: 2
defaults: &defaults
working_directory: /go/src/github.com/Azure/acs-engine
docker:
- image: quay.io/deis/go-dev:v1.17.2
- image: quay.io/deis/go-dev:v1.17.3
environment:
GOPATH: /go
@ -24,9 +24,6 @@ jobs:
- run:
name: Install dependencies
command: make bootstrap
- run:
name: Run validation rules
command: make validate-generated
- run:
name: Run linting rules
command: make test-style

View file

@ -2,13 +2,13 @@ prow: prow-config prow-secrets prow-services
.PHONY: prow
prow-config:
kubectl create cm config --from-file=config=config.yaml
kubectl create cm plugins --from-file=plugins=plugins.yaml
kubectl create cm config --from-file=config.yaml=config.yaml
kubectl create cm plugins --from-file=plugins.yaml=plugins.yaml
.PHONY: prow-config
prow-config-update:
kubectl create cm config --from-file=config=config.yaml -o yaml --dry-run | kubectl replace -f -
kubectl create cm plugins --from-file=plugins=plugins.yaml -o yaml --dry-run | kubectl replace -f -
kubectl create cm config --from-file=config.yaml=config.yaml -o yaml --dry-run | kubectl replace -f -
kubectl create cm plugins --from-file=plugins.yaml=plugins.yaml -o yaml --dry-run | kubectl replace -f -
.PHONY: prow-config-update
prow-secrets:
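The key=file form matters here: each Prow component mounts these ConfigMaps as volumes and reads a file named after the data key (e.g. /etc/config/config.yaml), so the key must be the filename, not an arbitrary label. A quick way to see the difference is to dry-run the exact command from the Makefile and inspect the data key (a sketch, run against the same config.yaml):

# The old --from-file=config=config.yaml form produced a data key named
# "config"; this form produces "config.yaml", the filename Prow expects
# to find in the mounted volume.
kubectl create cm config --from-file=config.yaml=config.yaml --dry-run -o yaml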

View file

@ -6,10 +6,10 @@ Prow in [upstream docs][0].
## acs-engine setup
Prow is optimized to run as a Kubernetes application. There are some pre-installation
steps that need to happen in a new Kubernetes cluster before deploying Prow. These
involve setting up an Ingress controller and a mechanism to do TLS. The [Azure docs][1]
explain how to setup Ingress with TLS on top of a Kubernetes cluster in Azure.
Deploy a new Kubernetes cluster (e.g. `az aks create -g acse-test-prow-ci -n prow`).
Set up an Ingress controller and a mechanism to do TLS. The [Azure docs][1]
explain how to set up Ingress with TLS on top of a Kubernetes cluster in Azure. (Make sure you specify `--set rbac.create=true` when creating the ingress controller.)
A Github webhook also needs to be setup in the repo that points to `dns-name/hook`.
`dns-name` is the DNS name setup during the DNS configuration of the Ingress controller.
@ -35,6 +35,5 @@ appropriately on Github. `deck` is installed as the Prow frontend. Last, `tide`
is also installed that takes care of merging pull requests that pass all tests
and satisfy a set of label requirements.
[0]: https://github.com/kubernetes/test-infra/tree/master/prow#prow
[1]: https://docs.microsoft.com/en-us/azure/aks/ingress
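Condensed, the pre-installation steps above look something like the following (a sketch only; the resource group and cluster names come from the example above, and the chart flag assumes the nginx ingress chart from the linked Azure docs; Helm/Tiller setup and TLS wiring are omitted):

az aks create -g acse-test-prow-ci -n prow
az aks get-credentials -g acse-test-prow-ci -n prow
helm install stable/nginx-ingress --set rbac.create=true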

View file

@ -1,7 +1,7 @@
log_level: debug
tide:
# target_url: http://ci-bot-aks-ingress.eastus.cloudapp.azure.com/tide.html
# target_url: http://prow-ci-bot-ingress.eastus.cloudapp.azure.com/tide.html
merge_method:
Azure/acs-engine: squash
queries:

View file

@ -39,7 +39,7 @@ items:
spec:
containers:
- name: hook
image: quay.io/kargakis/hook:workaround
image: registry.svc.ci.openshift.org/ci/hook:latest
imagePullPolicy: IfNotPresent
args:
- --dry-run=false

View file

@ -9,9 +9,9 @@ spec:
tls:
- secretName: prow-tls
hosts:
- ci-bot-aks-ingress.eastus.cloudapp.azure.com
- prow-ci-bot-ingress.eastus.cloudapp.azure.com
rules:
- host: ci-bot-aks-ingress.eastus.cloudapp.azure.com
- host: prow-ci-bot-ingress.eastus.cloudapp.azure.com
http:
paths:
- path: /*

View file

@ -38,7 +38,7 @@ items:
serviceAccountName: tide
containers:
- name: tide
image: quay.io/kargakis/tide:workaround
image: registry.svc.ci.openshift.org/ci/tide:latest
imagePullPolicy: IfNotPresent
args:
- --dry-run=false

View file

@ -9,7 +9,7 @@ trigger: none
phases:
- phase: build_vhd
queue:
name: Hosted Linux Preview
name: Hosted Ubuntu 1604
timeoutInMinutes: 120
steps:
- script: |

View file

@ -3,7 +3,7 @@ DIST_DIRS = find * -type d -exec
.NOTPARALLEL:
.PHONY: bootstrap build test test_fmt validate-generated fmt lint ci devenv
.PHONY: bootstrap build test test_fmt fmt lint ci devenv
ifdef DEBUG
GOFLAGS := -gcflags="-N -l"
@ -25,7 +25,7 @@ GITTAG := $(VERSION_SHORT)
endif
REPO_PATH := github.com/Azure/acs-engine
DEV_ENV_IMAGE := quay.io/deis/go-dev:v1.17.2
DEV_ENV_IMAGE := quay.io/deis/go-dev:v1.17.3
DEV_ENV_WORK_DIR := /go/src/${REPO_PATH}
DEV_ENV_OPTS := --rm -v ${CURDIR}:${DEV_ENV_WORK_DIR} -w ${DEV_ENV_WORK_DIR} ${DEV_ENV_VARS}
DEV_ENV_CMD := docker run ${DEV_ENV_OPTS} ${DEV_ENV_IMAGE}
@ -44,10 +44,6 @@ all: build
dev:
$(DEV_ENV_CMD_IT) bash
.PHONY: validate-generated
validate-generated: bootstrap
./scripts/validate-generated.sh
.PHONY: validate-dependencies
validate-dependencies: bootstrap
./scripts/validate-dependencies.sh

View file

@ -55,6 +55,7 @@ const (
scaleName = "scale"
scaleShortDescription = "Scale an existing Kubernetes or OpenShift cluster"
scaleLongDescription = "Scale an existing Kubernetes or OpenShift cluster by specifying increasing or decreasing the node count of an agentpool"
apiModelFilename = "apimodel.json"
)
// NewScaleCmd run a command to upgrade a Kubernetes cluster
@ -137,7 +138,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error {
}
// load apimodel from the deployment directory
sc.apiModelPath = path.Join(sc.deploymentDirectory, "apimodel.json")
sc.apiModelPath = path.Join(sc.deploymentDirectory, apiModelFilename)
if _, err = os.Stat(sc.apiModelPath); os.IsNotExist(err) {
return errors.Errorf("specified api model does not exist (%s)", sc.apiModelPath)
@ -308,7 +309,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
return err
}
return nil
return sc.saveAPIModel()
}
} else {
for vmssListPage, err := sc.client.ListVirtualMachineScaleSets(ctx, sc.resourceGroupName); vmssListPage.NotDone(); vmssListPage.Next() {
@ -423,6 +424,11 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
return err
}
return sc.saveAPIModel()
}
func (sc *scaleCmd) saveAPIModel() error {
var err error
apiloader := &api.Apiloader{
Translator: &i18n.Translator{
Locale: sc.locale,
@ -447,7 +453,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error {
},
}
return f.SaveFile(sc.deploymentDirectory, "apimodel.json", b)
return f.SaveFile(sc.deploymentDirectory, apiModelFilename, b)
}
func (sc *scaleCmd) vmInAgentPool(vmName string, tags map[string]*string) bool {
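With saveAPIModel factored out and called from both exit paths of run(), a scale down is now written back to apimodel.json just like a scale up. A hypothetical invocation, with illustrative values:

# The node count in _output/acse-test/apimodel.json now reflects the result
# of the scale operation, including scale down.
acs-engine scale --subscription-id $SUBSCRIPTION_ID \
  --resource-group acse-test --location westus2 \
  --deployment-dir _output/acse-test \
  --node-pool agentpool1 --new-node-count 3 \
  --master-FQDN acse-test.westus2.cloudapp.azure.com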

View file

@ -1,5 +1,5 @@
$REPO_PATH = "github.com/Azure/acs-engine"
$DEV_ENV_IMAGE = "quay.io/deis/go-dev:v1.17.2"
$DEV_ENV_IMAGE = "quay.io/deis/go-dev:v1.17.3"
$DEV_ENV_WORK_DIR = "/go/src/$REPO_PATH"
docker.exe run -it --rm -w $DEV_ENV_WORK_DIR -v `"$($PWD)`":$DEV_ENV_WORK_DIR $DEV_ENV_IMAGE bash

View file

@ -14,6 +14,7 @@ installEtcd
installDeps
if [[ ${FEATURE_FLAGS} == *"docker-engine"* ]]; then
DOCKER_ENGINE_REPO="https://apt.dockerproject.org/repo"
installDockerEngine
installGPUDrivers
else
@ -90,7 +91,7 @@ for TILLER_VERSION in ${TILLER_VERSIONS}; do
pullContainerImage "docker" "gcr.io/kubernetes-helm/tiller:v${TILLER_VERSION}"
done
CLUSTER_AUTOSCALER_VERSIONS="1.3.3 1.3.1 1.3.0 1.2.2 1.1.2"
CLUSTER_AUTOSCALER_VERSIONS="1.3.4 1.3.3 1.3.1 1.3.0 1.2.2 1.1.2"
for CLUSTER_AUTOSCALER_VERSION in ${CLUSTER_AUTOSCALER_VERSIONS}; do
pullContainerImage "docker" "k8s.gcr.io/cluster-autoscaler:v${CLUSTER_AUTOSCALER_VERSION}"
done
@ -153,7 +154,7 @@ done
pullContainerImage "docker" "busybox"
# TODO: fetch supported k8s versions from an acs-engine command instead of hardcoding them here
K8S_VERSIONS="1.7.15 1.7.16 1.8.14 1.8.15 1.9.10 1.9.11 1.10.8 1.10.9 1.11.3 1.11.4 1.12.1 1.12.2"
K8S_VERSIONS="1.7.15 1.7.16 1.8.14 1.8.15 1.9.10 1.9.11 1.10.8 1.10.9 1.11.4 1.11.5 1.12.1 1.12.2"
for KUBERNETES_VERSION in ${K8S_VERSIONS}; do
HYPERKUBE_URL="k8s.gcr.io/hyperkube-amd64:v${KUBERNETES_VERSION}"

View file

@ -58,7 +58,11 @@ metadata:
data:
ip-masq-agent: |-
nonMasqueradeCIDRs:
- <nonmasqCIDR>
- <nonmasqCNIIP>
masqLinkLocal: <masqLink>
- {{ContainerConfig "non-masquerade-cidr"}}
{{- if ContainerConfig "non-masq-cni-cidr"}}
- {{ContainerConfig "non-masq-cni-cidr"}}
masqLinkLocal: true
{{else -}}
masqLinkLocal: false
{{end -}}
resyncInterval: 60s
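Because the values are now filled in by the ContainerConfig template functions at generate time (the sed rewrites are removed in the cloud-init change below), the rendered result can be inspected on a running cluster with something like the following (the ConfigMap name and namespace are assumptions based on the manifest above):

kubectl -n kube-system get configmap ip-masq-agent -o yaml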

View file

@ -350,14 +350,6 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER
{{if HasCustomSearchDomain}}
sed -i "s|<searchDomainName>|{{WrapAsParameter "searchDomainName"}}|g; s|<searchDomainRealmUser>|{{WrapAsParameter "searchDomainRealmUser"}}|g; s|<searchDomainRealmPassword>|{{WrapAsParameter "searchDomainRealmPassword"}}|g" /opt/azure/containers/setup-custom-search-domains.sh
{{end}}
a=/etc/kubernetes/addons/ip-masq-agent.yaml
sed -i "s|<nonmasqCIDR>|{{WrapAsParameter "kubernetesNonMasqueradeCidr"}}|g" $a
{{if IsAzureCNI}}
sed -i "s|<nonmasqCNIIP>|168.63.129.16/32|g; s|<masqLink>|true|g" $a
{{else}}
sed -i "\|<nonmasqCNIIP>|d" $a
sed -i "s|<masqLink>|false|g" $a
{{end}}
- path: /opt/azure/containers/mountetcd.sh
permissions: "0744"

View file

@ -195,15 +195,6 @@
},
"type": "string"
},
{{if not IsHostedMaster}}
"kubernetesNonMasqueradeCidr": {
"metadata": {
"description": "kubernetesNonMasqueradeCidr cluster subnet"
},
"defaultValue": "{{GetDefaultVNETCIDR}}",
"type": "string"
},
{{end}}
"kubernetesKubeletClusterDomain": {
"metadata": {
"description": "--cluster-domain Kubelet config"

View file

@ -96,17 +96,6 @@ func assignKubernetesParameters(properties *api.Properties, parametersMap params
CloudProviderRateLimitBucket: kubernetesConfig.CloudProviderRateLimitBucket,
})
addValue(parametersMap, "kubeClusterCidr", kubernetesConfig.ClusterSubnet)
if !properties.IsHostedMasterProfile() {
if properties.OrchestratorProfile.IsAzureCNI() {
if properties.MasterProfile != nil && properties.MasterProfile.IsCustomVNET() {
addValue(parametersMap, "kubernetesNonMasqueradeCidr", properties.MasterProfile.VnetCidr)
} else {
addValue(parametersMap, "kubernetesNonMasqueradeCidr", DefaultVNETCIDR)
}
} else {
addValue(parametersMap, "kubernetesNonMasqueradeCidr", properties.OrchestratorProfile.KubernetesConfig.ClusterSubnet)
}
}
addValue(parametersMap, "kubernetesKubeletClusterDomain", kubernetesConfig.KubeletConfig["--cluster-domain"])
addValue(parametersMap, "dockerBridgeCidr", kubernetesConfig.DockerBridgeSubnet)
addValue(parametersMap, "networkPolicy", kubernetesConfig.NetworkPolicy)

View file

@ -534,16 +534,13 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
if cs.Properties.OrchestratorProfile.OrchestratorType == api.DCOS {
return helpers.GetDCOSMasterAllowedSizes()
}
return helpers.GetMasterAgentAllowedSizes()
return helpers.GetKubernetesAllowedSizes()
},
"GetDefaultVNETCIDR": func() string {
return DefaultVNETCIDR
},
"GetAgentAllowedSizes": func() string {
if cs.Properties.OrchestratorProfile.IsKubernetes() || cs.Properties.OrchestratorProfile.IsOpenShift() {
return helpers.GetKubernetesAgentAllowedSizes()
}
return helpers.GetMasterAgentAllowedSizes()
return helpers.GetKubernetesAllowedSizes()
},
"getSwarmVersions": func() string {
return getSwarmVersions(api.SwarmVersion, api.SwarmDockerComposeVersion)

View file

@ -204,6 +204,10 @@ func (cs *ContainerService) setAddonsConfig(isUpdate bool) {
Image: specConfig.KubernetesImageBase + "ip-masq-agent-amd64:v2.0.0",
},
},
Config: map[string]string{
"non-masquerade-cidr": cs.Properties.GetNonMasqueradeCIDR(),
"non-masq-cni-cidr": cs.Properties.GetAzureCNICidr(),
},
}
defaultAzureCNINetworkMonitorAddonsConfig := KubernetesAddon{

View file

@ -78,8 +78,9 @@ var AllKubernetesSupportedVersions = map[string]bool{
"1.11.0": false,
"1.11.1": false,
"1.11.2": false,
"1.11.3": true,
"1.11.3": false,
"1.11.4": true,
"1.11.5": true,
"1.12.0-alpha.1": false,
"1.12.0-beta.0": false,
"1.12.0-beta.1": false,

View file

@ -75,6 +75,8 @@ const (
DefaultFirstConsecutiveKubernetesStaticIP = "10.240.255.5"
// DefaultFirstConsecutiveKubernetesStaticIPVMSS specifies the static IP address on Kubernetes master 0 of VMSS
DefaultFirstConsecutiveKubernetesStaticIPVMSS = "10.240.0.4"
// DefaultCNICIDR specifies the default value for the CNI non-masquerade CIDR
DefaultCNICIDR = "168.63.129.16/32"
// DefaultKubernetesFirstConsecutiveStaticIPOffset specifies the IP address offset of master 0
// when VNET integration is enabled.
DefaultKubernetesFirstConsecutiveStaticIPOffset = 5
@ -194,6 +196,8 @@ const (
ARMVirtualNetworksResourceType = "virtualNetworks"
// DefaultAcceleratedNetworkingWindowsEnabled determines the acs-engine provided default for enabling accelerated networking on Windows nodes
DefaultAcceleratedNetworkingWindowsEnabled = false
// DefaultAcceleratedNetworking determines the acs-engine provided default for enabling accelerated networking on Linux nodes
DefaultAcceleratedNetworking = true
// DefaultDNSAutoscalerAddonName is the name of the dns-autoscaler addon
DefaultDNSAutoscalerAddonName = "dns-autoscaler"
)

View file

@ -431,11 +431,11 @@ func (p *Properties) setAgentProfileDefaults(isUpgrade, isScale bool) {
// On instances that support hyperthreading, Accelerated Networking is supported on VM instances with 4 or more vCPUs.
// Supported series are: D/DSv3, E/ESv3, Fsv2, and Ms/Mms.
if profile.AcceleratedNetworkingEnabled == nil {
profile.AcceleratedNetworkingEnabled = helpers.PointerToBool(!isUpgrade && !isScale && helpers.AcceleratedNetworkingSupported(profile.VMSize))
profile.AcceleratedNetworkingEnabled = helpers.PointerToBool(DefaultAcceleratedNetworking && !isUpgrade && !isScale && helpers.AcceleratedNetworkingSupported(profile.VMSize))
}
if profile.AcceleratedNetworkingEnabledWindows == nil {
profile.AcceleratedNetworkingEnabledWindows = helpers.PointerToBool(DefaultAcceleratedNetworkingWindowsEnabled)
profile.AcceleratedNetworkingEnabledWindows = helpers.PointerToBool(DefaultAcceleratedNetworkingWindowsEnabled && !isUpgrade && !isScale && helpers.AcceleratedNetworkingSupported(profile.VMSize))
}
if profile.OSType != Windows {

View file

@ -183,7 +183,7 @@ func TestAddonsIndexByName(t *testing.T) {
func TestAssignDefaultAddonImages(t *testing.T) {
addonNameMap := map[string]string{
DefaultTillerAddonName: "gcr.io/kubernetes-helm/tiller:v2.8.1",
DefaultTillerAddonName: "gcr.io/kubernetes-helm/tiller:v2.11.0",
DefaultACIConnectorAddonName: "microsoft/virtual-kubelet:latest",
DefaultClusterAutoscalerAddonName: "k8s.gcr.io/cluster-autoscaler:v1.2.2",
DefaultBlobfuseFlexVolumeAddonName: "mcr.microsoft.com/k8s/flexvolume/blobfuse-flexvolume",
@ -372,6 +372,74 @@ func TestAssignDefaultAddonVals(t *testing.T) {
}
func TestAcceleratedNetworking(t *testing.T) {
mockCS := getMockBaseContainerService("1.10.8")
mockCS.Properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled = nil
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows = nil
isUpgrade := true
mockCS.SetPropertiesDefaults(isUpgrade, false)
// In upgrade scenario, nil AcceleratedNetworkingEnabled should always render as false (i.e., we never turn on this feature on an existing vm that didn't have it before)
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled) {
t.Errorf("expected nil acceleratedNetworkingEnabled to be false after upgrade, instead got %t", helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled))
}
// In upgrade scenario, nil AcceleratedNetworkingEnabledWindows should always render as false (i.e., we never turn on this feature on an existing vm that didn't have it before)
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows) {
t.Errorf("expected nil acceleratedNetworkingEnabledWindows to be false after upgrade, instead got %t", helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows))
}
mockCS = getMockBaseContainerService("1.10.8")
mockCS.Properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled = nil
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows = nil
isScale := true
mockCS.SetPropertiesDefaults(false, isScale)
// In scale scenario, nil AcceleratedNetworkingEnabled should always render as false (i.e., we never turn on this feature on an existing agent pool / vmss that didn't have it before)
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled) {
t.Errorf("expected nil acceleratedNetworkingEnabled to be false after upgrade, instead got %t", helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled))
}
// In scale scenario, nil AcceleratedNetworkingEnabledWindows should always render as false (i.e., we never turn on this feature on an existing vm that didn't have it before)
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows) {
t.Errorf("expected nil acceleratedNetworkingEnabledWindows to be false after upgrade, instead got %t", helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows))
}
mockCS = getMockBaseContainerService("1.10.8")
mockCS.Properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled = nil
mockCS.Properties.AgentPoolProfiles[0].VMSize = "Standard_D2_v2"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows = nil
mockCS.Properties.AgentPoolProfiles[0].VMSize = "Standard_D2_v2"
mockCS.SetPropertiesDefaults(false, false)
// In create scenario, nil AcceleratedNetworkingEnabled should be the defaults
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled) != DefaultAcceleratedNetworking {
t.Errorf("expected default acceleratedNetworkingEnabled to be %t, instead got %t", DefaultAcceleratedNetworking, helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled))
}
// In create scenario, nil AcceleratedNetworkingEnabledWindows should be the defaults
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows) != DefaultAcceleratedNetworkingWindowsEnabled {
t.Errorf("expected default acceleratedNetworkingEnabledWindows to be %t, instead got %t", DefaultAcceleratedNetworkingWindowsEnabled, helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows))
}
mockCS = getMockBaseContainerService("1.10.8")
mockCS.Properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled = nil
mockCS.Properties.AgentPoolProfiles[0].VMSize = "Standard_D666_v2"
mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows = nil
mockCS.Properties.AgentPoolProfiles[0].VMSize = "Standard_D666_v2"
mockCS.SetPropertiesDefaults(false, false)
// In non-supported VM SKU scenario, acceleratedNetworkingEnabled should always be false
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled) {
t.Errorf("expected acceleratedNetworkingEnabled to be %t for an unsupported VM SKU, instead got %t", false, helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabled))
}
// In non-supported VM SKU scenario, acceleratedNetworkingEnabledWindows should always be false
if helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows) {
t.Errorf("expected acceleratedNetworkingEnabledWindows to be %t for an unsupported VM SKU, instead got %t", false, helpers.IsTrueBoolPointer(mockCS.Properties.AgentPoolProfiles[0].AcceleratedNetworkingEnabledWindows))
}
}
func TestKubeletFeatureGatesEnsureFeatureGatesOnAgentsFor1_6_0(t *testing.T) {
mockCS := getMockBaseContainerService("1.6.0")
properties := mockCS.Properties

View file

@ -22,7 +22,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.8",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.10",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.4.0",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -55,7 +55,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.7",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.10",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.4.0",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -87,12 +87,12 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.6",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.10",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.4.0",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4",
"cluster-autoscaler": "cluster-autoscaler:v1.3.0",
"cluster-autoscaler": "cluster-autoscaler:v1.3.4",
NVIDIADevicePluginAddonName: "k8s-device-plugin:1.11",
"k8s-dns-sidecar": "k8s-dns-sidecar-amd64:1.14.10",
"nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency,
@ -119,7 +119,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.6",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.8",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.3.1",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -151,7 +151,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.6",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.8",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.3.1",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -182,7 +182,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.6",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.8",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.3.1",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -211,7 +211,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v8.6",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.5",
"pause": "pause-amd64:3.1",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.3.1",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",
@ -240,7 +240,7 @@ var k8sComponentVersions = map[string]map[string]string{
"addon-manager": "kube-addon-manager-amd64:v6.5",
"dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.5",
"pause": "pause-amd64:3.0",
"tiller": "tiller:v2.8.1",
"tiller": "tiller:v2.11.0",
"rescheduler": "rescheduler:v0.3.1",
"aci-connector": "virtual-kubelet:latest",
ContainerMonitoringAddonName: "oms:ciprod10162018-2",

View file

@ -958,6 +958,32 @@ func (p *Properties) HasAvailabilityZones() bool {
return hasZones
}
// GetNonMasqueradeCIDR returns the non-masquerade CIDR for the ip-masq-agent.
func (p *Properties) GetNonMasqueradeCIDR() string {
var nonMasqCidr string
if !p.IsHostedMasterProfile() {
if p.OrchestratorProfile.IsAzureCNI() {
if p.MasterProfile != nil && p.MasterProfile.IsCustomVNET() {
nonMasqCidr = p.MasterProfile.VnetCidr
} else {
nonMasqCidr = DefaultVNETCIDR
}
} else {
nonMasqCidr = p.OrchestratorProfile.KubernetesConfig.ClusterSubnet
}
}
return nonMasqCidr
}
// GetAzureCNICidr returns the default CNI Cidr if Azure CNI is enabled.
func (p *Properties) GetAzureCNICidr() string {
var masqCNIIP string
if p.OrchestratorProfile != nil && p.OrchestratorProfile.IsAzureCNI() {
masqCNIIP = DefaultCNICIDR
}
return masqCNIIP
}
// IsCustomVNET returns true if the customer brought their own VNET
func (m *MasterProfile) IsCustomVNET() bool {
return len(m.VnetSubnetID) > 0

View file

@ -23,7 +23,6 @@ def getAllSizes():
return sizeMap
min_cores_dcos = 2
min_cores_k8s = 1
dcos_masters_ephemeral_disk_min = 16384
def getDcosMasterMap(sizeMap):
@ -37,16 +36,6 @@ def getDcosMasterMap(sizeMap):
return masterMap
def getMasterAgentMap(sizeMap):
agentMap = {}
for key in sizeMap.keys():
size = sizeMap[key]
if size['numberOfCores'] >= min_cores_k8s:
agentMap[size['name']] = size
return agentMap
def getLocations():
locations = json.loads(subprocess.check_output(['az', 'account', 'list-locations']).decode('utf-8'))
@ -71,7 +60,7 @@ def getStorageAccountType(sizeName):
else:
return "Standard_LRS"
def getFileContents(dcosMasterMap, masterAgentMap, kubernetesAgentMap, sizeMap, locations):
def getFileContents(dcosMasterMap, kubernetesSizeMap, sizeMap, locations):
text = r"""package helpers
// AUTOGENERATED FILE """
@ -114,23 +103,11 @@ func GetDCOSMasterAllowedSizes() string {
`
}
// GetMasterAgentAllowedSizes returns the agent allowed sizes
func GetMasterAgentAllowedSizes() string {
// GetKubernetesAllowedSizes returns the allowed sizes for Kubernetes agent
func GetKubernetesAllowedSizes() string {
return ` "allowedValues": [
"""
masterAgentMapKeys = sorted(masterAgentMap.keys())
for key in masterAgentMapKeys[:-1]:
text += ' "' + key + '",\n'
text += ' "' + masterAgentMapKeys[-1] + '"\n'
text += r""" ],
`
}
// GetKubernetesAgentAllowedSizes returns the allowed sizes for Kubernetes agent
func GetKubernetesAgentAllowedSizes() string {
return ` "allowedValues": [
"""
kubernetesAgentMapKeys = sorted(kubernetesAgentMap.keys())
kubernetesAgentMapKeys = sorted(kubernetesSizeMap.keys())
for key in kubernetesAgentMapKeys[:-1]:
text += ' "' + key + '",\n'
text += ' "' + kubernetesAgentMapKeys[-1] + '"\n'
@ -144,7 +121,7 @@ func GetSizeMap() string {
"""
mergedMap = {}
for key in kubernetesAgentMapKeys:
size = kubernetesAgentMap[key]
size = kubernetesSizeMap[key]
if not key in mergedMap:
mergedMap[size['name']] = size
@ -163,50 +140,16 @@ func GetSizeMap() string {
text += r""" }
`
}
// GetClassicAllowedSizes returns the classic allowed sizes
func GetClassicAllowedSizes() string {
return ` "allowedValues": [
"""
sizeMapKeys = sorted(sizeMap.keys())
for key in sizeMapKeys[:-1]:
text += ' "' + sizeMap[key]['name'] + '",\n'
key = sizeMapKeys[-1]
text += ' "' + sizeMap[key]['name'] + '"\n'
text += r""" ],
`
}
// GetClassicSizeMap returns the size / storage map
func GetClassicSizeMap() string {
return ` "vmSizesMap": {
"""
sizeMapKeys = sorted(sizeMap.keys())
for key in sizeMapKeys[:-1]:
text += ' "' + sizeMap[key]['name'] + '": {\n'
storageAccountType = getStorageAccountType(size['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n },\n'
key = sizeMapKeys[-1]
text += ' "' + sizeMap[key]['name'] + '": {\n'
storageAccountType = getStorageAccountType(size['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n }\n'
text += r""" }
`
}"""
return text
def main():
outfile = 'pkg/helpers/azureconst.go'
allSizes = getAllSizes()
dcosMasterMap = getDcosMasterMap(allSizes)
masterAgentMap = getMasterAgentMap(allSizes)
kubernetesAgentMap = allSizes
kubernetesSizeMap = getAllSizes()
locations = getLocations()
text = getFileContents(dcosMasterMap, masterAgentMap, kubernetesAgentMap, allSizes, locations)
text = getFileContents(dcosMasterMap, kubernetesSizeMap, locations)
with open(outfile, 'w') as f:
f.write(text)

File diff not shown because of its large size.

View file

@ -0,0 +1,240 @@
package helpers
import (
"strings"
"testing"
)
func TestKubernetesAllowedSizes(t *testing.T) {
sizes := GetKubernetesAllowedSizes()
if len(sizes) == 0 {
t.Errorf("expected GetKubernetesAllowedSizes to return a non empty string")
}
expectedSizes := []string{
"Standard_A0",
"Standard_A1",
"Standard_A10",
"Standard_A11",
"Standard_A1_v2",
"Standard_A2",
"Standard_A2_v2",
"Standard_A2m_v2",
"Standard_A3",
"Standard_A4",
"Standard_A4_v2",
"Standard_A4m_v2",
"Standard_A5",
"Standard_A6",
"Standard_A7",
"Standard_A8",
"Standard_A8_v2",
"Standard_A8m_v2",
"Standard_A9",
"Standard_B1ms",
"Standard_B1s",
"Standard_B2ms",
"Standard_B2s",
"Standard_B4ms",
"Standard_B8ms",
"Standard_D1",
"Standard_D11",
"Standard_D11_v2",
"Standard_D11_v2_Promo",
"Standard_D12",
"Standard_D12_v2",
"Standard_D12_v2_Promo",
"Standard_D13",
"Standard_D13_v2",
"Standard_D13_v2_Promo",
"Standard_D14",
"Standard_D14_v2",
"Standard_D14_v2_Promo",
"Standard_D15_v2",
"Standard_D16_v3",
"Standard_D16s_v3",
"Standard_D1_v2",
"Standard_D2",
"Standard_D2_v2",
"Standard_D2_v2_Promo",
"Standard_D2_v3",
"Standard_D2s_v3",
"Standard_D3",
"Standard_D32_v3",
"Standard_D32s_v3",
"Standard_D3_v2",
"Standard_D3_v2_Promo",
"Standard_D4",
"Standard_D4_v2",
"Standard_D4_v2_Promo",
"Standard_D4_v3",
"Standard_D4s_v3",
"Standard_D5_v2",
"Standard_D5_v2_Promo",
"Standard_D64_v3",
"Standard_D64s_v3",
"Standard_D8_v3",
"Standard_D8s_v3",
"Standard_DC2s",
"Standard_DC4s",
"Standard_DS1",
"Standard_DS11",
"Standard_DS11-1_v2",
"Standard_DS11_v2",
"Standard_DS11_v2_Promo",
"Standard_DS12",
"Standard_DS12-1_v2",
"Standard_DS12-2_v2",
"Standard_DS12_v2",
"Standard_DS12_v2_Promo",
"Standard_DS13",
"Standard_DS13-2_v2",
"Standard_DS13-4_v2",
"Standard_DS13_v2",
"Standard_DS13_v2_Promo",
"Standard_DS14",
"Standard_DS14-4_v2",
"Standard_DS14-8_v2",
"Standard_DS14_v2",
"Standard_DS14_v2_Promo",
"Standard_DS15_v2",
"Standard_DS1_v2",
"Standard_DS2",
"Standard_DS2_v2",
"Standard_DS2_v2_Promo",
"Standard_DS3",
"Standard_DS3_v2",
"Standard_DS3_v2_Promo",
"Standard_DS4",
"Standard_DS4_v2",
"Standard_DS4_v2_Promo",
"Standard_DS5_v2",
"Standard_DS5_v2_Promo",
"Standard_E16-4s_v3",
"Standard_E16-8s_v3",
"Standard_E16_v3",
"Standard_E16s_v3",
"Standard_E20_v3",
"Standard_E20s_v3",
"Standard_E2_v3",
"Standard_E2s_v3",
"Standard_E32-16s_v3",
"Standard_E32-8s_v3",
"Standard_E32_v3",
"Standard_E32s_v3",
"Standard_E4-2s_v3",
"Standard_E4_v3",
"Standard_E4s_v3",
"Standard_E64-16s_v3",
"Standard_E64-32s_v3",
"Standard_E64_v3",
"Standard_E64i_v3",
"Standard_E64is_v3",
"Standard_E64s_v3",
"Standard_E8-2s_v3",
"Standard_E8-4s_v3",
"Standard_E8_v3",
"Standard_E8s_v3",
"Standard_F1",
"Standard_F16",
"Standard_F16s",
"Standard_F16s_v2",
"Standard_F1s",
"Standard_F2",
"Standard_F2s",
"Standard_F2s_v2",
"Standard_F32s_v2",
"Standard_F4",
"Standard_F4s",
"Standard_F4s_v2",
"Standard_F64s_v2",
"Standard_F72s_v2",
"Standard_F8",
"Standard_F8s",
"Standard_F8s_v2",
"Standard_G1",
"Standard_G2",
"Standard_G3",
"Standard_G4",
"Standard_G5",
"Standard_GS1",
"Standard_GS2",
"Standard_GS3",
"Standard_GS4",
"Standard_GS4-4",
"Standard_GS4-8",
"Standard_GS5",
"Standard_GS5-16",
"Standard_GS5-8",
"Standard_H16",
"Standard_H16m",
"Standard_H16mr",
"Standard_H16r",
"Standard_H8",
"Standard_H8m",
"Standard_L16s",
"Standard_L16s_v2",
"Standard_L32s",
"Standard_L32s_v2",
"Standard_L4s",
"Standard_L64s_v2",
"Standard_L80s_v2",
"Standard_L8s",
"Standard_L8s_v2",
"Standard_M128",
"Standard_M128-32ms",
"Standard_M128-64ms",
"Standard_M128m",
"Standard_M128ms",
"Standard_M128s",
"Standard_M16-4ms",
"Standard_M16-8ms",
"Standard_M16ms",
"Standard_M32-16ms",
"Standard_M32-8ms",
"Standard_M32ls",
"Standard_M32ms",
"Standard_M32ts",
"Standard_M64",
"Standard_M64-16ms",
"Standard_M64-32ms",
"Standard_M64ls",
"Standard_M64m",
"Standard_M64ms",
"Standard_M64s",
"Standard_M8-2ms",
"Standard_M8-4ms",
"Standard_M8ms",
"Standard_NC12",
"Standard_NC12s_v2",
"Standard_NC12s_v3",
"Standard_NC24",
"Standard_NC24r",
"Standard_NC24rs_v2",
"Standard_NC24rs_v3",
"Standard_NC24s_v2",
"Standard_NC24s_v3",
"Standard_NC6",
"Standard_NC6s_v2",
"Standard_NC6s_v3",
"Standard_ND12s",
"Standard_ND24rs",
"Standard_ND24s",
"Standard_ND6s",
"Standard_NV12",
"Standard_NV12s_v2",
"Standard_NV24",
"Standard_NV24s_v2",
"Standard_NV6",
"Standard_NV6s_v2",
"Standard_PB12s",
"Standard_PB24s",
"Standard_PB6s",
}
for _, expectedSize := range expectedSizes {
if !strings.Contains(sizes, expectedSize) {
t.Errorf("expected %s to be present in the list of allowedValues", expectedSize)
}
}
}

View file

@ -1,35 +0,0 @@
#!/usr/bin/env bash
####################################################
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
####################################################
set -x
T="$(mktemp -d)"
trap "rm -rf ${T}" EXIT
cp -a "${DIR}/.." "${T}/"
(cd "${T}/" && go generate ./...)
GENERATED_FILES=(
"pkg/openshift/certgen/unstable/templates/bindata.go"
"pkg/openshift/certgen/release39/templates/bindata.go"
)
for file in $GENERATED_FILES; do
if ! diff -r "${DIR}/../${file}" "${T}/${file}" 2>&1 ; then
echo "go generate produced changes that were not already present"
exit 1
fi
done
echo "Generated assets have no material difference than what is committed."

View file

@ -25,7 +25,7 @@ type Config struct {
Location string `envconfig:"LOCATION"` // Location where you want to create the cluster
Regions []string `envconfig:"REGIONS"` // A whitelist of availableregions
ClusterDefinition string `envconfig:"CLUSTER_DEFINITION" required:"true" default:"examples/kubernetes.json"` // ClusterDefinition is the path on disk to the json template these are normally located in examples/
CleanUpOnExit bool `envconfig:"CLEANUP_ON_EXIT" default:"true"` // if set the tests will not clean up rgs when tests finish
CleanUpOnExit bool `envconfig:"CLEANUP_ON_EXIT" default:"false"` // if true the tests will clean up rgs when tests finish
CleanUpIfFail bool `envconfig:"CLEANUP_IF_FAIL" default:"true"`
RetainSSH bool `envconfig:"RETAIN_SSH" default:"true"`
StabilityIterations int `envconfig:"STABILITY_ITERATIONS"`
@ -40,10 +40,6 @@ type Config struct {
const (
kubernetesOrchestrator = "kubernetes"
dcosOrchestrator = "dcos"
swarmModeOrchestrator = "swarmmode"
swarmOrchestrator = "swarm"
openShiftOrchestrator = "openshift"
)
// ParseConfig will parse needed environment variables for running the tests
@ -66,17 +62,7 @@ func (c *Config) GetKubeConfig() string {
case c.IsKubernetes():
file := fmt.Sprintf("kubeconfig.%s.json", c.Location)
kubeconfigPath = filepath.Join(c.CurrentWorkingDir, "_output", c.Name, "kubeconfig", file)
case c.IsOpenShift():
artifactsDir := filepath.Join(c.CurrentWorkingDir, "_output", c.Name)
masterTarball := filepath.Join(artifactsDir, "master.tar.gz")
out, err := exec.Command("tar", "-xzf", masterTarball, "-C", artifactsDir).CombinedOutput()
if err != nil {
log.Fatalf("Cannot untar master tarball: %v: %v", string(out), err)
}
kubeconfigPath = filepath.Join(artifactsDir, "etc", "origin", "master", "admin.kubeconfig")
}
return kubeconfigPath
}
@ -158,26 +144,6 @@ func (c *Config) IsKubernetes() bool {
return c.Orchestrator == kubernetesOrchestrator
}
// IsDCOS will return true if the ORCHESTRATOR env var is set to dcos
func (c *Config) IsDCOS() bool {
return c.Orchestrator == dcosOrchestrator
}
// IsSwarmMode will return true if the ORCHESTRATOR env var is set to dcos
func (c *Config) IsSwarmMode() bool {
return c.Orchestrator == swarmModeOrchestrator
}
// IsSwarm will return true if the ORCHESTRATOR env var is set to dcos
func (c *Config) IsSwarm() bool {
return c.Orchestrator == swarmOrchestrator
}
// IsOpenShift will return true if the ORCHESTRATOR env var is set to openshift
func (c *Config) IsOpenShift() bool {
return c.Orchestrator == openShiftOrchestrator
}
// SetRandomRegion sets Location to a random region
func (c *Config) SetRandomRegion() {
var regions []string
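Note the flipped CLEANUP_ON_EXIT default above: resource groups are now retained unless cleanup is explicitly requested. A sketch of opting back in via environment variables (CLEANUP_ON_EXIT and CLUSTER_DEFINITION come from the envconfig tags above; the ORCHESTRATOR variable name and the make target are assumptions):

export ORCHESTRATOR=kubernetes
export CLUSTER_DEFINITION=examples/kubernetes.json
export CLEANUP_ON_EXIT=true
make test-kubernetes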

View file

@ -1,8 +1,6 @@
package engine
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
@ -41,14 +39,14 @@ type Config struct {
ClusterDefinitionPath string // The original template we want to use to build the cluster from.
ClusterDefinitionTemplate string // This is the template after we splice in the environment variables
GeneratedDefinitionPath string // Holds the contents of running acs-engine generate
GeneratedDefinitionPath string // Holds the contents of running aks-engine generate
OutputPath string // This is the root output path
DefinitionName string // Unique cluster name
GeneratedTemplatePath string // azuredeploy.json path
GeneratedParametersPath string // azuredeploy.parameters.json path
}
// Engine holds necessary information to interact with acs-engine cli
// Engine holds necessary information to interact with aks-engine cli
type Engine struct {
Config *Config
ClusterDefinition *api.VlabsARMContainerService // Holds the parsed ClusterDefinition
@ -86,82 +84,48 @@ func Build(cfg *config.Config, masterSubnetID string, agentSubnetID string, isVM
if err != nil {
return nil, err
}
prop := cs.ContainerService.Properties
if config.ClientID != "" && config.ClientSecret != "" {
cs.ContainerService.Properties.ServicePrincipalProfile = &vlabs.ServicePrincipalProfile{
prop.ServicePrincipalProfile = &vlabs.ServicePrincipalProfile{
ClientID: config.ClientID,
Secret: config.ClientSecret,
}
}
if cfg.IsOpenShift() {
// azProfile
cs.ContainerService.Properties.AzProfile = &vlabs.AzProfile{
TenantID: config.TenantID,
SubscriptionID: config.SubscriptionID,
ResourceGroup: cfg.Name,
Location: cfg.Location,
}
// openshiftConfig
pass, err := generateRandomString(32)
if err != nil {
return nil, err
}
cs.ContainerService.Properties.OrchestratorProfile.OpenShiftConfig = &vlabs.OpenShiftConfig{
ClusterUsername: "test-user",
ClusterPassword: pass,
}
// master and agent config
cs.ContainerService.Properties.MasterProfile.Distro = vlabs.Distro(config.Distro)
cs.ContainerService.Properties.MasterProfile.ImageRef = nil
if config.ImageName != "" && config.ImageResourceGroup != "" {
cs.ContainerService.Properties.MasterProfile.ImageRef = &vlabs.ImageReference{
Name: config.ImageName,
ResourceGroup: config.ImageResourceGroup,
}
}
for i := range cs.ContainerService.Properties.AgentPoolProfiles {
cs.ContainerService.Properties.AgentPoolProfiles[i].Distro = vlabs.Distro(config.Distro)
cs.ContainerService.Properties.AgentPoolProfiles[i].ImageRef = nil
if config.ImageName != "" && config.ImageResourceGroup != "" {
cs.ContainerService.Properties.AgentPoolProfiles[i].ImageRef = &vlabs.ImageReference{
Name: config.ImageName,
ResourceGroup: config.ImageResourceGroup,
}
}
}
}
if config.MasterDNSPrefix != "" {
cs.ContainerService.Properties.MasterProfile.DNSPrefix = config.MasterDNSPrefix
prop.MasterProfile.DNSPrefix = config.MasterDNSPrefix
}
if !cfg.IsKubernetes() && !cfg.IsOpenShift() && config.AgentDNSPrefix != "" {
for idx, pool := range cs.ContainerService.Properties.AgentPoolProfiles {
if !cfg.IsKubernetes() && config.AgentDNSPrefix != "" {
for idx, pool := range prop.AgentPoolProfiles {
pool.DNSPrefix = fmt.Sprintf("%v-%v", config.AgentDNSPrefix, idx)
}
}
if prop.LinuxProfile != nil {
if config.PublicSSHKey != "" {
cs.ContainerService.Properties.LinuxProfile.SSH.PublicKeys[0].KeyData = config.PublicSSHKey
if cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig != nil && cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil && cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile != nil {
cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.PublicKey = config.PublicSSHKey
prop.LinuxProfile.SSH.PublicKeys[0].KeyData = config.PublicSSHKey
if prop.OrchestratorProfile.KubernetesConfig != nil && prop.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil && prop.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile != nil {
prop.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.PublicKey = config.PublicSSHKey
}
}
}
if config.WindowsAdminPasssword != "" {
cs.ContainerService.Properties.WindowsProfile.AdminPassword = config.WindowsAdminPasssword
prop.WindowsProfile.AdminPassword = config.WindowsAdminPasssword
}
// If the parsed api model input has no expressed version opinion, we check if ENV does have an opinion
if cs.ContainerService.Properties.OrchestratorProfile.OrchestratorRelease == "" &&
cs.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion == "" {
if prop.OrchestratorProfile.OrchestratorRelease == "" &&
prop.OrchestratorProfile.OrchestratorVersion == "" {
// First, prefer the release string if ENV declares it
if config.OrchestratorRelease != "" {
cs.ContainerService.Properties.OrchestratorProfile.OrchestratorRelease = config.OrchestratorRelease
prop.OrchestratorProfile.OrchestratorRelease = config.OrchestratorRelease
// Or, choose the version string if ENV declares it
} else if config.OrchestratorVersion != "" {
cs.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion = config.OrchestratorVersion
// If ENV similarly has no version opinion, we will rely upon the acs-engine default
prop.OrchestratorProfile.OrchestratorVersion = config.OrchestratorVersion
// If ENV similarly has no version opinion, we will rely upon the aks-engine default
} else {
log.Println("No orchestrator version specified, will use the default.")
}
@ -169,25 +133,25 @@ func Build(cfg *config.Config, masterSubnetID string, agentSubnetID string, isVM
if config.CreateVNET {
if isVMSS {
cs.ContainerService.Properties.MasterProfile.VnetSubnetID = masterSubnetID
cs.ContainerService.Properties.MasterProfile.AgentVnetSubnetID = agentSubnetID
for _, p := range cs.ContainerService.Properties.AgentPoolProfiles {
prop.MasterProfile.VnetSubnetID = masterSubnetID
prop.MasterProfile.AgentVnetSubnetID = agentSubnetID
for _, p := range prop.AgentPoolProfiles {
p.VnetSubnetID = agentSubnetID
}
} else {
cs.ContainerService.Properties.MasterProfile.VnetSubnetID = masterSubnetID
for _, p := range cs.ContainerService.Properties.AgentPoolProfiles {
prop.MasterProfile.VnetSubnetID = masterSubnetID
for _, p := range prop.AgentPoolProfiles {
p.VnetSubnetID = masterSubnetID
}
}
}
if config.EnableKMSEncryption && config.ClientObjectID != "" {
if cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig == nil {
cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig = &vlabs.KubernetesConfig{}
if prop.OrchestratorProfile.KubernetesConfig == nil {
prop.OrchestratorProfile.KubernetesConfig = &vlabs.KubernetesConfig{}
}
cs.ContainerService.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = &config.EnableKMSEncryption
cs.ContainerService.Properties.ServicePrincipalProfile.ObjectID = config.ClientObjectID
prop.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = &config.EnableKMSEncryption
prop.ServicePrincipalProfile.ObjectID = config.ClientObjectID
}
return &Engine{
@ -198,8 +162,8 @@ func Build(cfg *config.Config, masterSubnetID string, agentSubnetID string, isVM
// NodeCount returns the number of nodes that should be provisioned for a given cluster definition
func (e *Engine) NodeCount() int {
expectedCount := e.ClusterDefinition.Properties.MasterProfile.Count
for _, pool := range e.ClusterDefinition.Properties.AgentPoolProfiles {
expectedCount := e.ExpandedDefinition.Properties.MasterProfile.Count
for _, pool := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
expectedCount = expectedCount + pool.Count
}
return expectedCount
@ -287,17 +251,3 @@ func ParseOutput(path string) (*api.ContainerService, error) {
}
return containerService, nil
}
func generateRandomBytes(n int) ([]byte, error) {
b := make([]byte, n)
_, err := rand.Read(b)
if err != nil {
return nil, err
}
return b, nil
}
func generateRandomString(s int) (string, error) {
b, err := generateRandomBytes(s)
return base64.URLEncoding.EncodeToString(b), err
}

View file

@ -140,7 +140,9 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
It("should report all nodes in a Ready state", func() {
ready := node.WaitOnReady(eng.NodeCount(), 10*time.Second, cfg.Timeout)
nodeCount := eng.NodeCount()
log.Printf("Checking for %d Ready nodes\n", nodeCount)
ready := node.WaitOnReady(nodeCount, 10*time.Second, cfg.Timeout)
cmd := exec.Command("kubectl", "get", "nodes", "-o", "wide")
out, _ := cmd.CombinedOutput()
log.Printf("%s\n", out)
@ -1014,109 +1016,6 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
}
})
It("should be able to scale an iis webserver", func() {
if eng.HasWindowsAgents() {
iisImage := "microsoft/iis:windowsservercore-1803" // BUG: This should be set based on the host OS version
By("Creating a deployment with 1 pod running IIS")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999))
iisDeploy, err := deployment.CreateWindowsDeploy(iisImage, deploymentName, "default", 80, -1)
Expect(err).NotTo(HaveOccurred())
By("Waiting on pod to be Ready")
running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Exposing a LoadBalancer for the pod")
err = iisDeploy.Expose("LoadBalancer", 80, 80)
Expect(err).NotTo(HaveOccurred())
iisService, err := service.Get(deploymentName, "default")
Expect(err).NotTo(HaveOccurred())
By("Verifying that the service is reachable and returns the default IIS start page")
valid := iisService.Validate("(IIS Windows Server)", 10, 10*time.Second, cfg.Timeout)
Expect(valid).To(BeTrue())
By("Checking that each pod can reach http://www.bing.com")
iisPods, err := iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
pass, err := iisPod.CheckWindowsOutboundConnection("www.bing.com", 10*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Scaling deployment to 5 pods")
err = iisDeploy.ScaleDeployment(5)
Expect(err).NotTo(HaveOccurred())
_, err = iisDeploy.WaitForReplicas(5, 5, 2*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Waiting on 5 pods to be Ready")
running, err = pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
iisPods, err = iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).To(Equal(5))
By("Verifying that the service is reachable and returns the default IIS start page")
valid = iisService.Validate("(IIS Windows Server)", 10, 10*time.Second, cfg.Timeout)
Expect(valid).To(BeTrue())
By("Checking that each pod can reach http://www.bing.com")
iisPods, err = iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
pass, err := iisPod.CheckWindowsOutboundConnection("www.bing.com", 10*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Checking that no pods restart")
for _, iisPod := range iisPods {
log.Printf("Checking %s", iisPod.Metadata.Name)
Expect(iisPod.Status.ContainerStatuses[0].Ready).To(BeTrue())
Expect(iisPod.Status.ContainerStatuses[0].RestartCount).To(Equal(0))
}
By("Scaling deployment to 2 pods")
err = iisDeploy.ScaleDeployment(2)
Expect(err).NotTo(HaveOccurred())
_, err = iisDeploy.WaitForReplicas(2, 2, 2*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
iisPods, err = iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).To(Equal(2))
By("Verifying that the service is reachable and returns the default IIS start page")
valid = iisService.Validate("(IIS Windows Server)", 10, 10*time.Second, cfg.Timeout)
Expect(valid).To(BeTrue())
By("Checking that each pod can reach http://www.bing.com")
iisPods, err = iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
pass, err := iisPod.CheckWindowsOutboundConnection("www.bing.com", 10*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Verifying pods & services can be deleted")
err = iisDeploy.Delete(deleteResourceRetries)
Expect(err).NotTo(HaveOccurred())
err = iisService.Delete(deleteResourceRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})
It("should be able to resolve DNS across windows and linux deployments", func() {
if eng.HasWindowsAgents() {
iisImage := "microsoft/iis:windowsservercore-1803" // BUG: This should be set based on the host OS version

View file

@ -12,7 +12,6 @@ import (
"github.com/Azure/acs-engine/test/e2e/config"
"github.com/Azure/acs-engine/test/e2e/engine"
"github.com/Azure/acs-engine/test/e2e/metrics"
outil "github.com/Azure/acs-engine/test/e2e/openshift/util"
"github.com/Azure/acs-engine/test/e2e/runner"
)
@ -197,24 +196,6 @@ func teardown() {
log.Printf("cliProvisioner.FetchProvisioningMetrics error: %s\n", err)
}
}
if cliProvisioner.Config.IsOpenShift() {
sshKeyPath := cfg.GetSSHKeyPath()
adminName := eng.ClusterDefinition.Properties.LinuxProfile.AdminUsername
version := eng.Config.OrchestratorVersion
distro := eng.Config.Distro
if err := outil.FetchWaagentLogs(sshKeyPath, adminName, cfg.Name, cfg.Location, logsPath); err != nil {
log.Printf("cannot fetch waagent logs: %v", err)
}
if err := outil.FetchOpenShiftLogs(distro, version, sshKeyPath, adminName, cfg.Name, cfg.Location, logsPath); err != nil {
log.Printf("cannot get openshift logs: %v", err)
}
if err := outil.FetchClusterInfo(logsPath); err != nil {
log.Printf("cannot get pod and node info: %v", err)
}
if err := outil.FetchOpenShiftMetrics(logsPath); err != nil {
log.Printf("cannot fetch openshift metrics: %v", err)
}
}
if !cfg.SkipLogsCollection {
if err := cliProvisioner.FetchActivityLog(acct, logsPath); err != nil {
log.Printf("cannot fetch the activity log: %v", err)

View file

@ -11,18 +11,15 @@ import (
"strings"
"time"
"github.com/kelseyhightower/envconfig"
"github.com/Azure/acs-engine/pkg/helpers"
"github.com/Azure/acs-engine/test/e2e/azure"
"github.com/Azure/acs-engine/test/e2e/config"
"github.com/Azure/acs-engine/test/e2e/dcos"
"github.com/Azure/acs-engine/test/e2e/engine"
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/Azure/acs-engine/test/e2e/metrics"
onode "github.com/Azure/acs-engine/test/e2e/openshift/node"
"github.com/Azure/acs-engine/test/e2e/remote"
"github.com/kelseyhightower/envconfig"
"github.com/pkg/errors"
)
@ -226,9 +223,9 @@ func (cli *CLIProvisioner) generateAndDeploy() error {
}
cli.Engine.ExpandedDefinition = csGenerated
// Both Openshift and Kubernetes deployments should have a kubeconfig available
// Kubernetes deployments should have a kubeconfig available
// at this point.
if (cli.Config.IsKubernetes() || cli.Config.IsOpenShift()) && !cli.IsPrivate() {
if cli.Config.IsKubernetes() && !cli.IsPrivate() {
cli.Config.SetKubeConfig()
}
@ -251,7 +248,7 @@ func (cli *CLIProvisioner) generateName() string {
}
func (cli *CLIProvisioner) waitForNodes() error {
if cli.Config.IsKubernetes() || cli.Config.IsOpenShift() {
if cli.Config.IsKubernetes() {
if !cli.IsPrivate() {
log.Println("Waiting on nodes to go into ready state...")
ready := node.WaitOnReady(cli.Engine.NodeCount(), 10*time.Second, cli.Config.Timeout)
@ -265,8 +262,6 @@ func (cli *CLIProvisioner) waitForNodes() error {
var err error
if cli.Config.IsKubernetes() {
version, err = node.Version()
} else if cli.Config.IsOpenShift() {
version, err = onode.Version()
}
if err != nil {
log.Printf("Ready nodes did not return a version: %s", err)
@ -283,25 +278,6 @@ func (cli *CLIProvisioner) waitForNodes() error {
}
}
if cli.Config.IsDCOS() {
host := fmt.Sprintf("%s.%s.cloudapp.azure.com", cli.Config.Name, cli.Config.Location)
user := cli.Engine.ClusterDefinition.Properties.LinuxProfile.AdminUsername
log.Printf("SSH Key: %s\n", cli.Config.GetSSHKeyPath())
log.Printf("Master Node: %s@%s\n", user, host)
log.Printf("SSH Command: ssh -i %s -p 2200 %s@%s", cli.Config.GetSSHKeyPath(), user, host)
cluster, err := dcos.NewCluster(cli.Config, cli.Engine)
if err != nil {
return err
}
err = cluster.InstallDCOSClient()
if err != nil {
return errors.Wrap(err, "Error trying to install dcos client")
}
ready := cluster.WaitForNodes(cli.Engine.NodeCount(), 10*time.Second, cli.Config.Timeout)
if !ready {
return errors.New("Error: Not all nodes in a healthy state")
}
}
return nil
}
@ -358,7 +334,7 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con
// IsPrivate will return true if the cluster has no public IPs
func (cli *CLIProvisioner) IsPrivate() bool {
return (cli.Config.IsKubernetes() || cli.Config.IsOpenShift()) &&
return cli.Config.IsKubernetes() &&
cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster != nil &&
helpers.IsTrueBoolPointer(cli.Engine.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.Enabled)
}

View file

@ -48,7 +48,7 @@ func (g *Ginkgo) Run() error {
err = cmd.Wait()
if err != nil {
g.Point.RecordTestError()
if g.Config.IsKubernetes() || g.Config.IsOpenShift() {
if g.Config.IsKubernetes() {
kubectl := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide")
util.PrintCommand(kubectl)
kubectl.CombinedOutput()