Kubernetes E2E: test addons if present (#2156)

* conditional addon tests

* uses generated model to introspect cluster features

* I heart output

* deployment flows need expanded cluster definition

* reverting to ClusterDefinition for node counts

* standard stdout implementation for all commands

* typo

* disable broken chmod command

* stdout tweaks
This commit is contained in:
Jack Francis 2018-01-26 11:45:55 -08:00 committed by GitHub
Parent 965298daae
Commit 539c6b344c
No known key found for this signature
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 357 additions and 119 deletions
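The common thread across these files is one shell-out pattern: build the *exec.Cmd, echo it through the new util.PrintCommand helper, then run CombinedOutput and log the output on failure. A minimal sketch of that pattern, distilled from the hunks below (the wrapper function and package names are illustrative, not part of the change):

package account

import (
	"log"
	"os/exec"

	"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)

// showAccount shells out to `az account show`, echoing the command so it appears in the test logs.
func showAccount() ([]byte, error) {
	cmd := exec.Command("az", "account", "show")
	util.PrintCommand(cmd) // prints "$ az account show" to stdout
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to run 'az account show':%s\n", err)
		log.Printf("Output:%s\n", out)
		return nil, err
	}
	return out, nil
}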

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/Azure/acs-engine/test/e2e/engine"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/kelseyhightower/envconfig"
)
@ -70,8 +71,12 @@ func (a *Account) Login() error {
// SetSubscription will call az account set --subscription for the given Account
func (a *Account) SetSubscription() error {
_, err := exec.Command("az", "account", "set", "--subscription", a.SubscriptionID).CombinedOutput()
cmd := exec.Command("az", "account", "set", "--subscription", a.SubscriptionID)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to set subscription (%s):%s", a.SubscriptionID, err)
log.Printf("Output:%s\n", out)
return err
}
return nil
@ -81,9 +86,11 @@ func (a *Account) SetSubscription() error {
//--tags "type=${RESOURCE_GROUP_TAG_TYPE:-}" "now=$(date +%s)" "job=${JOB_BASE_NAME:-}" "buildno=${BUILD_NUM:-}"
func (a *Account) CreateGroup(name, location string) error {
now := fmt.Sprintf("now=%v", time.Now().Unix())
out, err := exec.Command("az", "group", "create", "--name", name, "--location", location, "--tags", now).CombinedOutput()
cmd := exec.Command("az", "group", "create", "--name", name, "--location", location, "--tags", now)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying create resource group (%s) in %s:%s", name, location, err)
log.Printf("Error while trying to create resource group (%s) in %s:%s", name, location, err)
log.Printf("Output:%s\n", out)
return err
}
@ -97,13 +104,14 @@ func (a *Account) CreateGroup(name, location string) error {
// DeleteGroup deletes a given resource group by name
func (a *Account) DeleteGroup(name string, wait bool) error {
var out []byte
var err error
var cmd *exec.Cmd
if !wait {
out, err = exec.Command("az", "group", "delete", "--name", name, "--no-wait", "--yes").CombinedOutput()
cmd = exec.Command("az", "group", "delete", "--name", name, "--no-wait", "--yes")
} else {
out, err = exec.Command("az", "group", "delete", "--name", name, "--yes").CombinedOutput()
cmd = exec.Command("az", "group", "delete", "--name", name, "--yes")
}
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete resource group (%s):%s", name, out)
return err
@ -113,7 +121,6 @@ func (a *Account) DeleteGroup(name string, wait bool) error {
// CreateDeployment will deploy a cluster to a given resource group using the template and parameters on disk
func (a *Account) CreateDeployment(name string, e *engine.Engine) error {
log.Print("Creating deployment, this will take a few minutes.")
d := Deployment{
Name: name,
TemplateDirectory: e.Config.GeneratedDefinitionPath,
@ -134,14 +141,16 @@ func (a *Account) CreateDeployment(name string, e *engine.Engine) error {
}
}()
output, err := exec.Command("az", "group", "deployment", "create",
cmd := exec.Command("az", "group", "deployment", "create",
"--name", d.Name,
"--resource-group", a.ResourceGroup.Name,
"--template-file", e.Config.GeneratedTemplatePath,
"--parameters", e.Config.GeneratedParametersPath).CombinedOutput()
"--parameters", e.Config.GeneratedParametersPath)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("\nError from deployment for %s in resource group %s:%s\n", d.Name, a.ResourceGroup.Name, err)
log.Printf("Command Output: %s\n", output)
log.Printf("Command Output: %s\n", out)
return err
}
quit <- true
@ -151,7 +160,9 @@ func (a *Account) CreateDeployment(name string, e *engine.Engine) error {
// GetCurrentAccount will run an az account show and parse that into an account struct
func GetCurrentAccount() (*Account, error) {
out, err := exec.Command("az", "account", "show").CombinedOutput()
cmd := exec.Command("az", "account", "show")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'account show':%s\n", err)
return nil, err
@ -167,7 +178,9 @@ func GetCurrentAccount() (*Account, error) {
// CreateVnet will create a vnet in a resource group
func (a *Account) CreateVnet(vnet, addressPrefixes, subnetName, subnetPrefix string) error {
out, err := exec.Command("az", "network", "vnet", "create", "-g", a.ResourceGroup.Name, "-n", vnet, "--address-prefixes", addressPrefixes, "--subnet-name", subnetName, "--subnet-prefix", subnetPrefix).CombinedOutput()
cmd := exec.Command("az", "network", "vnet", "create", "-g", a.ResourceGroup.Name, "-n", vnet, "--address-prefixes", addressPrefixes, "--subnet-name", subnetName, "--subnet-prefix", subnetPrefix)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to create vnet with the following command:\n az network vnet create -g %s -n %s --address-prefixes %s --subnet-name %s --subnet-prefix %s \n Output:%s\n", a.ResourceGroup.Name, vnet, addressPrefixes, subnetName, subnetPrefix, out)
return err
@ -186,7 +199,8 @@ type RouteTable struct {
// UpdateRouteTables is used to update a vnet with the appropriate route tables
func (a *Account) UpdateRouteTables(subnet, vnet string) error {
out, err := exec.Command("az", "network", "route-table", "list", "-g", a.ResourceGroup.Name).CombinedOutput()
cmd := exec.Command("az", "network", "route-table", "list", "-g", a.ResourceGroup.Name)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to get route table list!\n Output:%s\n", out)
return err
@ -194,7 +208,9 @@ func (a *Account) UpdateRouteTables(subnet, vnet string) error {
rts := []RouteTable{}
json.Unmarshal(out, &rts)
out, err = exec.Command("az", "network", "vnet", "subnet", "update", "-n", subnet, "-g", a.ResourceGroup.Name, "--vnet-name", vnet, "--route-table", rts[0].Name).CombinedOutput()
cmd = exec.Command("az", "network", "vnet", "subnet", "update", "-n", subnet, "-g", a.ResourceGroup.Name, "--vnet-name", vnet, "--route-table", rts[0].Name)
util.PrintCommand(cmd)
out, err = cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to update vnet route tables:%s\n", out)
return err

View File

@ -31,7 +31,7 @@ var _ = BeforeSuite(func() {
engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name)
Expect(err).NotTo(HaveOccurred())
cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
cs, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
Expect(err).NotTo(HaveOccurred())
eng = engine.Engine{
Config: engCfg,

View File

@ -3,11 +3,15 @@ package engine
import (
"log"
"os/exec"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// Generate will run acs-engine generate on a given cluster definition
func (e *Engine) Generate() error {
out, err := exec.Command("./bin/acs-engine", "generate", e.Config.ClusterDefinitionTemplate, "--output-directory", e.Config.GeneratedDefinitionPath).CombinedOutput()
cmd := exec.Command("./bin/acs-engine", "generate", e.Config.ClusterDefinitionTemplate, "--output-directory", e.Config.GeneratedDefinitionPath)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to generate acs-engine template with cluster definition - %s: %s\n", e.Config.ClusterDefinitionTemplate, err)
log.Printf("Command:./bin/acs-engine generate %s --output-directory %s\n", e.Config.ClusterDefinitionTemplate, e.Config.GeneratedDefinitionPath)

View File

@ -10,6 +10,7 @@ import (
"github.com/Azure/acs-engine/pkg/api"
"github.com/Azure/acs-engine/pkg/api/vlabs"
"github.com/Azure/acs-engine/pkg/helpers"
"github.com/Azure/acs-engine/pkg/i18n"
"github.com/Azure/acs-engine/test/e2e/config"
"github.com/kelseyhightower/envconfig"
)
@ -38,8 +39,9 @@ type Config struct {
// Engine holds necessary information to interact with acs-engine cli
type Engine struct {
Config *Config
ClusterDefinition *api.VlabsARMContainerService // Holds the parsed ClusterDefinition
Config *Config
ClusterDefinition *api.VlabsARMContainerService // Holds the parsed ClusterDefinition
ExpandedDefinition *api.ContainerService // Holds the expanded ClusterDefinition
}
// ParseConfig will return a new engine config struct taking values from env vars
@ -69,7 +71,7 @@ func Build(cfg *config.Config, subnetID string) (*Engine, error) {
log.Printf("Error while trying to build Engine Configuration:%s\n", err)
}
cs, err := Parse(config.ClusterDefinitionPath)
cs, err := ParseInput(config.ClusterDefinitionPath)
if err != nil {
return nil, err
}
@ -138,7 +140,7 @@ func (e *Engine) NodeCount() int {
// HasLinuxAgents will return true if there is at least 1 linux agent pool
func (e *Engine) HasLinuxAgents() bool {
for _, ap := range e.ClusterDefinition.Properties.AgentPoolProfiles {
for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
if ap.OSType == "" || ap.OSType == "Linux" {
return true
}
@ -148,7 +150,7 @@ func (e *Engine) HasLinuxAgents() bool {
// HasWindowsAgents will return true if there is at least 1 windows agent pool
func (e *Engine) HasWindowsAgents() bool {
for _, ap := range e.ClusterDefinition.Properties.AgentPoolProfiles {
for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
if ap.OSType == "Windows" {
return true
}
@ -156,6 +158,46 @@ func (e *Engine) HasWindowsAgents() bool {
return false
}
// HasDashboard will return true if kubernetes-dashboard addon is enabled
func (e *Engine) HasDashboard() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "kubernetes-dashboard" {
return *addon.Enabled
}
}
return false
}
// HasTiller will return true if tiller addon is enabled
func (e *Engine) HasTiller() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "tiller" {
return *addon.Enabled
}
}
return false
}
// HasACIConnector will return true if aci-connector addon is enabled
func (e *Engine) HasACIConnector() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "aci-connector" {
return *addon.Enabled
}
}
return false
}
// HasRescheduler will return true if rescheduler addon is enabled
func (e *Engine) HasRescheduler() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "rescheduler" {
return *addon.Enabled
}
}
return false
}
// OrchestratorVersion1Dot8AndUp will return true if the orchestrator version is 1.8 and up
func (e *Engine) OrchestratorVersion1Dot8AndUp() bool {
return e.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion >= "1.8"
@ -172,11 +214,12 @@ func (e *Engine) Write() error {
if err != nil {
log.Printf("Error while trying to write container service definition to file (%s):%s\n%s\n", e.Config.ClusterDefinitionTemplate, err, string(json))
}
return nil
}
// Parse takes a template path and will parse that into an api.VlabsARMContainerService
func Parse(path string) (*api.VlabsARMContainerService, error) {
// ParseInput takes a template path and will parse that into an api.VlabsARMContainerService
func ParseInput(path string) (*api.VlabsARMContainerService, error) {
contents, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("Error while trying to read cluster definition at (%s):%s\n", path, err)
@ -189,3 +232,21 @@ func Parse(path string) (*api.VlabsARMContainerService, error) {
}
return &cs, nil
}
// ParseOutput takes the generated api model and will parse that into an api.ContainerService
func ParseOutput(path string) (*api.ContainerService, error) {
locale, err := i18n.LoadTranslations()
if err != nil {
return nil, fmt.Errorf(fmt.Sprintf("error loading translation files: %s", err.Error()))
}
apiloader := &api.Apiloader{
Translator: &i18n.Translator{
Locale: locale,
},
}
containerService, _, err := apiloader.LoadContainerServiceFromFile(path, true, false, nil)
if err != nil {
return nil, err
}
return containerService, nil
}
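The four addon checks added above (kubernetes-dashboard, tiller, aci-connector, rescheduler) iterate the same Addons slice and differ only in the name they compare against. A hypothetical consolidation, not part of this change, that the per-addon methods could delegate to:

// hasAddon reports whether the named addon is enabled in the expanded cluster definition.
// Hypothetical helper for illustration only; the PR keeps one exported method per addon.
func (e *Engine) hasAddon(name string) bool {
	for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
		if addon.Name == name {
			return *addon.Enabled
		}
	}
	return false
}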

View File

@ -5,6 +5,8 @@ import (
"log"
"os/exec"
"strings"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// Config represents a kubernetes config object
@ -25,7 +27,9 @@ type ClusterInfo struct {
// GetConfig returns a Config value representing the current kubeconfig
func GetConfig() (*Config, error) {
out, err := exec.Command("kubectl", "config", "view", "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "config", "view", "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl config view':%s\n", err)
return nil, err

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// List holds a list of deployments returned from kubectl get deploy
@ -56,14 +57,15 @@ type Container struct {
// CreateLinuxDeploy will create a deployment for a given image with a name in a namespace
// --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}'
func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, error) {
var err error
var out []byte
var cmd *exec.Cmd
overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}`
if miscOpts != "" {
out, err = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides, miscOpts).CombinedOutput()
cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides, miscOpts)
} else {
out, err = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides).CombinedOutput()
cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides)
}
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
@ -80,7 +82,9 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er
// --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}'
func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Deployment, error) {
overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}`
out, err := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command).CombinedOutput()
cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
@ -96,7 +100,9 @@ func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Depl
// CreateWindowsDeploy will create a deployment for a given image with a name in a namespace
func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) (*Deployment, error) {
overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"windows"}}}}}`
out, err := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides).CombinedOutput()
cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
@ -111,7 +117,9 @@ func CreateWindowsDeploy(image, name, namespace string, port int, hostport int)
// Get returns a deployment from a name and namespace
func Get(name, namespace string) (*Deployment, error) {
out, err := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name).CombinedOutput()
cmd := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to fetch deployment %s in namespace %s:%s\n", name, namespace, string(out))
return nil, err
@ -127,7 +135,9 @@ func Get(name, namespace string) (*Deployment, error) {
// Delete will delete a deployment in a given namespace
func (d *Deployment) Delete() error {
out, err := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name).CombinedOutput()
cmd := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete deployment %s in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out))
return err
@ -137,7 +147,9 @@ func (d *Deployment) Delete() error {
// Expose will create a load balancer and expose the deployment on a given port
func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error {
out, err := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort)).CombinedOutput()
cmd := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort))
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to expose (%s) target port (%v) for deployment %s in namespace %s on port %v:%s\n", svcType, targetPort, d.Metadata.Name, d.Metadata.Namespace, exposedPort, string(out))
return err

View File

@ -17,6 +17,7 @@ import (
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
"github.com/Azure/acs-engine/test/e2e/kubernetes/service"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -41,11 +42,14 @@ var _ = BeforeSuite(func() {
engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name)
Expect(err).NotTo(HaveOccurred())
cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
csInput, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
Expect(err).NotTo(HaveOccurred())
csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath + "/apimodel.json")
Expect(err).NotTo(HaveOccurred())
eng = engine.Engine{
Config: engCfg,
ClusterDefinition: cs,
Config: engCfg,
ClusterDefinition: csInput,
ExpandedDefinition: csGenerated,
}
})
@ -121,66 +125,107 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
It("should have tiller running", func() {
running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
if eng.HasTiller() {
running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
} else {
Skip("tiller disabled for this cluster, will not test")
}
})
It("should be able to access the dashboard from each node", func() {
running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
if eng.HasDashboard() {
By("Ensuring that the kubernetes-dashboard pod is Running")
kubeConfig, err := GetConfig()
Expect(err).NotTo(HaveOccurred())
sshKeyPath := cfg.GetSSHKeyPath()
running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
s, err := service.Get("kubernetes-dashboard", "kube-system")
Expect(err).NotTo(HaveOccurred())
dashboardPort := 80
version, err := node.Version()
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the kubernetes-dashboard service is Running")
re := regexp.MustCompile("v1.9")
if re.FindString(version) != "" {
dashboardPort = 443
}
port := s.GetNodePort(dashboardPort)
s, err := service.Get("kubernetes-dashboard", "kube-system")
Expect(err).NotTo(HaveOccurred())
master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
if !eng.HasWindowsAgents() {
for _, node := range nodeList.Nodes {
success := false
for i := 0; i < 60; i++ {
dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port)
curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL)
_, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput()
if err == nil {
success = true
break
}
if i > 58 {
log.Println(curlCMD)
log.Println(err.Error())
log.Printf("%#v\n", err)
}
time.Sleep(10 * time.Second)
if !eng.HasWindowsAgents() {
By("Gathering connection information to determine whether or not to connect via HTTP or HTTPS")
dashboardPort := 80
version, err := node.Version()
Expect(err).NotTo(HaveOccurred())
re := regexp.MustCompile("v1.9")
if re.FindString(version) != "" {
dashboardPort = 443
}
port := s.GetNodePort(dashboardPort)
kubeConfig, err := GetConfig()
Expect(err).NotTo(HaveOccurred())
master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
sshKeyPath := cfg.GetSSHKeyPath()
if dashboardPort == 80 {
By("Ensuring that we can connect via HTTP to the dashboard on any one node")
} else {
By("Ensuring that we can connect via HTTPS to the dashboard on any one node")
}
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Nodes {
success := false
for i := 0; i < 60; i++ {
dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port)
curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL)
cmd := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err == nil {
success = true
break
}
if i > 58 {
log.Printf("Error while connecting to Windows dashboard:%s\n", err)
log.Println(string(out))
}
time.Sleep(10 * time.Second)
}
Expect(success).To(BeTrue())
}
Expect(success).To(BeTrue())
}
} else {
Skip("kubernetes-dashboard disabled for this cluster, will not test")
}
})
It("should have aci-connector running", func() {
if eng.HasACIConnector() {
running, err := pod.WaitOnReady("aci-connector", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
} else {
Skip("aci-connector disabled for this cluster, will not test")
}
})
It("should have rescheduler running", func() {
if eng.HasRescheduler() {
running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
} else {
Skip("rescheduler disabled for this cluster, will not test")
}
})
})
Describe("with a linux agent pool", func() {
It("should be able to autoscale", func() {
By("Determining whether this version of Kubernetes can hpa autoscale")
version, err := node.Version()
Expect(err).NotTo(HaveOccurred())
re := regexp.MustCompile("v1.9")
if eng.HasLinuxAgents() && re.FindString(version) == "" {
By("Creating a test php-apache deployment with request limit thresholds")
// Inspired by http://blog.kubernetes.io/2016/07/autoscaling-in-kubernetes.html
r := rand.New(rand.NewSource(time.Now().UnixNano()))
phpApacheName := fmt.Sprintf("php-apache-%s-%v", cfg.Name, r.Intn(99999))
@ -190,6 +235,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
}
Expect(err).NotTo(HaveOccurred())
By("Ensuring that one php-apache pod is running before autoscale configuration or load applied")
running, err := pod.WaitOnReady(phpApacheName, "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
@ -199,20 +245,23 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
// We should have exactly 1 pod to begin
Expect(len(phpPods)).To(Equal(1))
By("Exposing TCP 80 internally on the php-apache deployment")
err = phpApacheDeploy.Expose("ClusterIP", 80, 80)
Expect(err).NotTo(HaveOccurred())
s, err := service.Get(phpApacheName, "default")
Expect(err).NotTo(HaveOccurred())
By("Assigning hpa configuration to the php-apache deployment")
// Apply autoscale characteristics to deployment
_, err = exec.Command("kubectl", "autoscale", "deployment", phpApacheName, "--cpu-percent=5", "--min=1", "--max=10").CombinedOutput()
cmd := exec.Command("kubectl", "autoscale", "deployment", phpApacheName, "--cpu-percent=5", "--min=1", "--max=10")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while configuring autoscale against deployment %s:%s\n", phpApacheName, string(out))
}
Expect(err).NotTo(HaveOccurred())
phpPods, err = phpApacheDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
// We should still have exactly 1 pod after autoscale config but before load
Expect(len(phpPods)).To(Equal(1))
By("Sending load to the php-apache service by creating a 3 replica deployment")
// Launch a simple busybox pod that wget's continuously to the apache service to simulate load
commandString := fmt.Sprintf("while true; do wget -q -O- http://%s.default.svc.cluster.local; done", phpApacheName)
loadTestName := fmt.Sprintf("load-test-%s-%v", cfg.Name, r.Intn(99999))
@ -220,6 +269,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
loadTestDeploy, err := deployment.RunLinuxDeploy("busybox", loadTestName, "default", commandString, numLoadTestPods)
Expect(err).NotTo(HaveOccurred())
By("Ensuring there are 3 load test pods")
running, err = pod.WaitOnReady(loadTestName, "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
@ -229,46 +279,57 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
Expect(err).NotTo(HaveOccurred())
Expect(len(loadTestPods)).To(Equal(numLoadTestPods))
By("Waiting 3 minutes for load to take effect")
// Wait 3 minutes for autoscaler to respond to load
time.Sleep(3 * time.Minute)
By("Ensuring we have more than 1 apache-php pods due to hpa enforcement")
phpPods, err = phpApacheDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
// We should have > 1 pods after autoscale effects
Expect(len(phpPods) > 1).To(BeTrue())
By("Cleaning up after ourselves")
err = loadTestDeploy.Delete()
Expect(err).NotTo(HaveOccurred())
err = phpApacheDeploy.Delete()
Expect(err).NotTo(HaveOccurred())
err = s.Delete()
Expect(err).NotTo(HaveOccurred())
} else {
Skip("This flavor/version of Kubernetes doesn't support hpa autoscale")
}
})
It("should be able to deploy an nginx service", func() {
if eng.HasLinuxAgents() {
By("Creating a nginx deployment")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentName := fmt.Sprintf("nginx-%s-%v", cfg.Name, r.Intn(99999))
nginxDeploy, err := deployment.CreateLinuxDeploy("library/nginx:latest", deploymentName, "default", "")
Expect(err).NotTo(HaveOccurred())
By("Ensure there is a Running nginx pod")
running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Exposing TCP 80 LB on the nginx deployment")
err = nginxDeploy.Expose("LoadBalancer", 80, 80)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we can connect to the service")
s, err := service.Get(deploymentName, "default")
Expect(err).NotTo(HaveOccurred())
s, err = s.WaitForExternalIP(cfg.Timeout, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
Expect(s.Status.LoadBalancer.Ingress).NotTo(BeEmpty())
By("Ensuring the service root URL returns the expected payload")
valid := s.Validate("(Welcome to nginx)", 5, 5*time.Second)
Expect(valid).To(BeTrue())
By("Ensuring we have outbound internet access from the nginx pods")
nginxPods, err := nginxDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
Expect(len(nginxPods)).ToNot(BeZero())
@ -278,6 +339,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
Expect(pass).To(BeTrue())
}
By("Cleaning up after ourselves")
err = nginxDeploy.Delete()
Expect(err).NotTo(HaveOccurred())
err = s.Delete()

View File

@ -5,6 +5,8 @@ import (
"log"
"os/exec"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// Namespace holds namespace metadata
@ -20,7 +22,9 @@ type Metadata struct {
// Create a namespace with the given name
func Create(name string) (*Namespace, error) {
out, err := exec.Command("kubectl", "create", "namespace", name).CombinedOutput()
cmd := exec.Command("kubectl", "create", "namespace", name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to create namespace (%s):%s\n", name, string(out))
return nil, err
@ -30,7 +34,9 @@ func Create(name string) (*Namespace, error) {
// Get returns a namespace with a given name
func Get(name string) (*Namespace, error) {
out, err := exec.Command("kubectl", "get", "namespace", name, "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "namespace", name, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to get namespace (%s):%s\n", name, string(out))
return nil, err
@ -45,7 +51,9 @@ func Get(name string) (*Namespace, error) {
// Delete a namespace
func (n *Namespace) Delete() error {
out, err := exec.Command("kubectl", "delete", "namespace", n.Metadata.Name).CombinedOutput()
cmd := exec.Command("kubectl", "delete", "namespace", n.Metadata.Name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete namespace (%s):%s\n", n.Metadata.Name, out)
return err

View File

@ -9,6 +9,8 @@ import (
"regexp"
"strings"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
const (
@ -113,7 +115,9 @@ func WaitOnReady(nodeCount int, sleep, duration time.Duration) bool {
// Get returns the current nodes for a given kubeconfig
func Get() (*List, error) {
out, err := exec.Command("kubectl", "get", "nodes", "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "nodes", "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl get nodes':%s", string(out))
return nil, err
@ -128,7 +132,9 @@ func Get() (*List, error) {
// Version get the version of the server
func Version() (string, error) {
out, err := exec.Command("kubectl", "version", "--short").CombinedOutput()
cmd := exec.Command("kubectl", "version", "--short")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl version':%s", string(out))
return "", err

View File

@ -7,6 +7,8 @@ import (
"log"
"os/exec"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// PersistentVolumeClaims is used to parse data from kubectl get pvc
@ -36,7 +38,9 @@ type Status struct {
// CreatePersistentVolumeClaimsFromFile will create a StorageClass from file with a name
func CreatePersistentVolumeClaimsFromFile(filename, name, namespace string) (*PersistentVolumeClaims, error) {
out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput()
cmd := exec.Command("kubectl", "apply", "-f", filename)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to create PersistentVolumeClaims %s in namespace %s:%s\n", name, namespace, string(out))
return nil, err
@ -51,7 +55,9 @@ func CreatePersistentVolumeClaimsFromFile(filename, name, namespace string) (*Pe
// Get will return a PersistentVolumeClaims with a given name and namespace
func Get(pvcName, namespace string) (*PersistentVolumeClaims, error) {
out, err := exec.Command("kubectl", "get", "pvc", pvcName, "-n", namespace, "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "pvc", pvcName, "-n", namespace, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}

View File

@ -9,6 +9,8 @@ import (
"regexp"
"strings"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
const (
@ -62,7 +64,9 @@ type Status struct {
// CreatePodFromFile will create a Pod from file with a name
func CreatePodFromFile(filename, name, namespace string) (*Pod, error) {
out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput()
cmd := exec.Command("kubectl", "apply", "-f", filename)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to create Pod %s:%s\n", name, string(out))
return nil, err
@ -77,7 +81,9 @@ func CreatePodFromFile(filename, name, namespace string) (*Pod, error) {
// GetAll will return all pods in a given namespace
func GetAll(namespace string) (*List, error) {
out, err := exec.Command("kubectl", "get", "pods", "-n", namespace, "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "pods", "-n", namespace, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
@ -92,7 +98,9 @@ func GetAll(namespace string) (*List, error) {
// Get will return a pod with a given name and namespace
func Get(podName, namespace string) (*Pod, error) {
out, err := exec.Command("kubectl", "get", "pods", podName, "-n", namespace, "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "pods", podName, "-n", namespace, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
@ -125,7 +133,7 @@ func GetAllByPrefix(prefix, namespace string) ([]Pod, error) {
return pods, nil
}
// AreAllPodsRunning will return true if all pods are in a Running State
// AreAllPodsRunning will return true if all pods in a given namespace are in a Running State
func AreAllPodsRunning(podPrefix, namespace string) (bool, error) {
pl, err := GetAll(namespace)
if err != nil {
@ -206,15 +214,17 @@ func (p *Pod) WaitOnReady(sleep, duration time.Duration) (bool, error) {
}
// Exec will execute the given command in the pod
func (p *Pod) Exec(cmd ...string) ([]byte, error) {
func (p *Pod) Exec(c ...string) ([]byte, error) {
execCmd := []string{"exec", p.Metadata.Name, "-n", p.Metadata.Namespace}
for _, s := range cmd {
for _, s := range c {
execCmd = append(execCmd, s)
}
out, err := exec.Command("kubectl", execCmd...).CombinedOutput()
cmd := exec.Command("kubectl", execCmd...)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl exec':%s\n", string(out))
log.Printf("Command:kubectl exec %s -n %s %s \n", p.Metadata.Name, p.Metadata.Namespace, cmd)
log.Printf("Command:kubectl exec %s -n %s %s \n", p.Metadata.Name, p.Metadata.Namespace, c)
return nil, err
}
return out, nil
@ -222,7 +232,9 @@ func (p *Pod) Exec(cmd ...string) ([]byte, error) {
// Delete will delete a Pod in a given namespace
func (p *Pod) Delete() error {
out, err := exec.Command("kubectl", "delete", "po", "-n", p.Metadata.Namespace, p.Metadata.Name).CombinedOutput()
cmd := exec.Command("kubectl", "delete", "po", "-n", p.Metadata.Namespace, p.Metadata.Name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete Pod %s in namespace %s:%s\n", p.Metadata.Namespace, p.Metadata.Name, string(out))
return err
@ -337,9 +349,11 @@ func (p *Pod) ValidateHostPort(check string, attempts int, sleep time.Duration,
curlCMD := fmt.Sprintf("curl --max-time 60 %s", url)
for i := 0; i < attempts; i++ {
resp, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput()
cmd := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err == nil {
matched, _ := regexp.MatchString(check, string(resp))
matched, _ := regexp.MatchString(check, string(out))
if matched == true {
return true
}

View File

@ -10,6 +10,8 @@ import (
"os/exec"
"regexp"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// Service represents a kubernetes service
@ -54,7 +56,9 @@ type LoadBalancer struct {
// Get returns the service definition specified in a given namespace
func Get(name, namespace string) (*Service, error) {
out, err := exec.Command("kubectl", "get", "svc", "-o", "json", "-n", namespace, name).CombinedOutput()
cmd := exec.Command("kubectl", "get", "svc", "-o", "json", "-n", namespace, name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to run 'kubectl get svc':%s\n", string(out))
return nil, err
@ -70,7 +74,9 @@ func Get(name, namespace string) (*Service, error) {
// Delete will delete a service in a given namespace
func (s *Service) Delete() error {
out, err := exec.Command("kubectl", "delete", "svc", "-n", s.Metadata.Namespace, s.Metadata.Name).CombinedOutput()
cmd := exec.Command("kubectl", "delete", "svc", "-n", s.Metadata.Namespace, s.Metadata.Name)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error while trying to delete service %s in namespace %s:%s\n", s.Metadata.Namespace, s.Metadata.Name, string(out))
return err

View File

@ -7,6 +7,8 @@ import (
"log"
"os/exec"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)
// StorageClass is used to parse data from kubectl get storageclass
@ -28,7 +30,9 @@ type Parameters struct {
// CreateStorageClassFromFile will create a StorageClass from file with a name
func CreateStorageClassFromFile(filename, name string) (*StorageClass, error) {
out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput()
cmd := exec.Command("kubectl", "apply", "-f", filename)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to create StorageClass %s:%s\n", name, string(out))
return nil, err
@ -43,7 +47,9 @@ func CreateStorageClassFromFile(filename, name string) (*StorageClass, error) {
// Get will return a StorageClass with a given name and namespace
func Get(scName string) (*StorageClass, error) {
out, err := exec.Command("kubectl", "get", "storageclass", scName, "-o", "json").CombinedOutput()
cmd := exec.Command("kubectl", "get", "storageclass", scName, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}

View File

@ -0,0 +1,12 @@
package util
import (
"fmt"
"os/exec"
"strings"
)
// PrintCommand prints a command string
func PrintCommand(cmd *exec.Cmd) {
fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " "))
}
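PrintCommand simply joins cmd.Args behind a "$ " prompt, so every az, kubectl, and ssh invocation in the suite is echoed to stdout before it runs. A typical call site, mirroring the hunks elsewhere in this commit (and assuming the same log, os/exec, and util imports as those files):

	cmd := exec.Command("kubectl", "get", "nodes", "-o", "json")
	util.PrintCommand(cmd) // "$ kubectl get nodes -o json"
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to run 'kubectl get nodes':%s", string(out))
	}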

View File

@ -10,6 +10,7 @@ import (
"os/exec"
"time"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
@ -91,9 +92,11 @@ func (c *Connection) Execute(cmd string) ([]byte, error) {
}
func (c *Connection) Write(data, path string) error {
cmd := fmt.Sprintf("echo %s > %s", data, path)
remoteCommand := fmt.Sprintf("echo %s > %s", data, path)
connectString := fmt.Sprintf("%s@%s", c.User, c.Host)
out, err := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, cmd).CombinedOutput()
cmd := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, remoteCommand)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error output:%s\n", out)
return err
@ -102,9 +105,11 @@ func (c *Connection) Write(data, path string) error {
}
func (c *Connection) Read(path string) ([]byte, error) {
cmd := fmt.Sprintf("cat %s", path)
remoteCommand := fmt.Sprintf("cat %s", path)
connectString := fmt.Sprintf("%s@%s", c.User, c.Host)
out, err := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, cmd).CombinedOutput()
cmd := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, remoteCommand)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error output:%s\n", out)
return nil, err

View File

@ -74,7 +74,7 @@ func main() {
teardown()
log.Fatalf("Error trying to parse Engine config:%s\n", err)
}
cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
cs, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
if err != nil {
teardown()
log.Fatalf("Error trying to parse engine template into memory:%s\n", err)

View File

@ -15,6 +15,7 @@ import (
"github.com/Azure/acs-engine/test/e2e/dcos"
"github.com/Azure/acs-engine/test/e2e/engine"
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/Azure/acs-engine/test/e2e/metrics"
"github.com/Azure/acs-engine/test/e2e/remote"
"github.com/kelseyhightower/envconfig"
@ -80,17 +81,17 @@ func (cli *CLIProvisioner) provision() error {
cli.Config.Name = cli.Config.SoakClusterName
}
os.Setenv("NAME", cli.Config.Name)
log.Printf("Cluster name:%s\n", cli.Config.Name)
outputPath := filepath.Join(cli.Config.CurrentWorkingDir, "_output")
os.Mkdir(outputPath, 0755)
if cli.Config.SoakClusterName == "" {
out, err := exec.Command("ssh-keygen", "-f", cli.Config.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa").CombinedOutput()
cmd := exec.Command("ssh-keygen", "-f", cli.Config.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error while trying to generate ssh key:%s\nOutput:%s", err, out)
}
exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*")
}
publicSSHKey, err := cli.Config.ReadPublicSSHKey()
@ -133,6 +134,17 @@ func (cli *CLIProvisioner) provision() error {
return fmt.Errorf("Error while trying to generate acs-engine template:%s", err)
}
c, err := config.ParseConfig()
engCfg, err := engine.ParseConfig(cli.Config.CurrentWorkingDir, c.ClusterDefinition, c.Name)
if err != nil {
return fmt.Errorf("unable to parse config")
}
csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath + "/apimodel.json")
if err != nil {
return fmt.Errorf("unable to parse output")
}
cli.Engine.ExpandedDefinition = csGenerated
// Lets start by just using the normal az group deployment cli for creating a cluster
err = cli.Account.CreateDeployment(cli.Config.Name, eng)
if err != nil {

View File

@ -7,6 +7,7 @@ import (
"os/exec"
"github.com/Azure/acs-engine/test/e2e/config"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/Azure/acs-engine/test/e2e/metrics"
"github.com/kelseyhightower/envconfig"
)
@ -33,7 +34,8 @@ func BuildGinkgoRunner(cfg *config.Config, pt *metrics.Point) (*Ginkgo, error) {
func (g *Ginkgo) Run() error {
g.Point.SetTestStart()
testDir := fmt.Sprintf("test/e2e/%s", g.Config.Orchestrator)
cmd := exec.Command("ginkgo", "-slowSpecThreshold", "180", "-r", testDir)
cmd := exec.Command("ginkgo", "-slowSpecThreshold", "180", "-r", "-v", testDir)
util.PrintCommand(cmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Start()
@ -47,10 +49,12 @@ func (g *Ginkgo) Run() error {
if err != nil {
g.Point.RecordTestError()
if g.Config.IsKubernetes() {
out, _ := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide").CombinedOutput()
log.Printf("Running kubectl get all:\n%s\n", out)
out, _ = exec.Command("kubectl", "get", "nodes", "-o", "wide").CombinedOutput()
log.Printf("Running kubectl get nodes:\n%s\n", out)
kubectl := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide")
util.PrintCommand(kubectl)
kubectl.CombinedOutput()
kubectl = exec.Command("kubectl", "get", "nodes", "-o", "wide")
util.PrintCommand(kubectl)
kubectl.CombinedOutput()
}
return err
}