Mirror of https://github.com/Azure/acs-engine.git
E2E test - 50 nodes (#2260)
This commit is contained in:
Parent: bb26c506e5
Commit: 2bdd3c0040
@@ -0,0 +1,39 @@
+{
+  "apiVersion": "vlabs",
+  "properties": {
+    "orchestratorProfile": {
+      "orchestratorType": "Kubernetes",
+      "orchestratorRelease": "1.8"
+    },
+    "masterProfile": {
+      "count": 5,
+      "dnsPrefix": "",
+      "vmSize": "Standard_D2_v2",
+      "OSDiskSizeGB": 200
+    },
+    "agentPoolProfiles": [
+      {
+        "name": "agentpool1",
+        "count": 50,
+        "vmSize": "Standard_D2_v2",
+        "osType": "Linux",
+        "storageProfile": "ManagedDisks",
+        "availabilityProfile": "AvailabilitySet"
+      }
+    ],
+    "linuxProfile": {
+      "adminUsername": "azureuser",
+      "ssh": {
+        "publicKeys": [
+          {
+            "keyData": ""
+          }
+        ]
+      }
+    },
+    "servicePrincipalProfile": {
+      "clientId": "",
+      "secret": ""
+    }
+  }
+}
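This new cluster definition drives the 50-node E2E scenario: five masters and a single 50-node Linux agent pool on Standard_D2_v2 VMs with managed disks. The empty dnsPrefix, keyData, clientId, and secret fields are presumably populated by the test harness at run time. As a quick sanity check, a model like this can be parsed with a few ad-hoc structs; the `apiModel` type below is an illustrative stand-in, not acs-engine's actual vlabs API types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// apiModel is an ad-hoc type for illustration only; acs-engine defines
// its own (much larger) vlabs API types.
type apiModel struct {
	APIVersion string `json:"apiVersion"`
	Properties struct {
		MasterProfile struct {
			Count  int    `json:"count"`
			VMSize string `json:"vmSize"`
		} `json:"masterProfile"`
		AgentPoolProfiles []struct {
			Name  string `json:"name"`
			Count int    `json:"count"`
		} `json:"agentPoolProfiles"`
	} `json:"properties"`
}

func main() {
	raw := []byte(`{"apiVersion":"vlabs","properties":{"masterProfile":{"count":5,"vmSize":"Standard_D2_v2"},"agentPoolProfiles":[{"name":"agentpool1","count":50}]}}`)
	var m apiModel
	if err := json.Unmarshal(raw, &m); err != nil {
		log.Fatal(err)
	}
	// Total VM count: masters plus every agent pool.
	total := m.Properties.MasterProfile.Count
	for _, p := range m.Properties.AgentPoolProfiles {
		total += p.Count
	}
	fmt.Printf("cluster defines %d VMs\n", total) // cluster defines 55 VMs
}
```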
@@ -169,13 +169,13 @@ func (e *Engine) HasDashboard() bool {
 }
 
 // HasTiller will return true if tiller addon is enabled
-func (e *Engine) HasTiller() bool {
+func (e *Engine) HasTiller() (bool, api.KubernetesAddon) {
 	for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
 		if addon.Name == "tiller" {
-			return *addon.Enabled
+			return *addon.Enabled, addon
 		}
 	}
-	return false
+	return false, api.KubernetesAddon{}
 }
 
 // HasACIConnector will return true if aci-connector addon is enabled
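HasTiller now returns the addon definition alongside the enabled flag, so callers can inspect per-addon configuration (such as max-history) without a second lookup. A minimal sketch of the new calling pattern; `KubernetesAddon` and `Engine` here are simplified stand-ins for the real api and engine types:

```go
package main

import "fmt"

// KubernetesAddon is a stand-in for api.KubernetesAddon; illustration only.
type KubernetesAddon struct {
	Name    string
	Enabled *bool
	Config  map[string]string
}

type Engine struct{ Addons []KubernetesAddon }

// HasTiller mirrors the changed signature: the flag plus the addon itself.
func (e *Engine) HasTiller() (bool, KubernetesAddon) {
	for _, addon := range e.Addons {
		if addon.Name == "tiller" {
			return *addon.Enabled, addon
		}
	}
	return false, KubernetesAddon{}
}

func main() {
	enabled := true
	e := &Engine{Addons: []KubernetesAddon{{
		Name:    "tiller",
		Enabled: &enabled,
		Config:  map[string]string{"max-history": "5"},
	}}}
	// Callers bind both values; the addon carries its config along.
	if hasTiller, tillerAddon := e.HasTiller(); hasTiller {
		fmt.Println("max-history:", tillerAddon.Config["max-history"])
	}
}
```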
@@ -125,23 +125,20 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 	})
 
 	It("should have tiller running", func() {
-		if eng.HasTiller() {
+		if hasTiller, tillerAddon := eng.HasTiller(); hasTiller {
 			running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(running).To(Equal(true))
-		} else {
-			Skip("tiller disabled for this cluster, will not test")
-		}
-	})
-
-	It("should have a tiller max-history of 5", func() {
-		if eng.HasTiller() {
-			pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
-			Expect(err).NotTo(HaveOccurred())
-			// There is only one tiller pod and one container in that pod.
-			actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
-			Expect(err).NotTo(HaveOccurred())
-			Expect(actualTillerMaxHistory).To(Equal("5"))
+			if tillerAddon.Config != nil {
+				By("Ensuring that the correct max-history has been applied")
+				maxHistory := tillerAddon.Config["max-history"]
+				pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
+				Expect(err).NotTo(HaveOccurred())
+				// There is only one tiller pod and one container in that pod.
+				actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
+				Expect(err).NotTo(HaveOccurred())
+				Expect(actualTillerMaxHistory).To(Equal(maxHistory))
+			}
 		} else {
 			Skip("tiller disabled for this cluster, will not test")
 		}
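The rewritten test derives the expected value from the addon's own config instead of hardcoding "5", and the `Config != nil` guard matters because of Go's map semantics, sketched below:

```go
package main

import "fmt"

func main() {
	var unset map[string]string // nil: addon declared with no config
	// Indexing a nil map is legal in Go and yields the zero value, so
	// without the Config != nil guard the test would compare the pod's
	// TILLER_HISTORY_MAX against "" whenever no max-history was set.
	fmt.Printf("%q\n", unset["max-history"]) // ""

	set := map[string]string{"max-history": "5"}
	if set != nil {
		fmt.Println("assert TILLER_HISTORY_MAX ==", set["max-history"])
	}
}
```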
@@ -30,7 +30,8 @@ type Connection struct {
 func NewConnection(host, port, user, keyPath string) (*Connection, error) {
 	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
 	if err != nil {
-		log.Fatal(err)
+		log.Printf("unable to establish net connection $SSH_AUTH_SOCK has value %s\n", os.Getenv("SSH_AUTH_SOCK"))
+		return nil, err
 	}
 	defer conn.Close()
 	ag := agent.NewClient(conn)
@@ -51,7 +52,8 @@ func NewConnection(host, port, user, keyPath string) (*Connection, error) {
 	ag.Add(addKey)
 	signers, err := ag.Signers()
 	if err != nil {
-		log.Fatal(err)
+		log.Println("unable to add key to agent")
+		return nil, err
 	}
 	auths := []ssh.AuthMethod{ssh.PublicKeys(signers...)}
 
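Both ssh.go fixes follow the same principle: log.Fatal calls os.Exit, which aborts the whole test process and skips deferred cleanup, so helper code should log context and return the error to the caller instead. A stand-alone sketch of the dial-the-agent step with error propagation; `dialAgent` is a hypothetical helper, not a function in this repo:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

// dialAgent connects to the local ssh-agent over the socket named by
// $SSH_AUTH_SOCK. Errors are returned with logged context, never fatal.
func dialAgent() (agent.Agent, net.Conn, error) {
	sock := os.Getenv("SSH_AUTH_SOCK")
	conn, err := net.Dial("unix", sock)
	if err != nil {
		log.Printf("unable to establish net connection; $SSH_AUTH_SOCK has value %s", sock)
		return nil, nil, err
	}
	return agent.NewClient(conn), conn, nil
}

func main() {
	ag, conn, err := dialAgent()
	if err != nil {
		fmt.Println("no agent available:", err)
		return
	}
	defer conn.Close()
	keys, _ := ag.List()
	fmt.Printf("agent holds %d keys\n", len(keys))
}
```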
@@ -32,6 +32,8 @@ type CLIProvisioner struct {
 	Point          *metrics.Point
 	ResourceGroups []string
 	Engine         *engine.Engine
+	Masters        []azure.VM
+	Agents         []azure.VM
 }
 
 // BuildCLIProvisioner will return a ProvisionerConfig object which is used to run a provision
@@ -155,6 +157,22 @@ func (cli *CLIProvisioner) provision() error {
 		return fmt.Errorf("Error while trying to create deployment:%s", err)
 	}
 
+	// Store the hosts for future introspection
+	hosts, err := cli.Account.GetHosts(cli.Config.Name)
+	if err != nil {
+		return err
+	}
+	var masters, agents []azure.VM
+	for _, host := range hosts {
+		if strings.Contains(host.Name, "master") {
+			masters = append(masters, host)
+		} else if strings.Contains(host.Name, "agent") {
+			agents = append(agents, host)
+		}
+	}
+	cli.Masters = masters
+	cli.Agents = agents
+
 	return nil
 }
 
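provision() now classifies the deployed VMs once, by the "master"/"agent" substrings in their names, and caches the result in the new Masters and Agents fields so later steps can reuse the lists instead of re-querying Azure. A self-contained sketch of that partitioning step; `VM` and `partitionHosts` are illustrative stand-ins, and the host names merely follow acs-engine's usual naming shape:

```go
package main

import (
	"fmt"
	"strings"
)

// VM is a stand-in for azure.VM; only the Name field matters here.
type VM struct{ Name string }

// partitionHosts splits cluster VMs into masters and agents using the
// same name-substring convention the provisioner relies on.
func partitionHosts(hosts []VM) (masters, agents []VM) {
	for _, host := range hosts {
		if strings.Contains(host.Name, "master") {
			masters = append(masters, host)
		} else if strings.Contains(host.Name, "agent") {
			agents = append(agents, host)
		}
	}
	return masters, agents
}

func main() {
	hosts := []VM{{"k8s-master-12345678-0"}, {"k8s-agentpool1-12345678-0"}}
	m, a := partitionHosts(hosts)
	fmt.Printf("%d masters, %d agents\n", len(m), len(a)) // 1 masters, 1 agents
}
```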
@@ -205,18 +223,6 @@ func (cli *CLIProvisioner) waitForNodes() error {
 
 // FetchProvisioningMetrics gets provisioning files from all hosts in a cluster
 func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Config, acct *azure.Account) error {
-	var masters, agents []string
-	hosts, err := acct.GetHosts("")
-	if err != nil {
-		return err
-	}
-	for _, host := range hosts {
-		if strings.Contains(host.Name, "master") {
-			masters = append(masters, host.Name)
-		} else if strings.Contains(host.Name, "agent") {
-			agents = append(agents, host.Name)
-		}
-	}
 	agentFiles := []string{"/var/log/azure/cluster-provision.log", "/var/log/cloud-init.log",
 		"/var/log/cloud-init-output.log", "/var/log/syslog", "/var/log/azure/custom-script/handler.log",
 		"/opt/m", "/opt/azure/containers/kubelet.sh", "/opt/azure/containers/provision.sh",
@@ -228,18 +234,18 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con
 	if err != nil {
 		return err
 	}
-	for _, master := range masters {
+	for _, master := range cli.Masters {
 		for _, fp := range masterFiles {
-			err := conn.CopyRemote(master, fp)
+			err := conn.CopyRemote(master.Name, fp)
 			if err != nil {
 				log.Printf("Error reading file from path (%s):%s", path, err)
 			}
 		}
 	}
 
-	for _, agent := range agents {
+	for _, agent := range cli.Agents {
 		for _, fp := range agentFiles {
-			err := conn.CopyRemote(agent, fp)
+			err := conn.CopyRemote(agent.Name, fp)
 			if err != nil {
 				log.Printf("Error reading file from path (%s):%s", path, err)
 			}
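With the hosts cached, FetchProvisioningMetrics ranges over cli.Masters and cli.Agents and passes host.Name to CopyRemote; a failed copy is logged and the loop moves on, so one unreachable node does not abort collection from the remaining 54. A small sketch of that keep-going pattern; `copyFile` is a hypothetical stand-in for conn.CopyRemote:

```go
package main

import (
	"fmt"
	"log"
)

// copyFile is a hypothetical stand-in for conn.CopyRemote(host, path).
func copyFile(host, path string) error {
	if host == "unreachable" {
		return fmt.Errorf("dial %s: timeout", host)
	}
	return nil
}

func main() {
	hosts := []string{"master-0", "unreachable", "agent-1"}
	files := []string{"/var/log/syslog"}
	for _, h := range hosts {
		for _, fp := range files {
			// Log and keep going: best-effort collection across all nodes.
			if err := copyFile(h, fp); err != nil {
				log.Printf("Error reading file from path (%s):%s", fp, err)
			}
		}
	}
	fmt.Println("collection finished")
}
```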