Mirror of https://github.com/Azure/aks-engine.git
fix(test/e2e/kubernetes_test): Fix the access dashboard test (#1311)
* Update logic around waiting for pod readiness
* Make sure we add the ssh key to the agent before running the tests
* If we get an error in this test, output the stdout/stderr
Parent: e9b2e1ea18
Commit: f0037df289
@@ -22,3 +22,6 @@ pkg/i18n/translations.go
 _logs/
 test/acs-engine-test/report/TestReport.json
 *.swp
+
+# I have no idea why these get generated when I run the e2e test
+test/e2e/kubernetes/translations/
@@ -106,28 +106,28 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 	})
 
 	It("should have kube-dns running", func() {
-		pod.WaitOnReady("kube-dns", "kube-system", 5*time.Second, 3*time.Minute)
-		running, err := pod.AreAllPodsRunning("kube-dns", "kube-system")
+		running, err := pod.WaitOnReady("kube-dns", "kube-system", 5*time.Second, 10*time.Minute)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(running).To(Equal(true))
 	})
 
 	It("should have kube-dashboard running", func() {
-		pod.WaitOnReady("kubernetes-dashboard", "kube-system", 5*time.Second, 3*time.Minute)
-		running, err := pod.AreAllPodsRunning("kubernetes-dashboard", "kube-system")
+		running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 5*time.Second, 10*time.Minute)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(running).To(Equal(true))
 	})
 
 	It("should have kube-proxy running", func() {
-		pod.WaitOnReady("kube-proxy", "kube-system", 5*time.Second, 3*time.Minute)
-		running, err := pod.AreAllPodsRunning("kube-proxy", "kube-system")
+		running, err := pod.WaitOnReady("kube-proxy", "kube-system", 5*time.Second, 10*time.Minute)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(running).To(Equal(true))
 	})
 
 	It("should be able to access the dashboard from each node", func() {
-		pod.WaitOnReady("kubernetes-dashboard", "kube-system", 5*time.Second, 3*time.Minute)
+		running, err := pod.WaitOnReady("kube-proxy", "kube-system", 5*time.Second, 10*time.Minute)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(running).To(Equal(true))
 
 		kubeConfig, err := GetConfig()
 		Expect(err).NotTo(HaveOccurred())
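Note: the hunk above (apparently from the e2e Kubernetes test file named in the commit title) replaces a fire-and-forget pod.WaitOnReady call plus a separate one-shot pod.AreAllPodsRunning check with a single blocking WaitOnReady whose (bool, error) result is asserted, and raises the timeout from 3 to 10 minutes. A minimal standalone sketch of that poll-until-ready-or-timeout pattern, with illustrative names only (waitUntil and check are not from the repo):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitUntil polls check every sleep until it returns true or the timeout
// elapses. This mirrors what the tests now rely on: one blocking call
// that either succeeds or returns an error, instead of a fixed wait
// followed by a single point-in-time check.
func waitUntil(check func() bool, sleep, duration time.Duration) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), duration)
	defer cancel()
	for {
		if check() {
			return true, nil
		}
		select {
		case <-ctx.Done():
			return false, fmt.Errorf("timeout exceeded (%s)", duration)
		case <-time.After(sleep):
			// Sleep between polls, then try again.
		}
	}
}

func main() {
	start := time.Now()
	// Becomes "ready" after two seconds; polled every 500 ms.
	ready, err := waitUntil(func() bool { return time.Since(start) > 2*time.Second },
		500*time.Millisecond, 10*time.Second)
	fmt.Println(ready, err)
}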
@@ -145,7 +145,10 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 		for _, node := range nodeList.Nodes {
 			dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port)
 			curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL)
-			_, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput()
+			output, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput()
+			if err != nil {
+				log.Printf("\n\nOutput:%s\n\n", string(output))
+			}
 			Expect(err).NotTo(HaveOccurred())
 		}
 	})
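This change stops discarding the ssh/curl output: CombinedOutput's stdout and stderr are logged whenever the command fails, which is what the commit message means by "output the stdout/stderr". A small self-contained sketch of the same capture-then-log-on-error idea (runAndReport is an illustrative name, not repo code):

package main

import (
	"log"
	"os/exec"
)

// runAndReport runs the command and, on failure, logs the combined
// stdout/stderr so a CI log shows why the command failed, not just
// that it did.
func runAndReport(name string, args ...string) error {
	output, err := exec.Command(name, args...).CombinedOutput()
	if err != nil {
		log.Printf("command %s failed: %v\nOutput:\n%s", name, err, string(output))
	}
	return err
}

func main() {
	// A command guaranteed to fail, to exercise the logging path.
	_ = runAndReport("ls", "/definitely/not/a/path")
}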
@@ -71,13 +71,13 @@ func Get(podName, namespace string) (*Pod, error) {
 
 // AreAllPodsRunning will return true if all pods are in a Running State
 func AreAllPodsRunning(podPrefix, namespace string) (bool, error) {
-	status := true
 	pl, err := GetAll(namespace)
 	if err != nil {
 		log.Printf("Error while trying to check if all pods are in running state:%s", err)
 		return false, err
 	}
 
+	var status []bool
 	for _, pod := range pl.Pods {
 		matched, err := regexp.MatchString(podPrefix+"-.*", pod.Metadata.Name)
 		if err != nil {
@@ -86,16 +86,28 @@ func AreAllPodsRunning(podPrefix, namespace string) (bool, error) {
 		}
 		if matched {
 			if pod.Status.Phase != "Running" {
-				status = false
+				status = append(status, false)
+			} else {
+				status = append(status, true)
 			}
 		}
 	}
 
-	return status, nil
+	if len(status) == 0 {
+		return false, nil
+	}
+
+	for _, s := range status {
+		if s == false {
+			return false, nil
+		}
+	}
+
+	return true, nil
 }
 
 // WaitOnReady will block until all nodes are in ready state
-func WaitOnReady(podPrefix, namespace string, sleep, duration time.Duration) bool {
+func WaitOnReady(podPrefix, namespace string, sleep, duration time.Duration) (bool, error) {
 	readyCh := make(chan bool, 1)
 	errCh := make(chan error)
 	ctx, cancel := context.WithTimeout(context.Background(), duration)
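The two hunks above change AreAllPodsRunning from a single bool flag initialized to true into a per-pod slice of results. That fixes a vacuous-success bug: with status := true, a prefix matching zero pods returned true, so a test could pass before its pods even existed. The new version returns false for an empty result set, and false if any matched pod is not in the Running phase. The decision logic, extracted into a pure function for illustration (allRunning is not a repo function):

package main

import "fmt"

// allRunning reproduces the new decision logic: no matching pods means
// "not running" (false), and any pod outside the Running phase also
// yields false.
func allRunning(status []bool) bool {
	if len(status) == 0 {
		return false
	}
	for _, s := range status {
		if !s {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(allRunning(nil))                 // false: no pods matched the prefix
	fmt.Println(allRunning([]bool{true, false})) // false: one pod not Running
	fmt.Println(allRunning([]bool{true, true}))  // true: every matched pod is Running
}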
@@ -112,17 +124,18 @@ func WaitOnReady(podPrefix, namespace string, sleep, duration time.Duration) boo
 				}
 				if ready == true {
 					readyCh <- true
-				} else {
-					time.Sleep(sleep)
 				}
+				time.Sleep(sleep)
 			}
 		}
 	}()
 	for {
 		select {
-		case <-errCh:
-			return false
+		case err := <-errCh:
+			return false, err
 		case ready := <-readyCh:
-			return ready
+			return ready, nil
 		}
 	}
 }
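This hunk finishes the WaitOnReady refactor: the polling goroutine now sleeps on every loop iteration instead of only when pods are not ready, and the function returns (bool, error) so the timeout sent over errCh reaches the caller instead of being collapsed into a bare false. A self-contained sketch of the same producer-goroutine/select structure, assuming an illustrative check callback (none of these names are repo code):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitOnReady polls check in a goroutine and reports over channels;
// the caller selects on either an error (including timeout) or
// readiness. Unlike the diff above, the goroutine returns after each
// send so it cannot leak; both channels are buffered for the same reason.
func waitOnReady(check func() (bool, error), sleep, duration time.Duration) (bool, error) {
	readyCh := make(chan bool, 1)
	errCh := make(chan error, 1)
	ctx, cancel := context.WithTimeout(context.Background(), duration)
	defer cancel()
	go func() {
		for {
			select {
			case <-ctx.Done():
				errCh <- errors.New("timeout exceeded while waiting for readiness")
				return
			default:
				ready, err := check()
				if err != nil {
					errCh <- err
					return
				}
				if ready {
					readyCh <- true
					return
				}
				// Sleep on every iteration so the poll loop cannot spin hot.
				time.Sleep(sleep)
			}
		}
	}()
	select {
	case err := <-errCh:
		return false, err
	case ready := <-readyCh:
		return ready, nil
	}
}

func main() {
	n := 0
	ready, err := waitOnReady(func() (bool, error) {
		n++
		return n >= 3, nil // "ready" on the third poll
	}, 10*time.Millisecond, time.Second)
	fmt.Println(ready, err)
}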
@@ -31,9 +31,14 @@ if [ -z "${NAME}" ]; then
 	echo "Generating new SSH Keys"
 	ssh-keygen -f _output/${SSH_KEY_NAME} -b 2048 -t rsa -q -N ''
+	chmod 0600 _output/${SSH_KEY_NAME}*
+	ssh-add _output/${SSH_KEY_NAME}
 fi
 
 export PUBLIC_SSH_KEY="$(cat _output/${SSH_KEY_NAME}.pub)"
 export DNS_PREFIX=test-$(echo $RANDOM)
 
 ginkgo -slowSpecThreshold 60 -r test/e2e/${TEST}
+
+if [ -z "${CLEANUP_ON_EXIT}" ] || [ "${CLEANUP_ON_EXIT}" != false ]; then
+	ssh-add -d _output/${SSH_KEY_NAME}
+fi
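The script change covers the second bullet of the commit message: after generating the key pair it tightens permissions (ssh refuses private keys that are group- or world-readable) and loads the key into the ssh-agent before the Ginkgo suite runs, then removes it from the agent afterwards unless CLEANUP_ON_EXIT is set to false. If a run ever needs to verify the agent state programmatically, a sketch like the following would work (keyLoaded is illustrative, not repo tooling; it only shells out to ssh-add -l):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// keyLoaded reports whether the agent currently holds a key whose
// listing contains name, using `ssh-add -l` against the same agent the
// script adds the generated key to.
func keyLoaded(name string) (bool, error) {
	out, err := exec.Command("ssh-add", "-l").CombinedOutput()
	if err != nil {
		// ssh-add exits non-zero when the agent holds no identities
		// or is unreachable.
		return false, fmt.Errorf("ssh-add -l: %v (%s)", err, strings.TrimSpace(string(out)))
	}
	return strings.Contains(string(out), name), nil
}

func main() {
	ok, err := keyLoaded("id_rsa")
	fmt.Println(ok, err)
}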