Merge pull request #1066 from hongchaodeng/r2
e2eutil: port WaitBackupPod() and MakeBackup()
Commit ba178d3d55
@@ -105,10 +105,12 @@ func testBackupStatus(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to create 1 members etcd cluster: %v", err)
 	}
-	if err := waitBackupPodUp(f, testEtcd.Metadata.Name, 60*time.Second); err != nil {
+	err = e2eutil.WaitBackupPodUp(t, f.KubeClient, f.Namespace, testEtcd.Metadata.Name, 60*time.Second)
+	if err != nil {
 		t.Fatalf("failed to create backup pod: %v", err)
 	}
-	if err := makeBackup(f, testEtcd.Metadata.Name); err != nil {
+	err = e2eutil.MakeBackup(f.KubeClient, f.Namespace, testEtcd.Metadata.Name)
+	if err != nil {
 		t.Fatalf("fail to make backup: %v", err)
 	}
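The same mechanical change repeats at every call site in this commit: the old package-local helpers took the e2e *framework.Framework and derived the client and namespace from it, while the ported e2eutil functions take those dependencies explicitly (WaitBackupPodUp additionally takes *testing.T so it can log progress). Side by side, from the signatures in this diff:

	// Removed from package e2e (see the bottom of this commit):
	func waitBackupPodUp(f *framework.Framework, clusterName string, timeout time.Duration) error
	func makeBackup(f *framework.Framework, clusterName string) error

	// Added to package e2eutil:
	func WaitBackupPodUp(t *testing.T, kubecli kubernetes.Interface, ns, clusterName string, timeout time.Duration) error
	func MakeBackup(kubecli kubernetes.Interface, ns, clusterName string) error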
@@ -16,13 +16,19 @@ package e2eutil

 import (
 	"bytes"
 	"context"
 	"fmt"
 	"net/http"
 	"testing"
 	"time"

 	"github.com/coreos/etcd-operator/client/experimentalclient"
 	"github.com/coreos/etcd-operator/pkg/util/constants"
 	"github.com/coreos/etcd-operator/pkg/util/k8sutil"
 	"github.com/coreos/etcd-operator/pkg/util/retryutil"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/pkg/api/v1"
 )
@@ -37,6 +43,50 @@ func KillMembers(kubecli kubernetes.Interface, ns string, names ...string) error
 	return nil
 }

+func WaitBackupPodUp(t *testing.T, kubecli kubernetes.Interface, ns, clusterName string, timeout time.Duration) error {
+	ls := labels.SelectorFromSet(k8sutil.BackupSidecarLabels(clusterName))
+	return retryutil.Retry(5*time.Second, int(timeout/(5*time.Second)), func() (done bool, err error) {
+		podList, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{
+			LabelSelector: ls.String(),
+		})
+		if err != nil {
+			return false, err
+		}
+		for i := range podList.Items {
+			if podList.Items[i].Status.Phase == v1.PodRunning {
+				LogfWithTimestamp(t, "backup pod (%s) is running", podList.Items[i].Name)
+				return true, nil
+			}
+		}
+		return false, nil
+	})
+}
+
+func MakeBackup(kubecli kubernetes.Interface, ns, clusterName string) error {
+	ls := labels.SelectorFromSet(k8sutil.BackupSidecarLabels(clusterName))
+	podList, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{
+		LabelSelector: ls.String(),
+	})
+	if err != nil {
+		return err
+	}
+	if len(podList.Items) < 1 {
+		return fmt.Errorf("no backup pod found")
+	}
+
+	// TODO: We are assuming Kubernetes pod network is accessible from test machine.
+	addr := fmt.Sprintf("%s:%d", podList.Items[0].Status.PodIP, constants.DefaultBackupPodHTTPPort)
+	bc := experimentalclient.NewBackupWithAddr(&http.Client{}, "http", addr)
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	err = bc.Request(ctx)
+	if err != nil {
+		return fmt.Errorf("backup pod (%s): %v", podList.Items[0].Name, err)
+	}
+	return nil
+}
+
+func LogfWithTimestamp(t *testing.T, format string, args ...interface{}) {
+	t.Log(time.Now(), fmt.Sprintf(format, args...))
+}
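A note on the polling cadence in WaitBackupPodUp above: retryutil.Retry is called with a fixed 5-second interval and int(timeout/(5*time.Second)) attempts, so the 60-second timeout that every caller in this commit passes works out to 12 polls of the pod list. A minimal standalone sketch of that arithmetic and retry shape (the retry helper below only mimics the (interval, retries, condition) signature visible above; it is not the operator's retryutil implementation):

	package main

	import (
		"fmt"
		"time"
	)

	// retry mirrors the (interval, maxRetries, condition) shape that
	// retryutil.Retry has in the hunk above; sketch only.
	func retry(interval time.Duration, maxRetries int, f func() (bool, error)) error {
		for i := 0; i < maxRetries; i++ {
			done, err := f()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
			time.Sleep(interval)
		}
		return fmt.Errorf("still not done after %d retries", maxRetries)
	}

	func main() {
		timeout := 60 * time.Second
		attempts := int(timeout / (5 * time.Second)) // 60s / 5s = 12 attempts
		err := retry(10*time.Millisecond, attempts, func() (bool, error) {
			// A real condition would list pods and check for v1.PodRunning,
			// as WaitBackupPodUp does.
			return true, nil
		})
		fmt.Println(attempts, err) // 12 <nil>
	}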
@@ -130,13 +130,15 @@ func testDisasterRecoveryWithBackupPolicy(t *testing.T, numToKill int, backupPol
 		t.Fatalf("failed to create 3 members etcd cluster: %v", err)
 	}
 	fmt.Println("reached to 3 members cluster")
-	if err := waitBackupPodUp(f, testEtcd.Metadata.Name, 60*time.Second); err != nil {
+	err = e2eutil.WaitBackupPodUp(t, f.KubeClient, f.Namespace, testEtcd.Metadata.Name, 60*time.Second)
+	if err != nil {
 		t.Fatalf("failed to create backup pod: %v", err)
 	}
 	// No left pod to make a backup from. We need to back up ahead.
 	// If there is any left pod, operator should be able to make a backup from it.
 	if numToKill == testEtcd.Spec.Size {
-		if err := makeBackup(f, testEtcd.Metadata.Name); err != nil {
+		err = e2eutil.MakeBackup(f.KubeClient, f.Namespace, testEtcd.Metadata.Name)
+		if err != nil {
 			t.Fatalf("fail to make a latest backup: %v", err)
 		}
 	} else {
@@ -96,10 +96,12 @@ func testClusterRestoreWithBackupPolicy(t *testing.T, needDataClone bool, backup
 		t.Fatal(err)
 	}

-	if err := waitBackupPodUp(f, testEtcd.Metadata.Name, 60*time.Second); err != nil {
+	err = e2eutil.WaitBackupPodUp(t, f.KubeClient, f.Namespace, testEtcd.Metadata.Name, 60*time.Second)
+	if err != nil {
 		t.Fatalf("failed to create backup pod: %v", err)
 	}
-	if err := makeBackup(f, testEtcd.Metadata.Name); err != nil {
+	err = e2eutil.MakeBackup(f.KubeClient, f.Namespace, testEtcd.Metadata.Name)
+	if err != nil {
 		t.Fatalf("fail to make a backup: %v", err)
 	}
@@ -15,21 +15,9 @@
 package e2e

 import (
 	"context"
 	"fmt"
 	"net/http"
 	"time"

 	"github.com/coreos/etcd-operator/client/experimentalclient"
 	"github.com/coreos/etcd-operator/pkg/util/constants"
 	"github.com/coreos/etcd-operator/pkg/util/k8sutil"
 	"github.com/coreos/etcd-operator/pkg/util/retryutil"
 	"github.com/coreos/etcd-operator/test/e2e/framework"

 	"github.com/coreos/etcd/clientv3"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/pkg/api/v1"
 )

 const (
@@ -40,56 +28,6 @@ const (
 	etcdValBar = "bar"
 )

-func waitBackupPodUp(f *framework.Framework, clusterName string, timeout time.Duration) error {
-	ls := labels.SelectorFromSet(map[string]string{
-		"app":          k8sutil.BackupPodSelectorAppField,
-		"etcd_cluster": clusterName,
-	}).String()
-	return retryutil.Retry(5*time.Second, int(timeout/(5*time.Second)), func() (done bool, err error) {
-		podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
-			LabelSelector: ls,
-		})
-		if err != nil {
-			return false, err
-		}
-		for i := range podList.Items {
-			if podList.Items[i].Status.Phase == v1.PodRunning {
-				return true, nil
-			}
-		}
-		return false, nil
-	})
-}
-
-func makeBackup(f *framework.Framework, clusterName string) error {
-	ls := labels.SelectorFromSet(map[string]string{
-		"app":          k8sutil.BackupPodSelectorAppField,
-		"etcd_cluster": clusterName,
-	}).String()
-	podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
-		LabelSelector: ls,
-	})
-	if err != nil {
-		return err
-	}
-	if len(podList.Items) < 1 {
-		return fmt.Errorf("no backup pod found")
-	}
-
-	// We are assuming Kubernetes pod network is accessible from test machine.
-	// TODO: remove this assumption.
-	addr := fmt.Sprintf("%s:%d", podList.Items[0].Status.PodIP, constants.DefaultBackupPodHTTPPort)
-	bc := experimentalclient.NewBackupWithAddr(&http.Client{}, "http", addr)
-	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-	defer cancel()
-
-	err = bc.Request(ctx)
-	if err != nil {
-		return fmt.Errorf("backup pod (%s): %v", podList.Items[0].Name, err)
-	}
-	return nil
-}
-
 func createEtcdClient(addr string) (*clientv3.Client, error) {
 	cfg := clientv3.Config{
 		Endpoints: []string{addr},
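One detail worth calling out in the port: the removed helpers built the backup sidecar's label selector inline, while the ported versions call k8sutil.BackupSidecarLabels. That helper's body is not part of this diff; assuming it merely packages the same two labels (a reconstruction inferred from the removed code above, not confirmed by this commit), it would amount to:

	// Hypothetical reconstruction, inferred from the inline map in the
	// removed helpers; the real k8sutil.BackupSidecarLabels is not shown here.
	func BackupSidecarLabels(clusterName string) map[string]string {
		return map[string]string{
			"app":          BackupPodSelectorAppField,
			"etcd_cluster": clusterName,
		}
	}

Under that assumption the selector still matches only the backup sidecar of one cluster, and MakeBackup keeps its old behavior: pick the first matching pod, dial its pod IP on constants.DefaultBackupPodHTTPPort, and ask the experimental backup client to trigger a backup over plain HTTP. Both versions still assume the Kubernetes pod network is reachable from the test machine, which the TODO comments track.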