e2eutil: port killMembers
Parent: 1c4259de41
Commit: caf1efafef
@@ -100,7 +100,7 @@ func testStopOperator(t *testing.T, kill bool) {
 		}
 	}
 
-	if err := killMembers(f, names[0]); err != nil {
+	if err := e2eutil.KillMembers(f.KubeClient, f.Namespace, names[0]); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := e2eutil.WaitUntilSizeReached(t, f.KubeClient, 2, 10*time.Second, testEtcd); err != nil {
@@ -20,9 +20,27 @@ import (
 	"testing"
 	"time"
 
+	"github.com/coreos/etcd-operator/pkg/util/k8sutil"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/pkg/api/v1"
 )
 
+func KillMembers(kubecli kubernetes.Interface, ns string, names ...string) error {
+	for _, name := range names {
+		err := kubecli.CoreV1().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
+		if err != nil && !k8sutil.IsKubernetesResourceNotFoundError(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+func LogfWithTimestamp(t *testing.T, format string, args ...interface{}) {
+	t.Log(time.Now(), fmt.Sprintf(format, args...))
+}
+
 func printContainerStatus(buf *bytes.Buffer, ss []v1.ContainerStatus) {
 	for _, s := range ss {
 		if s.State.Waiting != nil {
@@ -33,7 +51,3 @@ func printContainerStatus(buf *bytes.Buffer, ss []v1.ContainerStatus) {
 		}
 	}
 }
-
-func LogfWithTimestamp(t *testing.T, format string, args ...interface{}) {
-	t.Log(time.Now(), fmt.Sprintf(format, args...))
-}
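For reference, a minimal sketch of how the ported helper is meant to be used from the e2e tests, mirroring the call sites in the hunks above. The helper name killAndWait and its exact signature are illustrative only and not part of this commit; the sketch assumes the e2e test package's existing imports and its package-level testEtcd cluster object.

// Minimal sketch (not from this commit): compose the ported e2eutil helpers.
// Assumes the e2e test package's imports and the package-level testEtcd cluster.
func killAndWait(t *testing.T, f *framework.Framework, size int, timeout time.Duration, names ...string) {
	// Kill the given etcd member pods through the shared e2eutil helper.
	if err := e2eutil.KillMembers(f.KubeClient, f.Namespace, names...); err != nil {
		t.Fatal(err)
	}
	// Then wait for the operator to bring the cluster back to the expected size.
	if _, err := e2eutil.WaitUntilSizeReached(t, f.KubeClient, size, timeout, testEtcd); err != nil {
		t.Fatal(err)
	}
}

Note that LogfWithTimestamp itself is not changed by this commit: the two hunks above only move it ahead of printContainerStatus in the e2eutil file, next to the newly added KillMembers.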
@@ -66,7 +66,7 @@ func testOneMemberRecovery(t *testing.T) {
 	fmt.Println("reached to 3 members cluster")
 
 	// The last pod could have not come up serving yet. If we are not killing the last pod, we should wait.
-	if err := killMembers(f, names[2]); err != nil {
+	if err := e2eutil.KillMembers(f.KubeClient, f.Namespace, names[2]); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := e2eutil.WaitUntilSizeReached(t, f.KubeClient, 3, 60*time.Second, testEtcd); err != nil {
@@ -148,7 +148,7 @@ func testDisasterRecoveryWithBackupPolicy(t *testing.T, numToKill int, backupPol
 	toKill := names[:numToKill]
 	e2eutil.LogfWithTimestamp(t, "killing pods: %v", toKill)
 	// TODO: race: members could be recovered between being deleted one by one.
-	if err := killMembers(f, toKill...); err != nil {
+	if err := e2eutil.KillMembers(f.KubeClient, f.Namespace, toKill...); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := e2eutil.WaitUntilSizeReached(t, f.KubeClient, 3, 120*time.Second, testEtcd); err != nil {
@@ -90,16 +90,6 @@ func makeBackup(f *framework.Framework, clusterName string) error {
 	return nil
 }
 
-func killMembers(f *framework.Framework, names ...string) error {
-	for _, name := range names {
-		err := f.KubeClient.CoreV1().Pods(f.Namespace).Delete(name, metav1.NewDeleteOptions(0))
-		if err != nil && !k8sutil.IsKubernetesResourceNotFoundError(err) {
-			return err
-		}
-	}
-	return nil
-}
-
 func createEtcdClient(addr string) (*clientv3.Client, error) {
 	cfg := clientv3.Config{
 		Endpoints: []string{addr},
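A side note on the deletion call, which is identical in the removed helper above and the new e2eutil.KillMembers: metav1.NewDeleteOptions(0) requests an immediate delete, and "not found" errors are tolerated so that killing an already-deleted member pod is a no-op. Roughly, and for illustration only, the options it builds amount to:

// Illustrative only: what metav1.NewDeleteOptions(0) expands to.
// A zero-second grace period terminates the pod immediately instead of
// giving it the default 30-second graceful shutdown window.
grace := int64(0)
opts := &metav1.DeleteOptions{GracePeriodSeconds: &grace}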