*: update client-go to 3.0-beta
Parent: 5365c3384b
Commit: 83250bbc10
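Summarized in one place, the change applied across every file below: metadata and request-option types move from k8s.io/client-go/pkg/api/v1 and .../unversioned into k8s.io/apimachinery (imported as metav1 or schema), and the typed clients now take explicit option arguments on Get, List and Delete. A minimal, hedged sketch of the new calling convention (the demo function, namespace and pod name are illustrative placeholders, not code from this repository):

```go
package demo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1" // still the home of Pod et al. in client-go 3.0-beta
)

// touchPod shows the 3.0-beta calling convention used throughout this commit.
func touchPod(kubecli kubernetes.Interface, ns string) error {
	pod := &v1.Pod{
		// ObjectMeta now comes from apimachinery's metav1 package.
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
	}
	if _, err := kubecli.CoreV1().Pods(ns).Create(pod); err != nil {
		return err
	}
	// Get takes an explicit metav1.GetOptions value.
	if _, err := kubecli.CoreV1().Pods(ns).Get("example", metav1.GetOptions{}); err != nil {
		return err
	}
	// List takes metav1.ListOptions; Delete takes *metav1.DeleteOptions.
	if _, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{}); err != nil {
		return err
	}
	return kubecli.CoreV1().Pods(ns).Delete("example", metav1.NewDeleteOptions(0))
}
```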
@@ -8,8 +8,11 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/retryutil"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api/v1"
     // for gcp auth
     _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
     "k8s.io/client-go/tools/clientcmd"
 )
@@ -31,7 +34,7 @@ func TestClient(t *testing.T) {
     kubecli := mustNewKubeClient()

     pod := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
         },
         Spec: v1.PodSpec{
@@ -50,9 +53,9 @@ func TestClient(t *testing.T) {
     if _, err := kubecli.CoreV1().Pods(namespace).Create(pod); err != nil {
         t.Fatalf("fail to create job (%s): %v", name, err)
     }
-    defer kubecli.CoreV1().Pods(namespace).Delete(name, v1.NewDeleteOptions(1))
+    defer kubecli.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(1))
     err := retryutil.Retry(5*time.Second, 6, func() (bool, error) {
-        pod, err := kubecli.CoreV1().Pods(namespace).Get(name)
+        pod, err := kubecli.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -22,7 +22,7 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"

     "github.com/Sirupsen/logrus"
-    apierrors "k8s.io/client-go/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
 )

 func main() {
@@ -7,10 +7,10 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"
     "github.com/coreos/etcd-operator/pkg/util/k8sutil"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/pkg/api"
-    "k8s.io/client-go/pkg/api/unversioned"
-    "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/runtime"
     "k8s.io/client-go/rest"
 )
@@ -34,7 +34,7 @@ type Operator interface {
 }

 var (
-    groupversion = unversioned.GroupVersion{
+    groupversion = schema.GroupVersion{
         Group: spec.TPRGroup,
         Version: spec.TPRVersion,
     }
@@ -47,8 +47,8 @@ func init() {
             groupversion,
             &spec.Cluster{},
             &spec.ClusterList{},
-            &v1.ListOptions{},
-            &v1.DeleteOptions{},
+            &metav1.ListOptions{},
+            &metav1.DeleteOptions{},
         )
         return nil
     })
@@ -77,7 +77,7 @@ func NewOperator(namespace string) (Operator, error) {

 func (o *operator) Create(ctx context.Context, name string, cspec spec.ClusterSpec) error {
     cluster := &spec.Cluster{
-        Metadata: v1.ObjectMeta{
+        Metadata: metav1.ObjectMeta{
             Name: name,
         },
         Spec: cspec,
@@ -145,7 +145,7 @@ func (o *operator) List(ctx context.Context) (*spec.ClusterList, error) {
     err := o.tprClient.Get().
         Resource(o.tprKindPlural).
         Namespace(o.ns).
-        VersionedParams(&v1.ListOptions{}, api.ParameterCodec).
+        VersionedParams(&metav1.ListOptions{}, api.ParameterCodec).
         Do().Into(clusters)

     if err != nil {
@@ -32,14 +32,15 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/k8sutil"
     "github.com/coreos/etcd-operator/pkg/util/k8sutil/election"
     "github.com/coreos/etcd-operator/pkg/util/k8sutil/election/resourcelock"
+    "github.com/coreos/etcd-operator/pkg/util/retryutil"
     "github.com/coreos/etcd-operator/version"

     "github.com/Sirupsen/logrus"
-    "github.com/coreos/etcd-operator/pkg/util/retryutil"
     "golang.org/x/time/rate"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api"
-    "k8s.io/client-go/pkg/labels"
     "k8s.io/client-go/tools/record"
 )
@@ -209,7 +210,7 @@ func newControllerConfig() controller.Config {
 func getMyPodServiceAccount(kubecli kubernetes.Interface) (string, error) {
     var sa string
     err := retryutil.Retry(5*time.Second, 100, func() (bool, error) {
-        pod, err := kubecli.CoreV1().Pods(namespace).Get(name)
+        pod, err := kubecli.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
         if err != nil {
             logrus.Errorf("fail to get operator pod (%s): %v", name, err)
             return false, nil
@@ -1,7 +1,7 @@
 package: github.com/coreos/etcd-operator
 import:
 - package: k8s.io/client-go
-  version: v2.0.0
+  version: v3.0.0-beta.0
 - package: github.com/coreos/etcd
   version: v3.1.0
 - package: github.com/Sirupsen/logrus
@@ -20,9 +20,10 @@ import (
     "github.com/Sirupsen/logrus"
     "golang.org/x/time/rate"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/labels"
 )

 // Monkeys knows how to crush pods and nodes.
@@ -64,7 +65,7 @@ func (m *Monkeys) CrushPods(ctx context.Context, c *CrashConfig) {
             continue
         }

-        pods, err := m.kubecli.CoreV1().Pods(ns).List(v1.ListOptions{LabelSelector: ls})
+        pods, err := m.kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: ls})
         if err != nil {
             logrus.Errorf("failed to list pods for selector %v: %v", ls, err)
             continue
@@ -88,7 +89,7 @@ func (m *Monkeys) CrushPods(ctx context.Context, c *CrashConfig) {
         }

         for tokill := range tokills {
-            err = m.kubecli.CoreV1().Pods(ns).Delete(tokill, v1.NewDeleteOptions(0))
+            err = m.kubecli.CoreV1().Pods(ns).Delete(tokill, metav1.NewDeleteOptions(0))
             if err != nil {
                 logrus.Errorf("failed to kill pod %v: %v", tokill, err)
                 continue
@@ -17,7 +17,7 @@ package cluster

 import (
     "testing"

-    "k8s.io/client-go/pkg/api/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/coreos/etcd-operator/pkg/spec"
     "github.com/coreos/etcd-operator/pkg/util/constants"
@@ -26,7 +26,7 @@ import (
 func TestNewBackupManagerWithNonePVProvisioner(t *testing.T) {
     cfg := Config{PVProvisioner: constants.PVProvisionerNone}
     cl := &spec.Cluster{
-        Metadata: v1.ObjectMeta{Name: "testing"},
+        Metadata: metav1.ObjectMeta{Name: "testing"},
         Spec: spec.ClusterSpec{
             Backup: &spec.BackupPolicy{
                 StorageType: spec.BackupStorageTypePersistentVolume,
@@ -46,7 +46,7 @@ func TestNewBackupManagerWithNonePVProvisioner(t *testing.T) {
 func TestNewBackupManagerWithoutS3Config(t *testing.T) {
     cfg := Config{}
     cl := &spec.Cluster{
-        Metadata: v1.ObjectMeta{Name: "testing"},
+        Metadata: metav1.ObjectMeta{Name: "testing"},
         Spec: spec.ClusterSpec{
             Backup: &spec.BackupPolicy{
                 StorageType: spec.BackupStorageTypeS3,
@@ -30,8 +30,9 @@ import (
     "github.com/Sirupsen/logrus"
     "github.com/pborman/uuid"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
-    apierrors "k8s.io/client-go/pkg/api/errors"
     "k8s.io/client-go/pkg/api/v1"
 )
@@ -428,7 +429,7 @@ func (c *Cluster) removePodAndService(name string) error {
         }
     }

-    opts := v1.NewDeleteOptions(podTerminationGracePeriod)
+    opts := metav1.NewDeleteOptions(podTerminationGracePeriod)
     err = c.config.KubeCli.Core().Pods(ns).Delete(name, opts)
     if err != nil {
         if !k8sutil.IsKubernetesResourceNotFoundError(err) {
@@ -18,6 +18,7 @@ import (
     "fmt"

     "github.com/coreos/etcd-operator/pkg/util/k8sutil"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func (c *Cluster) upgradeOneMember(memberName string) error {
@@ -25,7 +26,7 @@ func (c *Cluster) upgradeOneMember(memberName string) error {
     ns := c.cluster.Metadata.Namespace

-    pod, err := c.config.KubeCli.CoreV1().Pods(ns).Get(memberName)
+    pod, err := c.config.KubeCli.CoreV1().Pods(ns).Get(memberName, metav1.GetOptions{})
     if err != nil {
         return fmt.Errorf("fail to get pod (%s): %v", memberName, err)
     }
@@ -31,10 +31,10 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/k8sutil"

     "github.com/Sirupsen/logrus"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    kwatch "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/pkg/api/v1"
     v1beta1extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
-    kwatch "k8s.io/client-go/pkg/watch"
 )

 var (
@@ -270,7 +270,7 @@ func (c *Controller) initResource() (string, error) {

 func (c *Controller) createTPR() error {
     tpr := &v1beta1extensions.ThirdPartyResource{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: spec.TPRName(),
         },
         Versions: []v1beta1extensions.APIVersion{
@@ -18,8 +18,8 @@ import (
     "strings"
     "testing"

-    "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/watch"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/watch"

     "github.com/coreos/etcd-operator/pkg/cluster"
     "github.com/coreos/etcd-operator/pkg/spec"
@@ -29,7 +29,7 @@ func TestHandleClusterEventUpdateFailedCluster(t *testing.T) {
     c := New(Config{})

     clus := &spec.Cluster{
-        Metadata: v1.ObjectMeta{
+        Metadata: metav1.ObjectMeta{
             Name: "test",
         },
         Status: spec.ClusterStatus{
@@ -51,7 +51,7 @@ func TestHandleClusterEventDeleteFailedCluster(t *testing.T) {
     c := New(Config{})
     name := "tests"
     clus := &spec.Cluster{
-        Metadata: v1.ObjectMeta{
+        Metadata: metav1.ObjectMeta{
             Name: name,
         },
         Status: spec.ClusterStatus{
@@ -21,15 +21,16 @@ import (
     "github.com/coreos/etcd-operator/pkg/backup/s3/s3config"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
 )

 func setupS3Env(kubecli kubernetes.Interface, s3Ctx s3config.S3Context, ns string) error {
-    cm, err := kubecli.CoreV1().ConfigMaps(ns).Get(s3Ctx.AWSConfig)
+    cm, err := kubecli.CoreV1().ConfigMaps(ns).Get(s3Ctx.AWSConfig, metav1.GetOptions{})
     if err != nil {
         return err
     }
-    se, err := kubecli.CoreV1().Secrets(ns).Get(s3Ctx.AWSSecret)
+    se, err := kubecli.CoreV1().Secrets(ns).Get(s3Ctx.AWSSecret, metav1.GetOptions{})
     if err != nil {
         return err
     }
@@ -22,8 +22,8 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"

-    "k8s.io/client-go/pkg/api/unversioned"
-    kwatch "k8s.io/client-go/pkg/watch"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    kwatch "k8s.io/apimachinery/pkg/watch"
 )

 type rawEvent struct {
@@ -31,7 +31,7 @@ type rawEvent struct {
     Object json.RawMessage
 }

-func pollEvent(decoder *json.Decoder) (*Event, *unversioned.Status, error) {
+func pollEvent(decoder *json.Decoder) (*Event, *metav1.Status, error) {
     re := &rawEvent{}
     err := decoder.Decode(re)
     if err != nil {
@@ -42,10 +42,10 @@ func pollEvent(decoder *json.Decoder) (*Event, *metav1.Status, error) {
     }

     if re.Type == kwatch.Error {
-        status := &unversioned.Status{}
+        status := &metav1.Status{}
         err = json.Unmarshal(re.Object, status)
         if err != nil {
-            return nil, nil, fmt.Errorf("fail to decode (%s) into unversioned.Status (%v)", re.Object, err)
+            return nil, nil, fmt.Errorf("fail to decode (%s) into metav1.Status (%v)", re.Object, err)
         }
         return nil, status, nil
     }
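For context on the hunks above: with the watch package now coming from apimachinery, an ERROR event's payload decodes into metav1.Status rather than unversioned.Status. A hedged, self-contained sketch of that decode step (rawEvent here is a local illustration mirroring the struct in this file):

```go
package demo

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kwatch "k8s.io/apimachinery/pkg/watch"
)

// rawEvent mirrors the shape used above: a watch event type plus its raw JSON payload.
type rawEvent struct {
	Type   kwatch.EventType
	Object json.RawMessage
}

// statusFromError extracts a metav1.Status from an ERROR watch event.
func statusFromError(re rawEvent) (*metav1.Status, error) {
	if re.Type != kwatch.Error {
		return nil, nil
	}
	status := &metav1.Status{}
	if err := json.Unmarshal(re.Object, status); err != nil {
		return nil, fmt.Errorf("fail to decode (%s) into metav1.Status (%v)", re.Object, err)
	}
	return status, nil
}
```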
@@ -18,10 +18,11 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/k8sutil"

     "github.com/Sirupsen/logrus"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/labels"
-    "k8s.io/client-go/pkg/types"
 )

 const (
@@ -64,7 +65,7 @@ func (gc *GC) FullyCollect() error {
         clusterUIDSet[c.Metadata.UID] = true
     }

-    option := v1.ListOptions{
+    option := metav1.ListOptions{
         LabelSelector: labels.SelectorFromSet(map[string]string{
             "app": "etcd",
         }).String(),
@@ -74,7+75,7 @@ func (gc *GC) FullyCollect() error {
     return nil
 }

-func (gc *GC) collectResources(option v1.ListOptions, runningSet map[types.UID]bool) {
+func (gc *GC) collectResources(option metav1.ListOptions, runningSet map[types.UID]bool) {
     if err := gc.collectPods(option, runningSet); err != nil {
         gc.logger.Errorf("gc pods failed: %v", err)
     }
@@ -86,7 +87,7 @@ func (gc *GC) collectResources(option metav1.ListOptions, runningSet map[types.UID]b
     }
 }

-func (gc *GC) collectPods(option v1.ListOptions, runningSet map[types.UID]bool) error {
+func (gc *GC) collectPods(option metav1.ListOptions, runningSet map[types.UID]bool) error {
     pods, err := gc.kubecli.CoreV1().Pods(gc.ns).List(option)
     if err != nil {
         return err
@@ -100,7 +101,7 @@ func (gc *GC) collectPods(option metav1.ListOptions, runningSet map[types.UID]bool)
         // Pods failed due to liveness probe are also collected
         if !runningSet[p.OwnerReferences[0].UID] || p.Status.Phase == v1.PodFailed {
             // kill bad pods without grace period to kill it immediately
-            err = gc.kubecli.CoreV1().Pods(gc.ns).Delete(p.GetName(), v1.NewDeleteOptions(0))
+            err = gc.kubecli.CoreV1().Pods(gc.ns).Delete(p.GetName(), metav1.NewDeleteOptions(0))
             if err != nil && !k8sutil.IsKubernetesResourceNotFoundError(err) {
                 return err
             }
@@ -110,7 +111,7 @@ func (gc *GC) collectPods(option metav1.ListOptions, runningSet map[types.UID]bool)
     return nil
 }

-func (gc *GC) collectServices(option v1.ListOptions, runningSet map[types.UID]bool) error {
+func (gc *GC) collectServices(option metav1.ListOptions, runningSet map[types.UID]bool) error {
     srvs, err := gc.kubecli.CoreV1().Services(gc.ns).List(option)
     if err != nil {
         return err
@@ -133,7 +134,7 @@ func (gc *GC) collectServices(option metav1.ListOptions, runningSet map[types.UID]bo
     return nil
 }

-func (gc *GC) collectReplicaSet(option v1.ListOptions, runningSet map[types.UID]bool) error {
+func (gc *GC) collectReplicaSet(option metav1.ListOptions, runningSet map[types.UID]bool) error {
     rss, err := gc.kubecli.ExtensionsV1beta1().ReplicaSets(gc.ns).List(option)
     if err != nil {
         return err
@@ -151,7 +152,7 @@ func (gc *GC) collectReplicaSet(option metav1.ListOptions, runningSet map[types.UID]
         orphanOption := false
         // set gracePeriod to delete the replica set immediately
         gracePeriod := int64(0)
-        err = gc.kubecli.ExtensionsV1beta1().ReplicaSets(gc.ns).Delete(rs.GetName(), &v1.DeleteOptions{
+        err = gc.kubecli.ExtensionsV1beta1().ReplicaSets(gc.ns).Delete(rs.GetName(), &metav1.DeleteOptions{
             OrphanDependents: &orphanOption,
             GracePeriodSeconds: &gracePeriod,
         })
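As the garbage-collection hunks above show, list filtering now goes through metav1.ListOptions, whose LabelSelector is a plain string; the apimachinery labels package builds that string. A hedged sketch of the pattern (the function name and the "app" value are illustrative only):

```go
package demo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// listOptionsForApp builds the string-based selector that metav1.ListOptions expects.
func listOptionsForApp(app string) metav1.ListOptions {
	sel := labels.SelectorFromSet(map[string]string{"app": app})
	return metav1.ListOptions{LabelSelector: sel.String()}
}
```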
@@ -21,8 +21,7 @@ import (
     "strings"
     "time"

-    "k8s.io/client-go/pkg/api/meta/metatypes"
-    "k8s.io/client-go/pkg/api/unversioned"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/pkg/api/v1"
 )
@@ -45,17 +44,17 @@ func TPRName() string {
 }

 type Cluster struct {
-    unversioned.TypeMeta `json:",inline"`
-    Metadata v1.ObjectMeta `json:"metadata,omitempty"`
-    Spec ClusterSpec `json:"spec"`
-    Status ClusterStatus `json:"status"`
+    metav1.TypeMeta `json:",inline"`
+    Metadata metav1.ObjectMeta `json:"metadata,omitempty"`
+    Spec ClusterSpec `json:"spec"`
+    Status ClusterStatus `json:"status"`
 }

-func (c *Cluster) AsOwner() metatypes.OwnerReference {
+func (c *Cluster) AsOwner() metav1.OwnerReference {
     trueVar := true
     // TODO: In 1.6 this is gonna be "k8s.io/kubernetes/pkg/apis/meta/v1"
     // Both api.OwnerReference and metatypes.OwnerReference are combined into that.
-    return metatypes.OwnerReference{
+    return metav1.OwnerReference{
         APIVersion: c.APIVersion,
         Kind: c.Kind,
         Name: c.Metadata.Name,
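With AsOwner now returning metav1.OwnerReference, the same reference can be attached to any object through the metav1.Object accessor interface, which is what the addOwnerRefToObject helper further down this diff does. A hedged sketch of that attach step (the function name is illustrative):

```go
package demo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// attachOwnerRef appends an owner reference to any object exposing metav1.Object.
func attachOwnerRef(o metav1.Object, ref metav1.OwnerReference) {
	o.SetOwnerReferences(append(o.GetOwnerReferences(), ref))
}
```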
@@ -17,15 +17,15 @@ package spec

 import (
     "encoding/json"

-    "k8s.io/client-go/pkg/api/unversioned"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // ClusterList is a list of etcd clusters.
 type ClusterList struct {
-    unversioned.TypeMeta `json:",inline"`
+    metav1.TypeMeta `json:",inline"`
     // Standard list metadata
     // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-    Metadata unversioned.ListMeta `json:"metadata,omitempty"`
+    Metadata metav1.ListMeta `json:"metadata,omitempty"`
     // Items is a list of third party objects
     Items []Cluster `json:"items"`
 }
@@ -26,14 +26,13 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/constants"
     "github.com/coreos/etcd-operator/pkg/util/retryutil"

+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/pkg/api/meta/metatypes"
-    "k8s.io/client-go/pkg/api/resource"
-    "k8s.io/client-go/pkg/api/unversioned"
     "k8s.io/client-go/pkg/api/v1"
     v1beta1extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
     v1beta1storage "k8s.io/client-go/pkg/apis/storage/v1beta1"
-    "k8s.io/client-go/pkg/util/intstr"
 )

 const (
@@ -53,7 +52,7 @@ func CreateStorageClass(kubecli kubernetes.Interface, pvProvisioner string) erro
     // We need to get rid of prefix because naming doesn't support "/".
     name := storageClassPrefix + "-" + path.Base(pvProvisioner)
     class := &v1beta1storage.StorageClass{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
         },
         Provisioner: pvProvisioner,
@@ -66,7 +65,7 @@ func CreateAndWaitPVC(kubecli kubernetes.Interface, clusterName, ns, pvProvision
     name := makePVCName(clusterName)
     storageClassName := storageClassPrefix + "-" + path.Base(pvProvisioner)
     claim := &v1.PersistentVolumeClaim{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
             Labels: map[string]string{
                 "etcd_cluster": clusterName,
@@ -96,7 +95,7 @@ func CreateAndWaitPVC(kubecli kubernetes.Interface, clusterName, ns, pvProvision
     // Change the wait time once there are official p99 SLA.
     err = retryutil.Retry(4*time.Second, 15, func() (bool, error) {
         var err error
-        claim, err = kubecli.CoreV1().PersistentVolumeClaims(ns).Get(name)
+        claim, err = kubecli.CoreV1().PersistentVolumeClaims(ns).Get(name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -211,17 +210,17 @@ func backupNameAndLabel(clusterName string) (string, map[string]string) {
     return name, labels
 }

-func NewBackupReplicaSetManifest(clusterName string, ps v1.PodSpec, owner metatypes.OwnerReference) *v1beta1extensions.ReplicaSet {
+func NewBackupReplicaSetManifest(clusterName string, ps v1.PodSpec, owner metav1.OwnerReference) *v1beta1extensions.ReplicaSet {
     name, labels := backupNameAndLabel(clusterName)
     rs := &v1beta1extensions.ReplicaSet{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
             Labels: newLablesForCluster(clusterName),
         },
         Spec: v1beta1extensions.ReplicaSetSpec{
-            Selector: &unversioned.LabelSelector{MatchLabels: labels},
+            Selector: &metav1.LabelSelector{MatchLabels: labels},
             Template: v1.PodTemplateSpec{
-                ObjectMeta: v1.ObjectMeta{
+                ObjectMeta: metav1.ObjectMeta{
                     Labels: labels,
                 },
                 Spec: ps,
@@ -232,10 +231,10 @@ func NewBackupReplicaSetManifest(clusterName string, ps v1.PodSpec, owner metav1
     return rs
 }

-func NewBackupServiceManifest(clusterName string, owner metatypes.OwnerReference) *v1.Service {
+func NewBackupServiceManifest(clusterName string, owner metav1.OwnerReference) *v1.Service {
     name, labels := backupNameAndLabel(clusterName)
     svc := &v1.Service{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
             Labels: newLablesForCluster(clusterName),
         },
@@ -268,7 +267,7 @@ func CopyVolume(kubecli kubernetes.Interface, fromClusterName, toClusterName, ns
     to := path.Join(constants.BackupMountDir, PVBackupV1, toClusterName)

     pod := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: copyVolumePodName(toClusterName),
             Labels: map[string]string{
                 "etcd_cluster": toClusterName,
@@ -319,7 +318,7 @@ func CopyVolume(kubecli kubernetes.Interface, fromClusterName, toClusterName, ns
     var phase v1.PodPhase
     // Delay could be very long due to k8s controller detaching the volume
     err := retryutil.Retry(10*time.Second, 12, func() (bool, error) {
-        p, err := kubecli.CoreV1().Pods(ns).Get(pod.Name)
+        p, err := kubecli.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -340,7 +339,7 @@ func CopyVolume(kubecli kubernetes.Interface, fromClusterName, toClusterName, ns
         return fmt.Errorf("failed to wait backup copy pod (%s, phase: %s) to succeed: %v", pod.Name, phase, err)
     }
     // Delete the pod to detach the volume from the node
-    return kubecli.CoreV1().Pods(ns).Delete(pod.Name, v1.NewDeleteOptions(0))
+    return kubecli.CoreV1().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
 }

 func copyVolumePodName(clusterName string) string {
@@ -53,10 +53,10 @@ import (
     "reflect"
     "time"

-    "k8s.io/client-go/pkg/api/errors"
-    "k8s.io/client-go/pkg/api/unversioned"
-    "k8s.io/client-go/pkg/util/runtime"
-    "k8s.io/client-go/pkg/util/wait"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apimachinery/pkg/util/wait"

     rl "github.com/coreos/etcd-operator/pkg/util/k8sutil/election/resourcelock"
     "github.com/golang/glog"
@@ -211,7 +211,7 @@ func (le *LeaderElector) renew() {
 // else it tries to renew the lease if it has already been acquired. Returns true
 // on success else returns false.
 func (le *LeaderElector) tryAcquireOrRenew() bool {
-    now := unversioned.Now()
+    now := metav1.Now()
     leaderElectionRecord := rl.LeaderElectionRecord{
         HolderIdentity: le.config.Lock.Identity(),
         LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
@@ -21,6 +21,7 @@ import (
     "errors"
     "fmt"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api"
     "k8s.io/client-go/pkg/api/v1"
@@ -38,7 +39,7 @@ type EndpointsLock struct {
 func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
     var record LeaderElectionRecord
     var err error
-    el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name)
+    el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
     if err != nil {
         return nil, err
     }
@@ -60,7 +61,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
         return err
     }
     el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: el.EndpointsMeta.Name,
             Namespace: el.EndpointsMeta.Namespace,
             Annotations: map[string]string{
@@ -17,7 +17,7 @@ limitations under the License.
 package resourcelock

 import (
-    "k8s.io/client-go/pkg/api/unversioned"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/record"
 )
@@ -30,11 +30,11 @@ const (
 // with a random string (e.g. UUID) with only slight modification of this code.
 // TODO(mikedanese): this should potentially be versioned
 type LeaderElectionRecord struct {
-    HolderIdentity string `json:"holderIdentity"`
-    LeaseDurationSeconds int `json:"leaseDurationSeconds"`
-    AcquireTime unversioned.Time `json:"acquireTime"`
-    RenewTime unversioned.Time `json:"renewTime"`
-    LeaderTransitions int `json:"leaderTransitions"`
+    HolderIdentity string `json:"holderIdentity"`
+    LeaseDurationSeconds int `json:"leaseDurationSeconds"`
+    AcquireTime metav1.Time `json:"acquireTime"`
+    RenewTime metav1.Time `json:"renewTime"`
+    LeaderTransitions int `json:"leaderTransitions"`
 }

 // ResourceLockConfig common data that exists across different
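The only behavioural point in the hunk above is the timestamp type: metav1.Time wraps time.Time and serializes to RFC 3339 in JSON, so the election record should keep the same wire format as before. A hedged sketch (the record type and identity string below are illustrative, not the operator's own code):

```go
package demo

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// electionRecord mirrors the two fields relevant to the type change above.
type electionRecord struct {
	HolderIdentity string      `json:"holderIdentity"`
	AcquireTime    metav1.Time `json:"acquireTime"`
}

func printRecord() error {
	rec := electionRecord{HolderIdentity: "etcd-operator-0", AcquireTime: metav1.Now()}
	b, err := json.Marshal(rec)
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // e.g. {"holderIdentity":"etcd-operator-0","acquireTime":"2017-..."}
	return nil
}
```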
@@ -28,17 +28,18 @@ import (
     "github.com/coreos/etcd-operator/pkg/util/etcdutil"
     "github.com/coreos/etcd-operator/pkg/util/retryutil"

+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/runtime/serializer"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api"
-    apierrors "k8s.io/client-go/pkg/api/errors"
-    "k8s.io/client-go/pkg/api/meta"
-    "k8s.io/client-go/pkg/api/meta/metatypes"
-    "k8s.io/client-go/pkg/api/unversioned"
     "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/labels"
-    "k8s.io/client-go/pkg/runtime"
-    "k8s.io/client-go/pkg/runtime/serializer"
-    "k8s.io/client-go/pkg/util/intstr"
     // for gcp auth
     _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
     "k8s.io/client-go/rest"
 )
@@ -121,7 +122,7 @@ func BackupServiceName(clusterName string) string {
     return fmt.Sprintf("%s-backup-sidecar", clusterName)
 }

-func CreateClientService(kubecli kubernetes.Interface, clusterName, ns string, owner metatypes.OwnerReference) error {
+func CreateClientService(kubecli kubernetes.Interface, clusterName, ns string, owner metav1.OwnerReference) error {
     return createService(kubecli, ClientServiceName(clusterName), clusterName, ns, "", 2379, owner)
 }
@@ -129,11 +130,11 @@ func ClientServiceName(clusterName string) string {
     return clusterName + "-client"
 }

-func CreatePeerService(kubecli kubernetes.Interface, clusterName, ns string, owner metatypes.OwnerReference) error {
+func CreatePeerService(kubecli kubernetes.Interface, clusterName, ns string, owner metav1.OwnerReference) error {
     return createService(kubecli, clusterName, clusterName, ns, v1.ClusterIPNone, 2380, owner)
 }

-func createService(kubecli kubernetes.Interface, svcName, clusterName, ns, clusterIP string, port int32, owner metatypes.OwnerReference) error {
+func createService(kubecli kubernetes.Interface, svcName, clusterName, ns, clusterIP string, port int32, owner metav1.OwnerReference) error {
     svc := newEtcdServiceManifest(svcName, clusterName, clusterIP, port)
     addOwnerRefToObject(svc.GetObjectMeta(), owner)
     _, err := kubecli.CoreV1().Services(ns).Create(svc)
@@ -151,7 +152,7 @@ func CreateAndWaitPod(kubecli kubernetes.Interface, ns string, pod *v1.Pod, time
     interval := 3 * time.Second
     var retPod *v1.Pod
     retryutil.Retry(interval, int(timeout/(interval)), func() (bool, error) {
-        retPod, err = kubecli.CoreV1().Pods(ns).Get(pod.Name)
+        retPod, err = kubecli.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -174,7 +175,7 @@ func newEtcdServiceManifest(svcName, clusterName string, clusterIP string, port
         "etcd_cluster": clusterName,
     }
     svc := &v1.Service{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: svcName,
             Labels: labels,
         },
@@ -199,11 +200,11 @@ func AddRecoveryToPod(pod *v1.Pod, clusterName, token string, m *etcdutil.Member
     makeRestoreInitContainerSpec(BackupServiceAddr(clusterName), token, cs.Version, m)
 }

-func addOwnerRefToObject(o meta.Object, r metatypes.OwnerReference) {
+func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) {
     o.SetOwnerReferences(append(o.GetOwnerReferences(), r))
 }

-func NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs spec.ClusterSpec, owner metatypes.OwnerReference) *v1.Pod {
+func NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs spec.ClusterSpec, owner metav1.OwnerReference) *v1.Pod {
     commands := fmt.Sprintf("/usr/local/bin/etcd --data-dir=%s --name=%s --initial-advertise-peer-urls=%s "+
         "--listen-peer-urls=%s --listen-client-urls=http://0.0.0.0:2379 --advertise-client-urls=%s "+
         "--initial-cluster=%s --initial-cluster-state=%s",
@@ -235,7 +236,7 @@ func NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state,
     }

     pod := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: m.Name,
             Labels: map[string]string{
                 "app": "etcd",
@@ -301,7 +302,7 @@ func NewTPRClient() (*rest.RESTClient, error) {
         return nil, err
     }

-    config.GroupVersion = &unversioned.GroupVersion{
+    config.GroupVersion = &schema.GroupVersion{
         Group: spec.TPRGroup,
         Version: spec.TPRVersion,
     }
@@ -325,8 +326,8 @@ func IsKubernetesResourceNotFoundError(err error) bool {
 }

 // We are using internal api types for cluster related.
-func ClusterListOpt(clusterName string) v1.ListOptions {
-    return v1.ListOptions{
+func ClusterListOpt(clusterName string) metav1.ListOptions {
+    return metav1.ListOptions{
         LabelSelector: labels.SelectorFromSet(newLablesForCluster(clusterName)).String(),
     }
 }
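NewTPRClient above now describes the third-party-resource endpoint with schema.GroupVersion from apimachinery instead of unversioned.GroupVersion. A hedged sketch of how such a REST client config is pointed at a TPR group/version (field names as used in the hunk; the group and version strings are placeholders):

```go
package demo

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
)

// withTPRGroupVersion points a rest.Config at a third-party-resource API group.
func withTPRGroupVersion(cfg *rest.Config, group, version string) *rest.Config {
	gv := schema.GroupVersion{Group: group, Version: version}
	cfg.GroupVersion = &gv
	cfg.APIPath = "/apis" // TPRs are served under /apis/<group>/<version>
	return cfg
}
```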
@@ -18,8 +18,8 @@ import (
     "encoding/json"
     "fmt"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/pkg/api"
-    "k8s.io/client-go/pkg/api/unversioned"
     "k8s.io/client-go/pkg/api/v1"
 )
@@ -86,7 +86,7 @@ func PodWithAntiAffinity(pod *v1.Pod, clusterName string) *v1.Pod {
         PodAntiAffinity: &v1.PodAntiAffinity{
             RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                 {
-                    LabelSelector: &unversioned.LabelSelector{
+                    LabelSelector: &metav1.LabelSelector{
                         MatchLabels: map[string]string{
                             "etcd_cluster": clusterName,
                         },
@@ -22,7 +22,7 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"

-    "k8s.io/client-go/pkg/api/meta/metatypes"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/pkg/api/v1"
 )
@@ -65,7 +65,7 @@ func PodWithAddMemberInitContainer(p *v1.Pod, endpoints []string, name string, p
     return p
 }

-func NewSelfHostedEtcdPod(name string, initialCluster []string, clusterName, ns, state, token string, cs spec.ClusterSpec, owner metatypes.OwnerReference) *v1.Pod {
+func NewSelfHostedEtcdPod(name string, initialCluster []string, clusterName, ns, state, token string, cs spec.ClusterSpec, owner metav1.OwnerReference) *v1.Pod {
     selfHostedDataDir := path.Join(etcdVolumeMountDir, ns+"-"+name)
     commands := fmt.Sprintf("/usr/local/bin/etcd --data-dir=%s --name=%s --initial-advertise-peer-urls=http://$(MY_POD_IP):2380 "+
         "--listen-peer-urls=http://$(MY_POD_IP):2380 --listen-client-urls=http://$(MY_POD_IP):2379 --advertise-client-urls=http://$(MY_POD_IP):2379 "+
@@ -95,7 +95,7 @@ func NewSelfHostedEtcdPod(name string, initialCluster []string, clusterName, ns,
     }
     c.Env = []v1.EnvVar{envPodIP}
     pod := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
             Labels: map[string]string{
                 "app": "etcd",
@@ -24,7 +24,7 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"
     "github.com/coreos/etcd-operator/pkg/util/retryutil"

-    apierrors "k8s.io/client-go/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/client-go/rest"
 )
@@ -26,6 +26,7 @@ import (
     "github.com/Sirupsen/logrus"
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/aws/aws-sdk-go/service/s3"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api/v1"
     "k8s.io/client-go/tools/clientcmd"
@@ -101,7 +102,7 @@ func (f *Framework) SetupEtcdOperator() error {
         cmd += " --backup-aws-secret=aws --backup-aws-config=aws --backup-s3-bucket=jenkins-etcd-operator"
     }
     pod := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: "etcd-operator",
             Labels: map[string]string{"name": "etcd-operator"},
         },
@@ -149,7 +150,7 @@ func (f *Framework) DeleteEtcdOperatorCompletely() error {
         return err
     }
     err = retryutil.Retry(2*time.Second, 5, func() (bool, error) {
-        _, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get("etcd-operator")
+        _, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get("etcd-operator", metav1.GetOptions{})
         if err == nil {
             return false, nil
         }
@@ -165,7 +166,7 @@ func (f *Framework) DeleteEtcdOperatorCompletely() error {
 }

 func (f *Framework) deleteEtcdOperator() error {
-    return f.KubeClient.CoreV1().Pods(f.Namespace).Delete("etcd-operator", v1.NewDeleteOptions(1))
+    return f.KubeClient.CoreV1().Pods(f.Namespace).Delete("etcd-operator", metav1.NewDeleteOptions(1))
 }

 func (f *Framework) setupAWS() error {
@@ -25,6 +25,7 @@ import (
     "github.com/coreos/etcd-operator/test/e2e/framework"

     "golang.org/x/net/context"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func TestClusterRestore(t *testing.T) {
@@ -85,7 +86,7 @@ func testClusterRestoreWithBackupPolicy(t *testing.T, needDataClone bool, backup
         t.Fatalf("failed to create 3 members etcd cluster: %v", err)
     }

-    pod, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(names[0])
+    pod, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(names[0], metav1.GetOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -138,7 +139,7 @@ func testClusterRestoreWithBackupPolicy(t *testing.T, needDataClone bool, backup
         t.Fatalf("failed to create 3 members etcd cluster: %v", err)
     }

-    pod, err = f.KubeClient.CoreV1().Pods(f.Namespace).Get(names[0])
+    pod, err = f.KubeClient.CoreV1().Pods(f.Namespace).Get(names[0], metav1.GetOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -27,9 +27,10 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"
     "github.com/coreos/etcd-operator/pkg/util/retryutil"
     "github.com/coreos/etcd-operator/test/e2e/framework"

     "github.com/coreos/etcd/embed"
     "github.com/coreos/etcd/pkg/netutil"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/pkg/api/v1"
 )
@@ -132,7 +133,7 @@ func testCreateSelfHostedClusterWithBootMember(t *testing.T) {

 func cleanupSelfHostedHostpath() {
     f := framework.Global
-    nodes, err := f.KubeClient.CoreV1().Nodes().List(v1.ListOptions{})
+    nodes, err := f.KubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
     if err != nil {
         return
     }
@@ -144,7 +145,7 @@ func cleanupSelfHostedHostpath() {
     name := fmt.Sprintf("cleanup-selfhosted-%s", nodeName)
     p := &v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name: name,
         },
         Spec: v1.PodSpec{
@@ -174,7 +175,7 @@ func cleanupSelfHostedHostpath() {
         return
     }
     retryutil.Retry(5*time.Second, 5, func() (bool, error) {
-        get, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(name)
+        get, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(name, metav1.GetOptions{})
         if err != nil {
             return false, nil
         }
@@ -22,6 +22,8 @@ import (
     "github.com/coreos/etcd-operator/pkg/spec"
     "github.com/coreos/etcd-operator/test/e2e/e2eutil"
     "github.com/coreos/etcd-operator/test/e2e/framework"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func TestPeerTLS(t *testing.T) {
@@ -58,7 +60,7 @@ func TestPeerTLS(t *testing.T) {
         t.Fatalf("failed to create 3 members etcd cluster: %v", err)
     }

-    pod, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(members[0])
+    pod, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get(members[0], metav1.GetOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -34,9 +34,9 @@ import (
     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/service/s3"
     "github.com/coreos/etcd/clientv3"
-    "k8s.io/client-go/pkg/api/unversioned"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/pkg/labels"
 )

 const (
@@ -55,7 +55,7 @@ func waitBackupPodUp(f *framework.Framework, clusterName string, timeout time.Du
         "etcd_cluster": clusterName,
     }).String()
     return retryutil.Retry(5*time.Second, int(timeout/(5*time.Second)), func() (done bool, err error) {
-        podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(v1.ListOptions{
+        podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
             LabelSelector: ls,
         })
         if err != nil {
@@ -75,7 +75,7 @@ func makeBackup(f *framework.Framework, clusterName string) error {
         "app": k8sutil.BackupPodSelectorAppField,
         "etcd_cluster": clusterName,
     }).String()
-    podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(v1.ListOptions{
+    podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
         LabelSelector: ls,
     })
     if err != nil {
@@ -151,7 +151,7 @@ func waitSizeReachedWithAccept(t *testing.T, f *framework.Framework, clusterName

 func killMembers(f *framework.Framework, names ...string) error {
     for _, name := range names {
-        err := f.KubeClient.CoreV1().Pods(f.Namespace).Delete(name, v1.NewDeleteOptions(0))
+        err := f.KubeClient.CoreV1().Pods(f.Namespace).Delete(name, metav1.NewDeleteOptions(0))
         if err != nil && !k8sutil.IsKubernetesResourceNotFoundError(err) {
             return err
         }
@@ -161,11 +161,11 @@ func killMembers(f *framework.Framework, names ...string) error {

 func newClusterSpec(genName string, size int) *spec.Cluster {
     return &spec.Cluster{
-        TypeMeta: unversioned.TypeMeta{
+        TypeMeta: metav1.TypeMeta{
             Kind: strings.Title(spec.TPRKind),
             APIVersion: spec.TPRGroup + "/" + spec.TPRVersion,
         },
-        Metadata: v1.ObjectMeta{
+        Metadata: metav1.ObjectMeta{
             GenerateName: genName,
         },
         Spec: spec.ClusterSpec{
@@ -321,7 +321,7 @@ func waitBackupDeleted(f *framework.Framework, c *spec.Cluster) error {
         "app": k8sutil.BackupPodSelectorAppField,
         "etcd_cluster": c.Metadata.Name,
     }).String()
-    pl, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(v1.ListOptions{
+    pl, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
         LabelSelector: ls,
     })
     if err != nil {