Mirror of https://github.com/Azure/ARO-RP.git
Merge pull request #1344 from jim-minter/standardise-imports
Standardise imports
Commit 140b7ce660
@@ -12,7 +12,7 @@ import (
	"github.com/sirupsen/logrus"

	deployer "github.com/Azure/ARO-RP/pkg/deploy"
	pkgdeploy "github.com/Azure/ARO-RP/pkg/deploy"
	"github.com/Azure/ARO-RP/pkg/env"
	"github.com/Azure/ARO-RP/pkg/util/version"
)
@@ -50,12 +50,12 @@ func deploy(ctx context.Context, log *logrus.Entry) error {
		return fmt.Errorf("location %s must be lower case", location)
	}

	config, err := deployer.GetConfig(flag.Arg(1), location)
	config, err := pkgdeploy.GetConfig(flag.Arg(1), location)
	if err != nil {
		return err
	}

	deployer, err := deployer.New(ctx, log, env, config, deployVersion)
	deployer, err := pkgdeploy.New(ctx, log, env, config, deployVersion)
	if err != nil {
		return err
	}
@@ -10,7 +10,7 @@ import (
	"github.com/Azure/go-autorest/tracing"
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/metrics"
	kmetrics "k8s.io/client-go/tools/metrics"

	"github.com/Azure/ARO-RP/pkg/database"
	"github.com/Azure/ARO-RP/pkg/env"
@@ -47,7 +47,7 @@ func monitor(ctx context.Context, log *logrus.Entry) error {
	m := statsd.New(ctx, log.WithField("component", "metrics"), _env, os.Getenv("MDM_ACCOUNT"), os.Getenv("MDM_NAMESPACE"))

	tracing.Register(azure.New(m))
	metrics.Register(metrics.RegisterOpts{
	kmetrics.Register(kmetrics.RegisterOpts{
		RequestResult: k8s.NewResult(m),
		RequestLatency: k8s.NewLatency(m),
	})
@@ -12,7 +12,7 @@ import (
	"github.com/Azure/go-autorest/tracing"
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/metrics"
	kmetrics "k8s.io/client-go/tools/metrics"

	"github.com/Azure/ARO-RP/pkg/api"
	_ "github.com/Azure/ARO-RP/pkg/api/admin"
@@ -69,7 +69,7 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
	m := statsd.New(ctx, log.WithField("component", "metrics"), _env, os.Getenv("MDM_ACCOUNT"), os.Getenv("MDM_NAMESPACE"))

	tracing.Register(azure.New(m))
	metrics.Register(metrics.RegisterOpts{
	kmetrics.Register(kmetrics.RegisterOpts{
		RequestResult: k8s.NewResult(m),
		RequestLatency: k8s.NewLatency(m),
	})
go.mod
@@ -111,7 +111,6 @@ require (
	sigs.k8s.io/cluster-api-provider-openstack v0.3.3 // indirect
	sigs.k8s.io/controller-runtime v0.8.2
	sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185
	sigs.k8s.io/yaml v1.2.0
)

replace (
@@ -12,7 +12,7 @@ import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
@@ -51,9 +51,9 @@ func genRBAC(restconfig *rest.Config) error {
		Verbs: []string{"get"},
	})

	serializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, scheme.Scheme, scheme.Scheme,
		json.SerializerOptions{Yaml: true},
	serializer := kjson.NewSerializerWithOptions(
		kjson.DefaultMetaFactory, scheme.Scheme, scheme.Scheme,
		kjson.SerializerOptions{Yaml: true},
	)

	yaml := scheme.Codecs.CodecForVersions(serializer, nil, schema.GroupVersions(scheme.Scheme.PrioritizedVersionsAllGroups()), nil)
@@ -13,7 +13,7 @@ import (
	"io/ioutil"
	"os"

	"github.com/Azure/ARO-RP/pkg/util/tls"
	utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
)

var (
@@ -51,7 +51,7 @@ func run(name string) error {
		}
	}

	key, cert, err := tls.GenerateKeyAndCertificate(name, signingKey, signingCert, *ca, *client)
	key, cert, err := utiltls.GenerateKeyAndCertificate(name, signingKey, signingCert, *ca, *client)
	if err != nil {
		return err
	}
@@ -16,7 +16,6 @@ import (
	"github.com/openshift/installer/pkg/types"
	"github.com/sirupsen/logrus"

	_ "github.com/Azure/ARO-RP/pkg/cluster"
	"github.com/Azure/ARO-RP/pkg/env"
	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/storage"
	utillog "github.com/Azure/ARO-RP/pkg/util/log"
@@ -0,0 +1,55 @@
package main

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	var rv int

	for _, path := range os.Args[1:] {
		err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			if info.IsDir() || !strings.HasSuffix(path, ".go") {
				return nil
			}

			fset := &token.FileSet{}

			f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
			if err != nil {
				return err
			}

			for _, validator := range []func(string, *token.FileSet, *ast.File) []error{
				validateGroups,
				validateImports,
			} {
				for _, err := range validator(path, fset, f) {
					fmt.Printf("%s: %v\n", path, err)
					rv = 1
				}
			}

			return nil
		})
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			rv = 1
		}
	}

	os.Exit(rv)
}
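A minimal, self-contained sketch (not part of the commit) of what this walker hands to the two validators: an *ast.File parsed in parser.ImportsOnly mode plus the token.FileSet needed to map import positions back to line numbers, which is how the group validator later finds group boundaries. The example source string and file name are hypothetical.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

// src is a hypothetical file: note the blank line that separates the
// standard-library group from the local ARO-RP group.
const src = `package example

import (
	"fmt"
	"os"

	"github.com/Azure/ARO-RP/pkg/env"
)
`

func main() {
	fset := token.NewFileSet()

	// ImportsOnly stops parsing after the import declarations, which is all
	// the validators need.
	f, err := parser.ParseFile(fset, "example.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}

	for _, imp := range f.Imports {
		fmt.Printf("line %d: %s\n", fset.Position(imp.Pos()).Line, imp.Path.Value)
	}
	// A gap of more than one line between consecutive imports (the blank line
	// above) is what makes validateGroups start a new group.
}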
@@ -0,0 +1,81 @@
package main

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"fmt"
	"go/ast"
	"go/token"
	"sort"
	"strings"
)

const local = "github.com/Azure/ARO-RP"

type importType int

// at most one import group of each type may exist in a validated source file,
// specifically in the order declared below
const (
	importStd importType = 1 << iota // go standard library
	importDot // "." imports (ginkgo and gomega)
	importOther // non-local imports
	importLocal // local imports
)

func typeForImport(imp *ast.ImportSpec) importType {
	path := strings.Trim(imp.Path.Value, `"`)

	switch {
	case imp.Name != nil && imp.Name.Name == ".":
		return importDot
	case strings.HasPrefix(path, local+"/"):
		return importLocal
	case strings.ContainsRune(path, '.'):
		return importOther
	default:
		return importStd
	}
}

func validateGroups(path string, fset *token.FileSet, f *ast.File) (errs []error) {
	var groups [][]*ast.ImportSpec

	for i, imp := range f.Imports {
		// if there's more than one line between this and the previous import,
		// break open a new import group
		if i == 0 || fset.Position(f.Imports[i].Pos()).Line-fset.Position(f.Imports[i-1].Pos()).Line > 1 {
			groups = append(groups, []*ast.ImportSpec{})
		}

		groups[len(groups)-1] = append(groups[len(groups)-1], imp)
	}

	// seenTypes holds a bitmask of the importTypes seen up to this point, so
	// that we can detect duplicate groups. We can also detect misordered
	// groups, because when we set a bit (say 0b0100), we actually set all the
	// trailing bits (0b0111) as sentinels
	var seenTypes importType

	for groupnum, group := range groups {
		if !sort.SliceIsSorted(group, func(i, j int) bool { return group[i].Path.Value < group[j].Path.Value }) {
			errs = append(errs, fmt.Errorf("group %d: imports are not sorted", groupnum+1))
		}

		groupImportType := typeForImport(group[0])
		if (seenTypes & groupImportType) != 0 { // check if single bit is already set...
			errs = append(errs, fmt.Errorf("group %d: duplicate group or invalid group ordering", groupnum+1))
		}
		seenTypes |= groupImportType<<1 - 1 // ...but set all trailing bits

		for _, imp := range group {
			if typeForImport(imp) != groupImportType {
				errs = append(errs, fmt.Errorf("group %d: mixed import type", groupnum+1))
				break
			}
		}
	}

	return
}
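A quick illustration (not from the commit) of the trailing-bit sentinel used above: because each group ORs in groupImportType<<1 - 1, any later group whose own type bit is already set is either a duplicate or out of order. It assumes the same bit values as the const block (importStd=1, importDot=2, importOther=4, importLocal=8).

package main

import "fmt"

type importType int

const (
	importStd importType = 1 << iota // 0b0001
	importDot // 0b0010
	importOther // 0b0100
	importLocal // 0b1000
)

func main() {
	// Groups as they would appear in a hypothetical file: a non-local group
	// first, then the standard library - the reverse of the required order.
	groups := []importType{importOther, importStd}

	var seenTypes importType
	for i, g := range groups {
		if seenTypes&g != 0 { // the group's own bit was already set...
			fmt.Printf("group %d: duplicate group or invalid group ordering\n", i+1)
		}
		seenTypes |= g<<1 - 1 // ...because earlier groups also set all trailing bits
	}
	// Output: group 2: duplicate group or invalid group ordering
}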
@@ -6,138 +6,262 @@ package main
import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"sort"
	"regexp"
	"strings"
)

const local = "github.com/Azure/ARO-RP"

type importSpecs []*ast.ImportSpec

func (is importSpecs) Len() int { return len(is) }
func (is importSpecs) Less(i, j int) bool { return is[i].Path.Value < is[j].Path.Value }
func (is importSpecs) Swap(i, j int) { is[i], is[j] = is[j], is[i] }

var _ sort.Interface = importSpecs{}

type importType int

// at most one import group of each type may exist in a validated source file,
// specifically in the order declared below
const (
	importStd importType = 1 << iota // go standard library
	importDot // "." imports (ginkgo and gomega)
	importOther // non-local imports
	importLocal // local imports
)

func typeForImport(imp *ast.ImportSpec) importType {
	path := strings.Trim(imp.Path.Value, `"`)

	switch {
	case imp.Name != nil && imp.Name.Name == ".":
		return importDot
	case strings.HasPrefix(path, local+"/"):
		return importLocal
	case strings.ContainsRune(path, '.'):
		return importOther
	default:
		return importStd
	}
func isStandardLibrary(path string) bool {
	return !strings.ContainsRune(strings.SplitN(path, "/", 2)[0], '.')
}

func validateImport(imp *ast.ImportSpec) (errs []error) {
	path := strings.Trim(imp.Path.Value, `"`)

	switch typeForImport(imp) {
	case importDot:
		switch path {
		case "github.com/onsi/ginkgo",
			"github.com/onsi/gomega",
			"github.com/onsi/gomega/gstruct":
		default:
			errs = append(errs, fmt.Errorf("invalid . import %s", imp.Path.Value))
		}
func validateDotImport(path string) error {
	switch path {
	case "github.com/onsi/ginkgo",
		"github.com/onsi/gomega":
		return nil
	}

	return
	return fmt.Errorf("invalid . import %s", path)
}

func check(path string) (errs []error) {
	var fset token.FileSet

	f, err := parser.ParseFile(&fset, path, nil, parser.ImportsOnly)
	if err != nil {
		return []error{err}
func validateUnderscoreImport(path string) error {
	if regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/api/(admin|v[^/]+)$`).MatchString(path) {
		return nil
	}

	var groups [][]*ast.ImportSpec

	for i, imp := range f.Imports {
		// if there's more than one line between this and the previous import,
		// break open a new import group
		if i == 0 || fset.Position(f.Imports[i].Pos()).Line-fset.Position(f.Imports[i-1].Pos()).Line > 1 {
			groups = append(groups, []*ast.ImportSpec{})
		}

		groups[len(groups)-1] = append(groups[len(groups)-1], imp)
	switch path {
	case "net/http/pprof",
		"github.com/Azure/ARO-RP/pkg/util/scheme":
		return nil
	}

	// seenTypes holds a bitmask of the importTypes seen up to this point, so
	// that we can detect duplicate groups. We can also detect misordered
	// groups, because when we set a bit (say 0b0100), we actually set all the
	// trailing bits (0b0111) as sentinels
	var seenTypes importType

	for groupnum, group := range groups {
		if !sort.IsSorted(importSpecs(group)) {
			errs = append(errs, fmt.Errorf("group %d: imports are not sorted", groupnum+1))
		}

		groupImportType := typeForImport(group[0])
		if (seenTypes & groupImportType) != 0 { // check if single bit is already set...
			errs = append(errs, fmt.Errorf("group %d: duplicate group or invalid group ordering", groupnum+1))
		}
		seenTypes |= groupImportType<<1 - 1 // ...but set all trailing bits

		for _, imp := range group {
			errs = append(errs, validateImport(imp)...)
		}

		for _, imp := range group {
			if typeForImport(imp) != groupImportType {
				errs = append(errs, fmt.Errorf("group %d: mixed import type", groupnum+1))
				break
			}
		}
	}

	return
	return fmt.Errorf("invalid _ import %s", path)
}

func main() {
	var rv int
	for _, path := range os.Args[1:] {
		if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
// acceptableNames returns a list of acceptable names for an import; empty
// string = no import override; nil list = don't care
func acceptableNames(path string) []string {
	m := regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/api/(v[^/]*[0-9])$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1]}
	}

			if !info.IsDir() && strings.HasSuffix(path, ".go") {
				for _, err := range check(path) {
					fmt.Printf("%s: %v\n", path, err)
					rv = 1
				}
			}
	m = regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/([^/]+)/redhatopenshift$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"mgmtredhatopenshift" + strings.ReplaceAll(m[1], "-", "")}
	}

	m = regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/(deploy|mirror|monitor|operator|portal)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"", "pkg" + m[1]}
	}

	m = regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/([^/]+)/redhatopenshift$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"redhatopenshift" + strings.ReplaceAll(m[1], "-", "")}
	}

	m = regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/util/(log|pem|tls)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"util" + m[1]}
	}

	m = regexp.MustCompile(`^github.com/Azure/ARO-RP/pkg/util/mocks/(?:.+/)?([^/]+)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"mock_" + m[1]}
	}

	m = regexp.MustCompile(`^github.com/Azure/azure-sdk-for-go/services/(?:preview/)?(?:[^/]+)/mgmt/(?:[^/]+)/([^/]+)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{"mgmt" + m[1]}
	}

	m = regexp.MustCompile(`^github.com/openshift/api/([^/]+)/(v[^/]+)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1] + m[2]}
	}

	m = regexp.MustCompile(`^github.com/openshift/client-go/([^/]+)/clientset/versioned$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1] + "client"}
	}

	m = regexp.MustCompile(`^github.com/openshift/client-go/([^/]+)/clientset/versioned/fake$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1] + "fake"}
	}

	m = regexp.MustCompile(`^k8s.io/api/([^/]+)/(v[^/]+)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1] + m[2]}
	}

	m = regexp.MustCompile(`^k8s.io/kubernetes/pkg/apis/[^/]+/v[^/]+$`).FindStringSubmatch(path)
	if m != nil {
		return nil
	}

	m = regexp.MustCompile(`^k8s.io/client-go/kubernetes/typed/([^/]+)/(v[^/]+)$`).FindStringSubmatch(path)
	if m != nil {
		return []string{m[1] + m[2] + "client"}
	}

	switch path {
	case "github.com/Azure/ARO-RP/pkg/frontend/middleware":
		return []string{"", "frontendmiddleware"}
	case "github.com/Azure/ARO-RP/pkg/metrics/statsd/cosmosdb":
		return []string{"dbmetrics"}
	case "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1":
		return []string{"arov1alpha1"}
	case "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned":
		return []string{"aroclient"}
	case "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned/fake":
		return []string{"arofake"}
	case "github.com/Azure/ARO-RP/pkg/util/dynamichelper/discovery":
		return []string{"utildiscovery"}
	case "github.com/Azure/ARO-RP/pkg/util/namespace":
		return []string{"", "utilnamespace"}
	case "github.com/Azure/ARO-RP/test/database":
		return []string{"testdatabase"}
	case "github.com/Azure/ARO-RP/test/util/log":
		return []string{"testlog"}
	case "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac":
		return []string{"azgraphrbac"}
	case "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault":
		return []string{"azkeyvault"}
	case "github.com/Azure/azure-sdk-for-go/storage":
		return []string{"azstorage"}
	case "github.com/googleapis/gnostic/openapiv2":
		return []string{"openapi_v2"}
	case "github.com/openshift/console-operator/pkg/api":
		return []string{"consoleapi"}
	case "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1":
		return []string{"machinev1beta1"}
	case "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned":
		return []string{"maoclient"}
	case "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/fake":
		return []string{"maofake"}
	case "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1":
		return []string{"mcv1"}
	case "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned":
		return []string{"mcoclient"}
	case "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake":
		return []string{"mcofake"}
	case "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1":
		return []string{"mcoclientv1"}
	case "github.com/openshift/installer/pkg/asset/installconfig/azure":
		return []string{"icazure"}
	case "github.com/openshift/installer/pkg/types/azure":
		return []string{"azuretypes"}
	case "github.com/satori/go.uuid":
		return []string{"uuid"}
	case "golang.org/x/crypto/ssh":
		return []string{"", "cryptossh"}
	case "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1":
		return []string{"extensionsv1beta1"}
	case "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1":
		return []string{"extensionsv1"}
	case "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset":
		return []string{"extensionsclient"}
	case "k8s.io/apimachinery/pkg/api/errors":
		return []string{"kerrors"}
	case "k8s.io/apimachinery/pkg/apis/meta/v1":
		return []string{"metav1"}
	case "k8s.io/apimachinery/pkg/runtime/serializer/json":
		return []string{"kjson"}
	case "k8s.io/apimachinery/pkg/util/runtime":
		return []string{"utilruntime"}
	case "k8s.io/apimachinery/pkg/version":
		return []string{"kversion"}
	case "k8s.io/client-go/testing":
		return []string{"ktesting"}
	case "k8s.io/client-go/tools/clientcmd/api/v1":
		return []string{"clientcmdv1"}
	case "k8s.io/client-go/tools/metrics":
		return []string{"kmetrics"}
	case "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1":
		return []string{"azureproviderv1beta1"}
	case "sigs.k8s.io/controller-runtime":
		return []string{"ctrl"}
	}

	return []string{""}
}

func validateImports(path string, fset *token.FileSet, f *ast.File) (errs []error) {
	for _, prefix := range []string{
		"pkg/client/",
		"pkg/database/cosmosdb/zz_generated_",
		"pkg/operator/apis",
		"pkg/operator/clientset",
		"pkg/util/mocks/",
	} {
		if strings.HasPrefix(path, prefix) {
			return nil
		}); err != nil {
			panic(err)
		}
	}
	os.Exit(rv)

nextImport:
	for _, imp := range f.Imports {
		value := strings.Trim(imp.Path.Value, `"`)

		if imp.Name != nil && imp.Name.Name == "." {
			err := validateDotImport(value)
			if err != nil {
				errs = append(errs, err)
			}
			continue
		}

		if imp.Name != nil && imp.Name.Name == "_" {
			err := validateUnderscoreImport(value)
			if err != nil {
				errs = append(errs, err)
			}
			continue
		}

		switch value {
		case "sigs.k8s.io/yaml", "gopkg.in/yaml.v2":
			errs = append(errs, fmt.Errorf("%s is imported; use github.com/ghodss/yaml", value))
			continue nextImport
		case "github.com/google/uuid":
			errs = append(errs, fmt.Errorf("%s is imported; use github.com/satori/go.uuid", value))
			continue nextImport
		}

		if strings.HasPrefix(value, "github.com/Azure/azure-sdk-for-go/profiles") {
			errs = append(errs, fmt.Errorf("%s is imported; use github.com/Azure/azure-sdk-for-go/services/*", value))
			continue
		}

		if strings.HasSuffix(value, "/scheme") &&
			value != "k8s.io/client-go/kubernetes/scheme" {
			errs = append(errs, fmt.Errorf("%s is imported; should probably use k8s.io/client-go/kubernetes/scheme", value))
			continue
		}

		if isStandardLibrary(value) {
			if imp.Name != nil {
				errs = append(errs, fmt.Errorf("overridden import %s", value))
			}
			continue
		}

		names := acceptableNames(value)
		if names == nil {
			continue
		}
		for _, name := range names {
			if name == "" && imp.Name == nil ||
				name != "" && imp.Name != nil && imp.Name.Name == name {
				continue nextImport
			}
		}

		errs = append(errs, fmt.Errorf("%s is imported as %q, should be %q", value, imp.Name, names))
	}

	return
}
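For reference, an import block laid out the way the new linter accepts (illustrative only; the package is hypothetical and shown as an excerpt rather than a compilable file): at most one group per type, ordered standard library, then "." imports (ginkgo/gomega only), then other non-local imports, then local ARO-RP imports, each group sorted, with non-local packages aliased only as acceptableNames allows (for example metav1, kmetrics, utiltls).

package example

import (
	"context"
	"fmt"

	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kmetrics "k8s.io/client-go/tools/metrics"

	"github.com/Azure/ARO-RP/pkg/env"
	utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
)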
@@ -21,7 +21,7 @@ import (
	"github.com/Azure/ARO-RP/pkg/env"
	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/authorization"
	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/network"
	utilpermissions "github.com/Azure/ARO-RP/pkg/util/permissions"
	"github.com/Azure/ARO-RP/pkg/util/permissions"
	"github.com/Azure/ARO-RP/pkg/util/refreshable"
	"github.com/Azure/ARO-RP/pkg/util/steps"
	"github.com/Azure/ARO-RP/pkg/util/subnet"
@@ -172,7 +172,7 @@ func (dv *dynamic) validateActions(ctx context.Context, r *azure.Resource, actio
	return wait.PollImmediateUntil(20*time.Second, func() (bool, error) {
		dv.log.Debug("retry validateActions")
		permissions, err := dv.permissions.ListForResource(ctx, r.ResourceGroup, r.Provider, "", r.ResourceType, r.ResourceName)
		perms, err := dv.permissions.ListForResource(ctx, r.ResourceGroup, r.Provider, "", r.ResourceType, r.ResourceName)

		if detailedErr, ok := err.(autorest.DetailedError); ok &&
			detailedErr.StatusCode == http.StatusForbidden {
@@ -183,7 +183,7 @@ func (dv *dynamic) validateActions(ctx context.Context, r *azure.Resource, actio
		}

		for _, action := range actions {
			ok, err := utilpermissions.CanDoAction(permissions, action)
			ok, err := permissions.CanDoAction(perms, action)
			if !ok || err != nil {
				// TODO(jminter): I don't understand if there are genuinely
				// cases where CanDoAction can return false then true shortly
@@ -13,7 +13,7 @@ import (
	mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
	mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features"
	"github.com/apparentlymart/go-cidr/cidr"
	jwt "github.com/form3tech-oss/jwt-go"
	"github.com/form3tech-oss/jwt-go"
	"github.com/sirupsen/logrus"

	"github.com/Azure/ARO-RP/pkg/api"
@@ -23,14 +23,14 @@ import (
	"github.com/Azure/ARO-RP/pkg/util/encryption"
	mock_cluster "github.com/Azure/ARO-RP/pkg/util/mocks/cluster"
	mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
	testdb "github.com/Azure/ARO-RP/test/database"
	testdatabase "github.com/Azure/ARO-RP/test/database"
)

type backendTestStruct struct {
	name string
	mocks func(*mock_cluster.MockInterface, database.OpenShiftClusters)
	fixture func(*testdb.Fixture)
	checker func(*testdb.Checker)
	fixture func(*testdatabase.Fixture)
	checker func(*testdatabase.Checker)
}

func TestBackendTry(t *testing.T) {
@@ -40,7 +40,7 @@ func TestBackendTry(t *testing.T) {
	for _, tt := range []backendTestStruct{
		{
			name: "StateCreating success that sets an InstallPhase stays it in Creating",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -57,7 +57,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {
			checker: func(c *testdatabase.Checker) {
				c.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -86,7 +86,7 @@ func TestBackendTry(t *testing.T) {
		},
		{
			name: "StateCreating success without an InstallPhase marks provisioning as succeeded",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -103,7 +103,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {
			checker: func(c *testdatabase.Checker) {
				c.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -129,7 +129,7 @@ func TestBackendTry(t *testing.T) {
		},
		{
			name: "StateCreating that fails marks ProvisioningState as Failed",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -146,7 +146,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {
			checker: func(c *testdatabase.Checker) {
				c.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					Dequeues: 1,
@@ -170,7 +170,7 @@ func TestBackendTry(t *testing.T) {
		},
		{
			name: "StateAdminUpdating success sets the last ProvisioningState and clears LastAdminUpdateError",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -189,7 +189,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {
			checker: func(c *testdatabase.Checker) {
				c.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -209,7 +209,7 @@ func TestBackendTry(t *testing.T) {
		},
		{
			name: "StateAdminUpdating run failure populates LastAdminUpdateError and restores previous provisioning state + failed provisioning state",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -228,7 +228,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {
			checker: func(c *testdatabase.Checker) {
				c.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -250,7 +250,7 @@ func TestBackendTry(t *testing.T) {
		},
		{
			name: "StateDeleting success deletes the document",
			fixture: func(f *testdb.Fixture) {
			fixture: func(f *testdatabase.Fixture) {
				f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
					Key: strings.ToLower(resourceID),
					OpenShiftCluster: &api.OpenShiftCluster{
@@ -267,7 +267,7 @@ func TestBackendTry(t *testing.T) {
					ID: mockSubID,
				})
			},
			checker: func(c *testdb.Checker) {},
			checker: func(c *testdatabase.Checker) {},
			mocks: func(manager *mock_cluster.MockInterface, dbOpenShiftClusters database.OpenShiftClusters) {
				manager.EXPECT().Delete(gomock.Any()).Return(nil)
			},
@@ -283,10 +283,10 @@ func TestBackendTry(t *testing.T) {
			_env := mock_env.NewMockInterface(controller)
			_env.EXPECT().DeploymentMode().Return(deployment.Development)

			dbOpenShiftClusters, clientOpenShiftClusters := testdb.NewFakeOpenShiftClusters()
			dbSubscriptions, _ := testdb.NewFakeSubscriptions()
			dbOpenShiftClusters, clientOpenShiftClusters := testdatabase.NewFakeOpenShiftClusters()
			dbSubscriptions, _ := testdatabase.NewFakeSubscriptions()

			f := testdb.NewFixture().WithOpenShiftClusters(dbOpenShiftClusters).WithSubscriptions(dbSubscriptions)
			f := testdatabase.NewFixture().WithOpenShiftClusters(dbOpenShiftClusters).WithSubscriptions(dbSubscriptions)
			tt.mocks(manager, dbOpenShiftClusters)
			tt.fixture(f)
			err := f.Create()
@@ -319,7 +319,7 @@ func TestBackendTry(t *testing.T) {
			// wait on the workers to finish their tasks
			b.waitForWorkerCompletion()

			c := testdb.NewChecker()
			c := testdatabase.NewChecker()
			tt.checker(c)

			errs := c.CheckOpenShiftClusters(clientOpenShiftClusters)
@@ -10,7 +10,7 @@ import (
	"github.com/Azure/ARO-RP/pkg/api"
	"github.com/Azure/ARO-RP/pkg/env"
	"github.com/Azure/ARO-RP/pkg/operator/controllers/genevalogging"
	"github.com/Azure/ARO-RP/pkg/util/tls"
	utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
	"github.com/Azure/ARO-RP/pkg/util/version"
)
@@ -24,12 +24,12 @@ func GetConfig(env env.Interface, doc *api.OpenShiftClusterDocument) (*bootstrap
	key, cert := env.ClusterGenevaLoggingSecret()

	gcsKeyBytes, err := tls.PrivateKeyAsBytes(key)
	gcsKeyBytes, err := utiltls.PrivateKeyAsBytes(key)
	if err != nil {
		return nil, err
	}

	gcsCertBytes, err := tls.CertAsBytes(cert)
	gcsCertBytes, err := utiltls.CertAsBytes(cert)
	if err != nil {
		return nil, err
	}
@@ -7,7 +7,7 @@ import (
	"context"

	"github.com/Azure/ARO-RP/pkg/api"
	pkgacrtoken "github.com/Azure/ARO-RP/pkg/util/acrtoken"
	"github.com/Azure/ARO-RP/pkg/util/acrtoken"
	"github.com/Azure/ARO-RP/pkg/util/deployment"
)
@@ -16,19 +16,19 @@ func (m *manager) ensureACRToken(ctx context.Context) error {
		return nil
	}

	acrtoken, err := pkgacrtoken.NewManager(m.env, m.localFpAuthorizer)
	token, err := acrtoken.NewManager(m.env, m.localFpAuthorizer)
	if err != nil {
		return err
	}

	rp := acrtoken.GetRegistryProfile(m.doc.OpenShiftCluster)
	rp := token.GetRegistryProfile(m.doc.OpenShiftCluster)
	if rp == nil {
		// 1. choose a name and establish the intent to create a token with
		// that name
		rp = acrtoken.NewRegistryProfile(m.doc.OpenShiftCluster)
		rp = token.NewRegistryProfile(m.doc.OpenShiftCluster)

		m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
			acrtoken.PutRegistryProfile(doc.OpenShiftCluster, rp)
			token.PutRegistryProfile(doc.OpenShiftCluster, rp)
			return nil
		})
		if err != nil {
@@ -39,7 +39,7 @@ func (m *manager) ensureACRToken(ctx context.Context) error {
	if rp.Password == "" {
		// 2. ensure a token with the chosen name exists, generate a
		// password for it and store it in the database
		password, err := acrtoken.EnsureTokenAndPassword(ctx, rp)
		password, err := token.EnsureTokenAndPassword(ctx, rp)
		if err != nil {
			return err
		}
@@ -47,7 +47,7 @@ func (m *manager) ensureACRToken(ctx context.Context) error {
		rp.Password = api.SecureString(password)

		m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
			acrtoken.PutRegistryProfile(doc.OpenShiftCluster, rp)
			token.PutRegistryProfile(doc.OpenShiftCluster, rp)
			return nil
		})
		if err != nil {
@@ -10,7 +10,7 @@ import (
	mgmtauthorization "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization"
	"github.com/ghodss/yaml"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
@@ -75,7 +75,7 @@ func (m *manager) updateAROSecret(ctx context.Context) error {
	// cloud-config: <base64 map[string]string with keys 'aadClientId' and 'aadClientSecret'>
	secret, err := m.kubernetescli.CoreV1().Secrets("kube-system").Get(ctx, "azure-cloud-provider", metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) { // we are not in control if secret is not present
		if kerrors.IsNotFound(err) { // we are not in control if secret is not present
			return nil
		}
		return err
@@ -18,13 +18,12 @@ import (
	operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/Azure/ARO-RP/pkg/api"
	"github.com/Azure/ARO-RP/pkg/util/arm"
	mock_authz "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/authorization"
	mock_authorization "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/authorization"
	mock_features "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/features"
	"github.com/Azure/ARO-RP/pkg/util/rbac"
)
@@ -58,7 +57,7 @@ func TestCreateOrUpdateClusterServicePrincipalRBAC(t *testing.T) {
		clusterSPObjectID string
		roleAssignments []mgmtauthorization.RoleAssignment
		mocksDeployment func(*mock_features.MockDeploymentsClient)
		mocksAuthz func(*mock_authz.MockRoleAssignmentsClient, interface{})
		mocksAuthz func(*mock_authorization.MockRoleAssignmentsClient, interface{})
	}{
		{
@@ -73,7 +72,7 @@ func TestCreateOrUpdateClusterServicePrincipalRBAC(t *testing.T) {
					},
				},
			},
			mocksAuthz: func(client *mock_authz.MockRoleAssignmentsClient, result interface{}) {
			mocksAuthz: func(client *mock_authorization.MockRoleAssignmentsClient, result interface{}) {
				client.EXPECT().ListForResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(result, nil)
			},
		},
@@ -94,7 +93,7 @@ func TestCreateOrUpdateClusterServicePrincipalRBAC(t *testing.T) {
					},
				}).Return(nil)
			},
			mocksAuthz: func(client *mock_authz.MockRoleAssignmentsClient, result interface{}) {
			mocksAuthz: func(client *mock_authorization.MockRoleAssignmentsClient, result interface{}) {
				client.EXPECT().ListForResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(result, nil)
			},
		},
@@ -125,7 +124,7 @@ func TestCreateOrUpdateClusterServicePrincipalRBAC(t *testing.T) {
					},
				}).Return(nil)
			},
			mocksAuthz: func(client *mock_authz.MockRoleAssignmentsClient, result interface{}) {
			mocksAuthz: func(client *mock_authorization.MockRoleAssignmentsClient, result interface{}) {
				client.EXPECT().ListForResourceGroup(gomock.Any(), gomock.Any(), gomock.Any()).Return(result, nil)
				client.EXPECT().Delete(gomock.Any(), resourceGroupID, assignmentName)
			},
@@ -135,7 +134,7 @@ func TestCreateOrUpdateClusterServicePrincipalRBAC(t *testing.T) {
			controller := gomock.NewController(t)
			defer controller.Finish()

			raClient := mock_authz.NewMockRoleAssignmentsClient(controller)
			raClient := mock_authorization.NewMockRoleAssignmentsClient(controller)
			deployments := mock_features.NewMockDeploymentsClient(controller)

			if tt.mocksDeployment != nil {
@@ -168,7 +167,7 @@ func getFakeAROSecret(clientID, secret string) corev1.Secret {
	if err != nil {
		panic(err)
	}
	return v1.Secret{
	return corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
@@ -296,7 +295,7 @@ func getFakeOpenShiftSecret() corev1.Secret {
		"azure_client_secret": []byte("azure_client_secret_value"),
		"azure_tenant_id": []byte("azure_tenant_id_value"),
	}
	return v1.Secret{
	return corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
@@ -14,7 +14,7 @@ import (
	consoleapi "github.com/openshift/console-operator/pkg/api"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sfake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/fake"
)

const errMustBeNilMsg = "err must be nil; condition is retried until timeout"
@@ -50,7 +50,7 @@ func TestBootstrapConfigMapReady(t *testing.T) {
		},
	} {
		m := &manager{
			kubernetescli: k8sfake.NewSimpleClientset(&corev1.ConfigMap{
			kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: tt.configMapName,
					Namespace: tt.configMapNamespace,
@@ -191,7 +191,7 @@ func TestMinimumWorkerNodesReady(t *testing.T) {
		},
	} {
		m := &manager{
			kubernetescli: k8sfake.NewSimpleClientset(&corev1.NodeList{
			kubernetescli: fake.NewSimpleClientset(&corev1.NodeList{
				Items: []corev1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
@@ -8,7 +8,7 @@ import (
	"testing"

	operatorv1 "github.com/openshift/api/operator/v1"
	"github.com/openshift/client-go/operator/clientset/versioned/fake"
	operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -20,7 +20,7 @@ func TestUpdateConsoleBranding(t *testing.T) {
	m := &manager{
		log: logrus.NewEntry(logrus.StandardLogger()),
		operatorcli: fake.NewSimpleClientset(&operatorv1.Console{
		operatorcli: operatorfake.NewSimpleClientset(&operatorv1.Console{
			ObjectMeta: metav1.ObjectMeta{
				Name: consoleName,
			},
@@ -12,7 +12,7 @@ import (
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sfake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/Azure/ARO-RP/pkg/api"
	testdatabase "github.com/Azure/ARO-RP/test/database"
@@ -100,7 +100,7 @@ func TestPopulateCreatedAt(t *testing.T) {
	m := &manager{
		log: logrus.NewEntry(logrus.StandardLogger()),
		kubernetescli: k8sfake.NewSimpleClientset(tt.ns),
		kubernetescli: fake.NewSimpleClientset(tt.ns),
		doc: clusterdoc,
		db: fakeOpenShiftClustersDatabase,
	}
@@ -17,7 +17,7 @@ import (
	"github.com/Azure/ARO-RP/pkg/api"
	"github.com/Azure/ARO-RP/pkg/util/arm"
	"github.com/Azure/ARO-RP/pkg/util/deployment"
	mock_authz "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/authorization"
	mock_authorization "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/authorization"
	mock_features "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/features"
	mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
)
@@ -101,7 +101,7 @@ func TestCreateOrUpdateDenyAssignment(t *testing.T) {
			defer controller.Finish()

			env := mock_env.NewMockInterface(controller)
			denyAssignments := mock_authz.NewMockDenyAssignmentClient(controller)
			denyAssignments := mock_authorization.NewMockDenyAssignmentClient(controller)
			deployments := mock_features.NewMockDeploymentsClient(controller)

			env.EXPECT().DeploymentMode().Return(deployment.Production)
@@ -10,7 +10,7 @@ import (
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/openshift/installer/pkg/asset/installconfig"
	"github.com/openshift/installer/pkg/types"
	aztypes "github.com/openshift/installer/pkg/types/azure"
	azuretypes "github.com/openshift/installer/pkg/types/azure"
)

func TestZones(t *testing.T) {
@@ -47,7 +47,7 @@ func TestZones(t *testing.T) {
			Config: &types.InstallConfig{
				ControlPlane: &types.MachinePool{
					Platform: types.MachinePoolPlatform{
						Azure: &aztypes.MachinePool{
						Azure: &azuretypes.MachinePool{
							Zones: tt.zones,
						},
					},
@@ -7,8 +7,8 @@ import (
	"context"
	"testing"

	v1 "github.com/openshift/api/config/v1"
	"github.com/openshift/client-go/config/clientset/versioned/fake"
	configv1 "github.com/openshift/api/config/v1"
	configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -18,11 +18,11 @@ func TestDisableUpdates(t *testing.T) {
	versionName := "version"

	m := &manager{
		configcli: fake.NewSimpleClientset(&v1.ClusterVersion{
		configcli: configfake.NewSimpleClientset(&configv1.ClusterVersion{
			ObjectMeta: metav1.ObjectMeta{
				Name: versionName,
			},
			Spec: v1.ClusterVersionSpec{
			Spec: configv1.ClusterVersionSpec{
				Upstream: "RemoveMe",
				Channel: "RemoveMe",
			},
@@ -12,11 +12,11 @@ import (
	"strings"

	"github.com/openshift/installer/pkg/asset/tls"
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	"github.com/Azure/ARO-RP/pkg/util/pem"
	utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
	"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
@@ -40,7 +40,7 @@ func (m *manager) fixMCSCert(ctx context.Context) error {
		return err
	}

	_, certs, err := pem.Parse(s.Data[v1.TLSCertKey])
	_, certs, err := utilpem.Parse(s.Data[corev1.TLSCertKey])
	if err != nil {
		return err
	}
@@ -82,8 +82,8 @@ func (m *manager) fixMCSCert(ctx context.Context) error {
		return err
	}

	s.Data[v1.TLSCertKey] = mcsCertKey.CertRaw
	s.Data[v1.TLSPrivateKeyKey] = mcsCertKey.KeyRaw
	s.Data[corev1.TLSCertKey] = mcsCertKey.CertRaw
	s.Data[corev1.TLSPrivateKeyKey] = mcsCertKey.KeyRaw

	_, err = m.kubernetescli.CoreV1().Secrets("openshift-machine-config-operator").Update(ctx, s, metav1.UpdateOptions{})
	return err
@@ -13,7 +13,7 @@ import (
	"github.com/golang/mock/gomock"
	"github.com/openshift/installer/pkg/asset/tls"
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
@@ -68,13 +68,13 @@ func TestFixMCSCert(t *testing.T) {
	graph := mock_graph.NewMockManager(controller)
	graph.EXPECT().LoadPersisted(ctx, "", "cluster").Return(pg, nil)

	kubernetescli := fake.NewSimpleClientset(&v1.Secret{
	kubernetescli := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "machine-config-server-tls",
			Namespace: "openshift-machine-config-operator",
		},
		Data: map[string][]byte{
			v1.TLSCertKey: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: validCerts[0].Raw}),
			corev1.TLSCertKey: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: validCerts[0].Raw}),
		},
	})
	kubernetescli.AddReactor("delete-collection", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) {
@@ -130,14 +130,14 @@ func TestFixMCSCert(t *testing.T) {
				},
			},
		},
		kubernetescli: fake.NewSimpleClientset(&v1.Secret{
		kubernetescli: fake.NewSimpleClientset(&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: "machine-config-server-tls",
				Namespace: "openshift-machine-config-operator",
			},
			Data: map[string][]byte{
				v1.TLSCertKey: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: validCerts[0].Raw}),
				v1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}),
				corev1.TLSCertKey: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: validCerts[0].Raw}),
				corev1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}),
			},
		}),
	}, nil
@@ -169,8 +169,8 @@ func TestFixMCSCert(t *testing.T) {
	}

	var pemdata []byte
	pemdata = append(pemdata, s.Data[v1.TLSCertKey]...)
	pemdata = append(pemdata, s.Data[v1.TLSPrivateKeyKey]...)
	pemdata = append(pemdata, s.Data[corev1.TLSCertKey]...)
	pemdata = append(pemdata, s.Data[corev1.TLSPrivateKeyKey]...)

	key, certs, err := utilpem.Parse(pemdata)
	if err != nil {
@@ -12,7 +12,7 @@ import (
	machinev1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
	maofake "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/fake"
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -54,7 +54,7 @@ func marshal(t *testing.T, i interface{}) []byte {
	return b
}

func userDataSecret(t *testing.T, namespace, name, appendSource, mergeSource string) *v1.Secret {
func userDataSecret(t *testing.T, namespace, name, appendSource, mergeSource string) *corev1.Secret {
	config := map[string]interface{}{
		"extrakey": true,
	}
@@ -77,7 +77,7 @@ func userDataSecret(t *testing.T, namespace, name, appendSource, mergeSource str
		}
	}

	return &v1.Secret{
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
@@ -152,19 +152,19 @@ func TestFixMCSUserData(t *testing.T) {
		),
		maocli: maofake.NewSimpleClientset(
			testMachineSet(t, "openshift-machine-api", "worker", &azureproviderv1beta1.AzureMachineProviderSpec{
				UserDataSecret: &v1.SecretReference{
				UserDataSecret: &corev1.SecretReference{
					Name: "worker-user-data",
				},
			}),
			testMachine(t, "openshift-machine-api", "master", &azureproviderv1beta1.AzureMachineProviderSpec{
				UserDataSecret: &v1.SecretReference{
				UserDataSecret: &corev1.SecretReference{
					Name: "master-user-data",
				},
			}),
		),
	}

	wantSecrets := []*v1.Secret{
	wantSecrets := []*corev1.Secret{
		userDataSecret(t, "openshift-machine-api", "master-user-data", "https://1.2.3.4:22623/config/master", ""),
		userDataSecret(t, "openshift-machine-api", "worker-user-data", "", "https://1.2.3.4:22623/config/worker"),
	}
@@ -14,8 +14,8 @@ import (
	"github.com/onsi/gomega/types"
	configv1 "github.com/openshift/api/config/v1"
	operatorv1 "github.com/openshift/api/operator/v1"
	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	fakeoperator "github.com/openshift/client-go/operator/clientset/versioned/fake"
	configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
	operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -66,8 +66,8 @@ func TestStepRunnerWithInstaller(t *testing.T) {
		wantEntries []map[string]types.GomegaMatcher
		wantErr string
		kubernetescli *fake.Clientset
		configcli *fakeconfig.Clientset
		operatorcli *fakeoperator.Clientset
		configcli *configfake.Clientset
		operatorcli *operatorfake.Clientset
	}{
		{
			name: "Failed step run will log cluster version, cluster operator status, and ingress information if available",
@@ -102,8 +102,8 @@ func TestStepRunnerWithInstaller(t *testing.T) {
				},
			},
			kubernetescli: fake.NewSimpleClientset(node),
			configcli: fakeconfig.NewSimpleClientset(clusterVersion, clusterOperator),
			operatorcli: fakeoperator.NewSimpleClientset(ingressController),
			configcli: configfake.NewSimpleClientset(clusterVersion, clusterOperator),
			operatorcli: operatorfake.NewSimpleClientset(ingressController),
		},
		{
			name: "Failed step run will not crash if it cannot get the clusterversions, clusteroperators, ingresscontrollers",
@@ -138,8 +138,8 @@ func TestStepRunnerWithInstaller(t *testing.T) {
				},
			},
			kubernetescli: fake.NewSimpleClientset(),
			configcli: fakeconfig.NewSimpleClientset(),
			operatorcli: fakeoperator.NewSimpleClientset(),
			configcli: configfake.NewSimpleClientset(),
			operatorcli: operatorfake.NewSimpleClientset(),
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
@@ -12,7 +12,7 @@ import (
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/golang/mock/gomock"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
@@ -67,14 +67,14 @@ func TestCreateOrUpdateRouterIPFromCluster(t *testing.T) {
				CreateOrUpdateRouter(gomock.Any(), gomock.Any(), gomock.Any()).
				Return(nil)
			},
			kubernetescli: fake.NewSimpleClientset(&v1.Service{
			kubernetescli: fake.NewSimpleClientset(&corev1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name: "router-default",
					Namespace: "openshift-ingress",
				},
				Status: v1.ServiceStatus{
					LoadBalancer: v1.LoadBalancerStatus{
						Ingress: []v1.LoadBalancerIngress{{
				Status: corev1.ServiceStatus{
					LoadBalancer: corev1.LoadBalancerStatus{
						Ingress: []corev1.LoadBalancerIngress{{
							IP: "1.2.3.4",
						}},
					},
@@ -103,7 +103,7 @@ func TestCreateOrUpdateRouterIPFromCluster(t *testing.T) {
				doc.Dequeues = 1
				checker.AddOpenShiftClusterDocuments(doc)
			},
			kubernetescli: fake.NewSimpleClientset(&v1.Service{
			kubernetescli: fake.NewSimpleClientset(&corev1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name: "router-default",
					Namespace: "openshift-ingress",
@@ -12,7 +12,7 @@ import (
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/kubeconfig"
	"github.com/openshift/installer/pkg/asset/tls"
	clientcmd "k8s.io/client-go/tools/clientcmd/api/v1"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"

	"github.com/Azure/ARO-RP/pkg/cluster/graph"
)
@@ -54,21 +54,21 @@ func generateKubeconfig(pg graph.PersistedGraph, commonName string, organization
	// create a Config for the new service kubeconfig based on the generated cluster admin Config
	aroInternalClient := kubeconfig.AdminInternalClient{}
	aroInternalClient.Config = &clientcmd.Config{
	aroInternalClient.Config = &clientcmdv1.Config{
		Clusters: adminInternalClient.Config.Clusters,
		AuthInfos: []clientcmd.NamedAuthInfo{
		AuthInfos: []clientcmdv1.NamedAuthInfo{
			{
				Name: commonName,
				AuthInfo: clientcmd.AuthInfo{
				AuthInfo: clientcmdv1.AuthInfo{
					ClientCertificateData: clientCertKey.CertRaw,
					ClientKeyData: clientCertKey.KeyRaw,
				},
			},
		},
		Contexts: []clientcmd.NamedContext{
		Contexts: []clientcmdv1.NamedContext{
			{
				Name: commonName,
				Context: clientcmd.Context{
				Context: clientcmdv1.Context{
					Cluster: adminInternalClient.Config.Contexts[0].Context.Cluster,
					AuthInfo: commonName,
				},
@@ -12,7 +12,7 @@ import (
	"github.com/ghodss/yaml"
	"github.com/openshift/installer/pkg/asset/kubeconfig"
	"github.com/openshift/installer/pkg/asset/tls"
	clientcmd "k8s.io/client-go/tools/clientcmd/api/v1"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"

	"github.com/Azure/ARO-RP/pkg/cluster/graph"
	utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
@@ -40,21 +40,21 @@ func TestGenerateAROServiceKubeconfig(t *testing.T) {
	serviceName := "system:aro-service"

	adminInternalClient := &kubeconfig.AdminInternalClient{}
	adminInternalClient.Config = &clientcmd.Config{
		Clusters: []clientcmd.NamedCluster{
	adminInternalClient.Config = &clientcmdv1.Config{
		Clusters: []clientcmdv1.NamedCluster{
			{
				Name: clusterName,
				Cluster: clientcmd.Cluster{
				Cluster: clientcmdv1.Cluster{
					Server: apiserverURL,
					CertificateAuthorityData: nil,
				},
			},
		},
		AuthInfos: []clientcmd.NamedAuthInfo{},
		Contexts: []clientcmd.NamedContext{
		AuthInfos: []clientcmdv1.NamedAuthInfo{},
		Contexts: []clientcmdv1.NamedContext{
			{
				Name: serviceName,
				Context: clientcmd.Context{
				Context: clientcmdv1.Context{
					Cluster: clusterName,
					AuthInfo: serviceName,
				},
@@ -77,7 +77,7 @@ func TestGenerateAROServiceKubeconfig(t *testing.T) {
		t.Fatal(err)
	}

	var got *clientcmd.Config
	var got *clientcmdv1.Config
	err = yaml.Unmarshal(aroServiceInternalClient.File.Data, &got)
	if err != nil {
		t.Fatal(err)
@@ -123,7 +123,7 @@ func TestGenerateAROServiceKubeconfig(t *testing.T) {
	}

	// validate the rest of the struct
	got.AuthInfos = []clientcmd.NamedAuthInfo{}
	got.AuthInfos = []clientcmdv1.NamedAuthInfo{}
	want := adminInternalClient.Config

	if !reflect.DeepEqual(got, want) {
@@ -12,9 +12,9 @@ import (
"github.com/golang/mock/gomock"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
fakemcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
mcofake "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/api"
@@ -68,7 +68,7 @@ func TestRemovePrivateDNSZone(t *testing.T) {
},
}, nil)
},
mcocli: fakemcoclient.NewSimpleClientset(
mcocli: mcofake.NewSimpleClientset(
&mcv1.MachineConfigPool{},
),
},
@@ -92,14 +92,14 @@ func TestRemovePrivateDNSZone(t *testing.T) {
},
}, nil)
},
mcocli: fakemcoclient.NewSimpleClientset(
mcocli: mcofake.NewSimpleClientset(
&mcv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{
Name: "master",
},
Status: mcv1.MachineConfigPoolStatus{
Configuration: mcv1.MachineConfigPoolStatusConfiguration{
Source: []v1.ObjectReference{
Source: []corev1.ObjectReference{
{
Name: "99-master-aro-dns",
},
@@ -146,14 +146,14 @@ func TestRemovePrivateDNSZone(t *testing.T) {
DeleteAndWait(ctx, "testGroup", "zone1", "").
Return(nil)
},
mcocli: fakemcoclient.NewSimpleClientset(
mcocli: mcofake.NewSimpleClientset(
&mcv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{
Name: "master",
},
Status: mcv1.MachineConfigPoolStatus{
Configuration: mcv1.MachineConfigPoolStatusConfiguration{
Source: []v1.ObjectReference{
Source: []corev1.ObjectReference{
{
Name: "99-master-aro-dns",
},

@@ -8,7 +8,6 @@ import (

configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configscheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"

@@ -43,15 +42,8 @@ func (m *manager) disableOperatorHubSources(ctx context.Context) error {
return nil
}

// https://bugzilla.redhat.com/show_bug.cgi?id=1815649
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
c := &configv1.OperatorHub{}
err := m.configcli.ConfigV1().RESTClient().Get().
Resource("operatorhubs").
Name("cluster").
VersionedParams(&metav1.GetOptions{}, configscheme.ParameterCodec).
Do(ctx).
Into(c)
c, err := m.configcli.ConfigV1().OperatorHubs().Get(ctx, "cluster", metav1.GetOptions{})
if err != nil {
return err
}

@@ -9,10 +9,10 @@ import (
"encoding/pem"

configv1 "github.com/openshift/api/config/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coreclient "k8s.io/client-go/kubernetes/typed/core/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"

"github.com/Azure/ARO-RP/pkg/util/deployment"
@@ -68,7 +68,7 @@ func (m *manager) createCertificates(ctx context.Context) error {
return nil
}

func (m *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInterface, certificateName string) error {
func (m *manager) ensureSecret(ctx context.Context, secrets corev1client.SecretInterface, certificateName string) error {
bundle, err := m.env.ClusterKeyvault().GetSecret(ctx, certificateName)
if err != nil {
return err
@@ -89,17 +89,17 @@ func (m *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInt
cb = append(cb, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})...)
}

_, err = secrets.Create(ctx, &v1.Secret{
_, err = secrets.Create(ctx, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: certificateName,
},
Data: map[string][]byte{
v1.TLSCertKey: cb,
v1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
corev1.TLSCertKey: cb,
corev1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
},
Type: v1.SecretTypeTLS,
Type: corev1.SecretTypeTLS,
}, metav1.CreateOptions{})
if errors.IsAlreadyExists(err) {
if kerrors.IsAlreadyExists(err) {
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
s, err := secrets.Get(ctx, certificateName, metav1.GetOptions{})
if err != nil {
@@ -107,10 +107,10 @@ func (m *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInt
}

s.Data = map[string][]byte{
v1.TLSCertKey: cb,
v1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
corev1.TLSCertKey: cb,
corev1.TLSPrivateKeyKey: pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}),
}
s.Type = v1.SecretTypeTLS
s.Type = corev1.SecretTypeTLS

_, err = secrets.Update(ctx, s, metav1.UpdateOptions{})
return err
@@ -185,7 +185,7 @@ func (m *manager) configureIngressCertificate(ctx context.Context) error {
return err
}

ic.Spec.DefaultCertificate = &v1.LocalObjectReference{
ic.Spec.DefaultCertificate = &corev1.LocalObjectReference{
Name: m.doc.ID + "-ingress",
}

@@ -16,7 +16,7 @@ import (
mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
mgmtauthorization "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization"
mgmtcontainerregistry "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2019-06-01-preview/containerregistry"
mgmtmonitor "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
mgmtinsights "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
mgmtstorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
"github.com/Azure/go-autorest/autorest/to"
uuid "github.com/satori/go.uuid"
@@ -652,8 +652,8 @@ func (g *generator) lb() *arm.Resource {

func (g *generator) actionGroup(name string, shortName string) *arm.Resource {
return &arm.Resource{
Resource: mgmtmonitor.ActionGroupResource{
ActionGroup: &mgmtmonitor.ActionGroup{
Resource: mgmtinsights.ActionGroupResource{
ActionGroup: &mgmtinsights.ActionGroup{
Enabled: to.BoolPtr(true),
GroupShortName: to.StringPtr(shortName),
},
@@ -668,9 +668,9 @@ func (g *generator) actionGroup(name string, shortName string) *arm.Resource {
// lbAlert generates an alert resource for the rp-lb healthprobe metric
func (g *generator) lbAlert(threshold float64, severity int32, name string, evalFreq string, windowSize string, metric string) *arm.Resource {
return &arm.Resource{
Resource: mgmtmonitor.MetricAlertResource{
MetricAlertProperties: &mgmtmonitor.MetricAlertProperties{
Actions: &[]mgmtmonitor.MetricAlertAction{
Resource: mgmtinsights.MetricAlertResource{
MetricAlertProperties: &mgmtinsights.MetricAlertProperties{
Actions: &[]mgmtinsights.MetricAlertAction{
{
ActionGroupID: to.StringPtr("[resourceId(parameters('subscriptionResourceGroupName'), 'Microsoft.Insights/actionGroups', 'rp-health-ag')]"),
},
@@ -684,19 +684,19 @@ func (g *generator) lbAlert(threshold float64, severity int32, name string, eval
WindowSize: to.StringPtr(windowSize),
TargetResourceType: to.StringPtr("Microsoft.Network/loadBalancers"),
AutoMitigate: to.BoolPtr(true),
Criteria: mgmtmonitor.MetricAlertSingleResourceMultipleMetricCriteria{
AllOf: &[]mgmtmonitor.MetricCriteria{
Criteria: mgmtinsights.MetricAlertSingleResourceMultipleMetricCriteria{
AllOf: &[]mgmtinsights.MetricCriteria{
{
CriterionType: mgmtmonitor.CriterionTypeStaticThresholdCriterion,
CriterionType: mgmtinsights.CriterionTypeStaticThresholdCriterion,
MetricName: to.StringPtr(metric),
MetricNamespace: to.StringPtr("microsoft.network/loadBalancers"),
Name: to.StringPtr("HealthProbeCheck"),
Operator: mgmtmonitor.OperatorLessThan,
Operator: mgmtinsights.OperatorLessThan,
Threshold: to.Float64Ptr(threshold),
TimeAggregation: mgmtmonitor.Average,
TimeAggregation: mgmtinsights.Average,
},
},
OdataType: mgmtmonitor.OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria,
OdataType: mgmtinsights.OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria,
},
},
Name: to.StringPtr("[concat('" + name + "-', resourceGroup().location)]"),

@@ -13,7 +13,7 @@ import (
"path/filepath"
"time"

"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
azkeyvault "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@@ -22,7 +22,7 @@ import (
"github.com/Azure/ARO-RP/pkg/deploy/generator"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/util/arm"
utilkeyvault "github.com/Azure/ARO-RP/pkg/util/keyvault"
"github.com/Azure/ARO-RP/pkg/util/keyvault"
)

// PreDeploy deploys managed identity, NSGs and keyvaults, needed for main
@@ -346,7 +346,7 @@ func (d *deployer) configureServiceSecrets(ctx context.Context) error {
return d.ensureSecretKey(ctx, d.portalKeyvault, env.PortalServerSSHKeySecretName)
}

func (d *deployer) ensureSecret(ctx context.Context, kv utilkeyvault.Manager, secretName string) error {
func (d *deployer) ensureSecret(ctx context.Context, kv keyvault.Manager, secretName string) error {
existingSecrets, err := kv.GetSecrets(ctx)
if err != nil {
return err
@@ -365,12 +365,12 @@ func (d *deployer) ensureSecret(ctx context.Context, kv utilkeyvault.Manager, se
}

d.log.Infof("setting %s", secretName)
return kv.SetSecret(ctx, secretName, keyvault.SecretSetParameters{
return kv.SetSecret(ctx, secretName, azkeyvault.SecretSetParameters{
Value: to.StringPtr(base64.StdEncoding.EncodeToString(key)),
})
}

func (d *deployer) ensureSecretKey(ctx context.Context, kv utilkeyvault.Manager, secretName string) error {
func (d *deployer) ensureSecretKey(ctx context.Context, kv keyvault.Manager, secretName string) error {
existingSecrets, err := kv.GetSecrets(ctx)
if err != nil {
return err
@@ -388,7 +388,7 @@ func (d *deployer) ensureSecretKey(ctx context.Context, kv utilkeyvault.Manager,
}

d.log.Infof("setting %s", secretName)
return kv.SetSecret(ctx, secretName, keyvault.SecretSetParameters{
return kv.SetSecret(ctx, secretName, azkeyvault.SecretSetParameters{
Value: to.StringPtr(base64.StdEncoding.EncodeToString(x509.MarshalPKCS1PrivateKey(key))),
})
}

@@ -14,7 +14,7 @@ import (
"github.com/sirupsen/logrus"

"github.com/Azure/ARO-RP/pkg/api"
admin "github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/metrics"
"github.com/Azure/ARO-RP/pkg/metrics/noop"

@@ -8,7 +8,7 @@ import (
"testing"

configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/client-go/config/clientset/versioned/fake"
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -30,8 +30,8 @@ func TestUpgradeCluster(t *testing.T) {
Version: version.NewVersion(4, 5, 3),
}

newFakecli := func(status configv1.ClusterVersionStatus) *fake.Clientset {
return fake.NewSimpleClientset(&configv1.ClusterVersion{
newFakecli := func(status configv1.ClusterVersionStatus) *configfake.Clientset {
return configfake.NewSimpleClientset(&configv1.ClusterVersion{
ObjectMeta: metav1.ObjectMeta{
Name: "version",
},
@@ -44,7 +44,7 @@ func TestUpgradeCluster(t *testing.T) {

for _, tt := range []struct {
name string
fakecli *fake.Clientset
fakecli *configfake.Clientset

desiredVersion string
upgradeY bool

@@ -15,7 +15,7 @@ import (

"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/Azure/ARO-RP/pkg/api"
@@ -324,7 +324,7 @@ func (f *frontend) Run(ctx context.Context, stop <-chan struct{}, done chan<- st
}

func adminReply(log *logrus.Entry, w http.ResponseWriter, header http.Header, b []byte, err error) {
if apiErr, ok := err.(errors.APIStatus); ok {
if apiErr, ok := err.(kerrors.APIStatus); ok {
status := apiErr.Status()

var target string

@@ -14,7 +14,7 @@ import (
"github.com/sirupsen/logrus"

"github.com/Azure/ARO-RP/pkg/api"
admin "github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/api/admin"
v20200430 "github.com/Azure/ARO-RP/pkg/api/v20200430"
"github.com/Azure/ARO-RP/pkg/metrics"
"github.com/Azure/ARO-RP/pkg/metrics/noop"

@@ -31,7 +31,7 @@ import (
mock_keyvault "github.com/Azure/ARO-RP/pkg/util/mocks/keyvault"
utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
testdatabase "github.com/Azure/ARO-RP/test/database"
testclusterdata "github.com/Azure/ARO-RP/test/util/clusterdata"
"github.com/Azure/ARO-RP/test/util/clusterdata"
"github.com/Azure/ARO-RP/test/util/listener"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
@@ -62,7 +62,7 @@ type testInfra struct {
controller *gomock.Controller
l net.Listener
cli *http.Client
enricher testclusterdata.TestEnricher
enricher clusterdata.TestEnricher
audit *logrus.Entry
log *logrus.Entry
fixture *testdatabase.Fixture
@@ -112,7 +112,7 @@ func newTestInfra(t *testing.T) *testInfra {
env: _env,
controller: controller,
l: l,
enricher: testclusterdata.NewTestEnricher(),
enricher: clusterdata.NewTestEnricher(),
fixture: fixture,
checker: checker,
audit: auditEntry,

@@ -13,7 +13,7 @@ import (

"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
pkgnamespace "github.com/Azure/ARO-RP/pkg/util/namespace"
utilnamespace "github.com/Azure/ARO-RP/pkg/util/namespace"
)

func validateTerminalProvisioningState(state api.ProvisioningState) error {
@@ -77,7 +77,7 @@ func (f *frontend) validateOpenShiftUniqueKey(ctx context.Context, doc *api.Open
var rxKubernetesString = regexp.MustCompile(`(?i)^[-a-z0-9.]{0,255}$`)

func validateAdminKubernetesObjectsNonCustomer(method, groupKind, namespace, name string) error {
if !pkgnamespace.IsOpenShift(namespace) {
if !utilnamespace.IsOpenShift(namespace) {
return api.NewCloudError(http.StatusForbidden, api.CloudErrorCodeForbidden, "", "Access to the provided namespace '%s' is forbidden.", namespace)
}

@@ -7,25 +7,25 @@ import (
"net/url"
"time"

k8smetrics "k8s.io/client-go/tools/metrics"
kmetrics "k8s.io/client-go/tools/metrics"

"github.com/Azure/ARO-RP/pkg/metrics"
)

var _ k8smetrics.LatencyMetric = (*tracer)(nil)
var _ k8smetrics.ResultMetric = (*tracer)(nil)
var _ kmetrics.LatencyMetric = (*tracer)(nil)
var _ kmetrics.ResultMetric = (*tracer)(nil)

type tracer struct {
m metrics.Interface
}

func NewLatency(m metrics.Interface) k8smetrics.LatencyMetric {
func NewLatency(m metrics.Interface) kmetrics.LatencyMetric {
return &tracer{
m: m,
}
}

func NewResult(m metrics.Interface) k8smetrics.ResultMetric {
func NewResult(m metrics.Interface) kmetrics.ResultMetric {
return &tracer{
m: m,
}

@@ -8,7 +8,7 @@ import (

configv1 "github.com/openshift/api/config/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

pkgoperator "github.com/Azure/ARO-RP/pkg/operator"
@@ -39,7 +39,7 @@ func (mon *Monitor) listClusterOperators(ctx context.Context) (*configv1.Cluster
}

// TODO: remove this function and paginate
func (mon *Monitor) listNodes(ctx context.Context) (*v1.NodeList, error) {
func (mon *Monitor) listNodes(ctx context.Context) (*corev1.NodeList, error) {
if mon.cache.ns != nil {
return mon.cache.ns, nil
}

@@ -15,7 +15,7 @@ import (
mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"

@@ -42,7 +42,7 @@ type Monitor struct {
cache struct {
cos *configv1.ClusterOperatorList
cv *configv1.ClusterVersion
ns *v1.NodeList
ns *corev1.NodeList
arodl *appsv1.DeploymentList
}
}

@@ -9,7 +9,7 @@ import (

"github.com/golang/mock/gomock"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/client-go/config/clientset/versioned/fake"
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

mock_metrics "github.com/Azure/ARO-RP/pkg/util/mocks/metrics"
@@ -18,7 +18,7 @@ import (
func TestEmitClusterOperatorConditions(t *testing.T) {
ctx := context.Background()

configcli := fake.NewSimpleClientset(&configv1.ClusterOperator{
configcli := configfake.NewSimpleClientset(&configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "console",
},

@@ -9,7 +9,7 @@ import (

"github.com/golang/mock/gomock"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/client-go/config/clientset/versioned/fake"
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

mock_metrics "github.com/Azure/ARO-RP/pkg/util/mocks/metrics"
@@ -18,7 +18,7 @@ import (
func TestEmitClusterOperatorVersion(t *testing.T) {
ctx := context.Background()

configcli := fake.NewSimpleClientset(
configcli := configfake.NewSimpleClientset(
&configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "console",

@@ -9,7 +9,7 @@ import (

"github.com/golang/mock/gomock"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/client-go/config/clientset/versioned/fake"
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

mock_metrics "github.com/Azure/ARO-RP/pkg/util/mocks/metrics"
@@ -18,7 +18,7 @@ import (
func TestEmitClusterVersionConditions(t *testing.T) {
ctx := context.Background()

configcli := fake.NewSimpleClientset(&configv1.ClusterVersion{
configcli := configfake.NewSimpleClientset(&configv1.ClusterVersion{
ObjectMeta: metav1.ObjectMeta{
Name: "version",
},

@@ -15,7 +15,7 @@ import (
"k8s.io/client-go/kubernetes/fake"

"github.com/Azure/ARO-RP/pkg/api"
pkgoperator "github.com/Azure/ARO-RP/pkg/operator"
"github.com/Azure/ARO-RP/pkg/operator"
mock_metrics "github.com/Azure/ARO-RP/pkg/util/mocks/metrics"
"github.com/Azure/ARO-RP/pkg/util/version"
)
@@ -26,7 +26,7 @@ func TestEmitClusterVersion(t *testing.T) {
cli := fake.NewSimpleClientset(
&appsv1.Deployment{ // metrics expected
ObjectMeta: metav1.ObjectMeta{
Namespace: pkgoperator.Namespace,
Namespace: operator.Namespace,
Name: "aro-operator-master",
Labels: map[string]string{
"version": "test",

@@ -7,15 +7,15 @@ import (
"context"

batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/namespace"
)

var jobConditionsExpected = map[batchv1.JobConditionType]v1.ConditionStatus{
batchv1.JobComplete: v1.ConditionTrue,
batchv1.JobFailed: v1.ConditionFalse,
var jobConditionsExpected = map[batchv1.JobConditionType]corev1.ConditionStatus{
batchv1.JobComplete: corev1.ConditionTrue,
batchv1.JobFailed: corev1.ConditionFalse,
}

func (mon *Monitor) emitJobConditions(ctx context.Context) error {

@@ -6,18 +6,18 @@ package cluster
import (
"context"

v1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var machineConfigPoolConditionsExpected = map[v1.MachineConfigPoolConditionType]corev1.ConditionStatus{
v1.MachineConfigPoolDegraded: corev1.ConditionFalse,
v1.MachineConfigPoolNodeDegraded: corev1.ConditionFalse,
v1.MachineConfigPoolRenderDegraded: corev1.ConditionFalse,
v1.MachineConfigPoolUpdated: corev1.ConditionTrue,
v1.MachineConfigPoolUpdating: corev1.ConditionFalse,
var machineConfigPoolConditionsExpected = map[mcv1.MachineConfigPoolConditionType]corev1.ConditionStatus{
mcv1.MachineConfigPoolDegraded: corev1.ConditionFalse,
mcv1.MachineConfigPoolNodeDegraded: corev1.ConditionFalse,
mcv1.MachineConfigPoolRenderDegraded: corev1.ConditionFalse,
mcv1.MachineConfigPoolUpdated: corev1.ConditionTrue,
mcv1.MachineConfigPoolUpdating: corev1.ConditionFalse,
}

func (mon *Monitor) emitMachineConfigPoolConditions(ctx context.Context) error {

@@ -8,8 +8,8 @@ import (
"testing"

"github.com/golang/mock/gomock"
v1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
"github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcofake "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -19,30 +19,30 @@ import (
func TestEmitMachineConfigPoolConditions(t *testing.T) {
ctx := context.Background()

mcocli := fake.NewSimpleClientset(&v1.MachineConfigPool{
mcocli := mcofake.NewSimpleClientset(&mcv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-config-pool",
},
Status: v1.MachineConfigPoolStatus{
Conditions: []v1.MachineConfigPoolCondition{
Status: mcv1.MachineConfigPoolStatus{
Conditions: []mcv1.MachineConfigPoolCondition{
{
Type: v1.MachineConfigPoolDegraded,
Type: mcv1.MachineConfigPoolDegraded,
Status: corev1.ConditionTrue,
},
{
Type: v1.MachineConfigPoolNodeDegraded,
Type: mcv1.MachineConfigPoolNodeDegraded,
Status: corev1.ConditionTrue,
},
{
Type: v1.MachineConfigPoolRenderDegraded,
Type: mcv1.MachineConfigPoolRenderDegraded,
Status: corev1.ConditionTrue,
},
{
Type: v1.MachineConfigPoolUpdated,
Type: mcv1.MachineConfigPoolUpdated,
Status: corev1.ConditionFalse,
},
{
Type: v1.MachineConfigPoolUpdating,
Type: mcv1.MachineConfigPoolUpdating,
Status: corev1.ConditionTrue,
},
},

@@ -7,14 +7,14 @@ import (
"context"

"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
)

var nodeConditionsExpected = map[v1.NodeConditionType]v1.ConditionStatus{
v1.NodeDiskPressure: v1.ConditionFalse,
v1.NodeMemoryPressure: v1.ConditionFalse,
v1.NodePIDPressure: v1.ConditionFalse,
v1.NodeReady: v1.ConditionTrue,
var nodeConditionsExpected = map[corev1.NodeConditionType]corev1.ConditionStatus{
corev1.NodeDiskPressure: corev1.ConditionFalse,
corev1.NodeMemoryPressure: corev1.ConditionFalse,
corev1.NodePIDPressure: corev1.ConditionFalse,
corev1.NodeReady: corev1.ConditionTrue,
}

func (mon *Monitor) emitNodeConditions(ctx context.Context) error {

@@ -7,17 +7,17 @@ import (
"context"

"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/namespace"
)

var podConditionsExpected = map[v1.PodConditionType]v1.ConditionStatus{
v1.ContainersReady: v1.ConditionTrue,
v1.PodInitialized: v1.ConditionTrue,
v1.PodScheduled: v1.ConditionTrue,
v1.PodReady: v1.ConditionTrue,
var podConditionsExpected = map[corev1.PodConditionType]corev1.ConditionStatus{
corev1.ContainersReady: corev1.ConditionTrue,
corev1.PodInitialized: corev1.ConditionTrue,
corev1.PodScheduled: corev1.ConditionTrue,
corev1.PodReady: corev1.ConditionTrue,
}

func (mon *Monitor) emitPodConditions(ctx context.Context) error {
@@ -40,13 +40,13 @@ func (mon *Monitor) emitPodConditions(ctx context.Context) error {
return nil
}

func (mon *Monitor) _emitPodConditions(ps *v1.PodList) {
func (mon *Monitor) _emitPodConditions(ps *corev1.PodList) {
for _, p := range ps.Items {
if !namespace.IsOpenShift(p.Namespace) {
continue
}

if p.Status.Phase == v1.PodSucceeded {
if p.Status.Phase == corev1.PodSucceeded {
continue
}

@@ -78,13 +78,13 @@ func (mon *Monitor) _emitPodConditions(ps *v1.PodList) {
}
}

func (mon *Monitor) _emitPodContainerStatuses(ps *v1.PodList) {
func (mon *Monitor) _emitPodContainerStatuses(ps *corev1.PodList) {
for _, p := range ps.Items {
if !namespace.IsOpenShift(p.Namespace) {
continue
}

if p.Status.Phase == v1.PodSucceeded {
if p.Status.Phase == corev1.PodSucceeded {
continue
}

@@ -10,7 +10,6 @@ import (
"github.com/ghodss/yaml"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -124,7 +123,7 @@ func (r *AlertWebhookReconciler) SetupWithManager(mgr ctrl.Manager) error {
}

return ctrl.NewControllerManagedBy(mgr).
For(&v1.Secret{}).
For(&corev1.Secret{}).
WithEventFilter(isAlertManager).
Named(controllers.AlertwebhookControllerName).
Complete(r)

@@ -8,7 +8,7 @@ import (
"context"
"testing"

v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
@@ -142,7 +142,7 @@ func TestSetAlertManagerWebhook(t *testing.T) {
{
name: "old cluster",
reconciler: &AlertWebhookReconciler{
kubernetescli: fake.NewSimpleClientset(&v1.Secret{
kubernetescli: fake.NewSimpleClientset(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "alertmanager-main",
Namespace: "openshift-monitoring",
@@ -157,7 +157,7 @@ func TestSetAlertManagerWebhook(t *testing.T) {
{
name: "new cluster",
reconciler: &AlertWebhookReconciler{
kubernetescli: fake.NewSimpleClientset(&v1.Secret{
kubernetescli: fake.NewSimpleClientset(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "alertmanager-main",
Namespace: "openshift-monitoring",

@@ -10,7 +10,7 @@ import (
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -52,7 +52,7 @@ func (r *MachineConfigReconciler) Reconcile(request ctrl.Request) (ctrl.Result,
role := m[1]

_, err := r.mcocli.MachineconfigurationV1().MachineConfigPools().Get(ctx, role, metav1.GetOptions{})
if errors.IsNotFound(err) {
if kerrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
if err != nil {

@@ -9,7 +9,7 @@ import (
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -43,7 +43,7 @@ func (r *MachineConfigPoolReconciler) Reconcile(request ctrl.Request) (ctrl.Resu
ctx := context.Background()

_, err := r.mcocli.MachineconfigurationV1().MachineConfigPools().Get(ctx, request.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
if kerrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
if err != nil {

@@ -12,7 +12,7 @@ import (
projectv1 "github.com/openshift/api/project/v1"
securityv1 "github.com/openshift/api/security/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -55,34 +55,34 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "mdsd"},
},
Template: v1.PodTemplateSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "mdsd"},
Annotations: map[string]string{"scheduler.alpha.kubernetes.io/critical-pod": ""},
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "log",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/log",
},
},
},
{
Name: "fluent",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/lib/fluent",
},
},
},
{
Name: "fluent-config",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "fluent-config",
},
},
@@ -90,16 +90,16 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
},
{
Name: "machine-id",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc/machine-id",
},
},
},
{
Name: "certificates",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: certificatesSecretName,
},
},
@@ -107,17 +107,17 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
},
ServiceAccountName: "geneva",
DeprecatedServiceAccount: "geneva",
Tolerations: []v1.Toleration{
Tolerations: []corev1.Toleration{
{
Effect: v1.TaintEffectNoExecute,
Operator: v1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
Operator: corev1.TolerationOpExists,
},
{
Effect: v1.TaintEffectNoSchedule,
Operator: v1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
Operator: corev1.TolerationOpExists,
},
},
Containers: []v1.Container{
Containers: []corev1.Container{
{
Name: "fluentbit",
Image: version.FluentbitImage(cluster.Spec.ACRDomain),
@@ -129,11 +129,11 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
"/etc/td-agent-bit/fluent.conf",
},
// TODO: specify requests/limits
SecurityContext: &v1.SecurityContext{
SecurityContext: &corev1.SecurityContext{
Privileged: to.BoolPtr(true),
RunAsUser: to.Int64Ptr(0),
},
VolumeMounts: []v1.VolumeMount{
VolumeMounts: []corev1.VolumeMount{
{
Name: "fluent-config",
ReadOnly: true,
@@ -169,7 +169,7 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
"-r",
"/var/run/mdsd/default",
},
Env: []v1.EnvVar{
Env: []corev1.EnvVar{
{
Name: "MONITORING_GCS_ENVIRONMENT",
Value: cluster.Spec.GenevaLogging.MonitoringGCSEnvironment,
@@ -212,8 +212,8 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
},
{
Name: "MONITORING_ROLE_INSTANCE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
@@ -236,21 +236,21 @@ func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*apps
Value: strings.ToLower(r.ResourceName),
},
},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("1000Mi"),
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("200m"),
corev1.ResourceMemory: resource.MustParse("1000Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("100Mi"),
},
},
SecurityContext: &v1.SecurityContext{
SecurityContext: &corev1.SecurityContext{
Privileged: to.BoolPtr(true),
RunAsUser: to.Int64Ptr(0),
},
VolumeMounts: []v1.VolumeMount{
VolumeMounts: []corev1.VolumeMount{
{
Name: "certificates",
MountPath: "/etc/mdsd.d/secret",
@@ -276,13 +276,13 @@ func (g *GenevaloggingReconciler) resources(ctx context.Context, cluster *arov1a
}

return []runtime.Object{
&v1.Namespace{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubeNamespace,
Annotations: map[string]string{projectv1.ProjectNodeSelector: ""},
},
},
&v1.Secret{
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: certificatesSecretName,
Namespace: kubeNamespace,
@@ -292,7 +292,7 @@ func (g *GenevaloggingReconciler) resources(ctx context.Context, cluster *arov1a
GenevaKeyName: gcskey,
},
},
&v1.ConfigMap{
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "fluent-config",
Namespace: kubeNamespace,
@@ -302,7 +302,7 @@ func (g *GenevaloggingReconciler) resources(ctx context.Context, cluster *arov1a
"parsers.conf": parsersConf,
},
},
&v1.ServiceAccount{
&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "geneva",
Namespace: kubeNamespace,

@@ -10,8 +10,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/ugorji/go/codec"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -166,10 +165,10 @@ func (r *Reconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
})
}

func (r *Reconciler) monitoringConfigMap(ctx context.Context) (*v1.ConfigMap, bool, error) {
func (r *Reconciler) monitoringConfigMap(ctx context.Context) (*corev1.ConfigMap, bool, error) {
cm, err := r.kubernetescli.CoreV1().ConfigMaps(monitoringName.Namespace).Get(ctx, monitoringName.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return &v1.ConfigMap{
if kerrors.IsNotFound(err) {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: monitoringName.Name,
Namespace: monitoringName.Namespace,
@@ -227,7 +226,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&arov1alpha1.Cluster{}).
// https://github.com/kubernetes-sigs/controller-runtime/issues/1173
// equivalent to For(&v1.ConfigMap{})., but can't call For multiple times on one builder
Watches(&source.Kind{Type: &v1.ConfigMap{}}, &handler.EnqueueRequestForObject{}).
Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}).
WithEventFilter(isMonitoringConfigMap).
Named(controllers.MonitoringControllerName).
Complete(r)

@@ -10,7 +10,7 @@ import (

"github.com/sirupsen/logrus"
"github.com/ugorji/go/codec"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
ctrl "sigs.k8s.io/controller-runtime"
@@ -43,7 +43,7 @@ func TestReconcileMonitoringConfig(t *testing.T) {
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{}),
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{}),
log: log,
jsonHandle: new(codec.JsonHandle),
}
@@ -70,7 +70,7 @@ prometheusK8s:
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: cmMetadata,
}),
log: log,
@@ -99,7 +99,7 @@ prometheusK8s:
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: cmMetadata,
Data: map[string]string{
"config.yaml": ``,
@@ -131,7 +131,7 @@ prometheusK8s:
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: cmMetadata,
Data: map[string]string{
"config.yaml": `
@@ -175,7 +175,7 @@ prometheusK8s:
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: cmMetadata,
Data: map[string]string{
"config.yaml": `
@@ -214,7 +214,7 @@ prometheusK8s:
},
},
}),
kubernetescli: fake.NewSimpleClientset(&v1.ConfigMap{
kubernetescli: fake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: cmMetadata,
Data: map[string]string{
"config.yaml": `

@@ -9,8 +9,7 @@ import (

"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -76,24 +75,24 @@ func (r *PullSecretReconciler) Reconcile(request ctrl.Request) (ctrl.Result, err
}

// validate
if !json.Valid(ps.Data[v1.DockerConfigJsonKey]) {
if !json.Valid(ps.Data[corev1.DockerConfigJsonKey]) {
r.log.Info("pull secret is not valid json - recreating")
delete(ps.Data, v1.DockerConfigJsonKey)
delete(ps.Data, corev1.DockerConfigJsonKey)
}

pullsec, changed, err := pullsecret.Merge(string(ps.Data[corev1.DockerConfigJsonKey]), string(mysec.Data[v1.DockerConfigJsonKey]))
pullsec, changed, err := pullsecret.Merge(string(ps.Data[corev1.DockerConfigJsonKey]), string(mysec.Data[corev1.DockerConfigJsonKey]))
if err != nil {
return err
}

// repair Secret type
if ps.Type != v1.SecretTypeDockerConfigJson {
ps = &v1.Secret{
if ps.Type != corev1.SecretTypeDockerConfigJson {
ps = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName.Name,
Namespace: pullSecretName.Namespace,
},
Type: v1.SecretTypeDockerConfigJson,
Type: corev1.SecretTypeDockerConfigJson,
Data: map[string][]byte{},
}
isCreate = true
@@ -126,15 +125,15 @@ func (r *PullSecretReconciler) Reconcile(request ctrl.Request) (ctrl.Result, err
})
}

func (r *PullSecretReconciler) pullsecret(ctx context.Context) (*v1.Secret, bool, error) {
func (r *PullSecretReconciler) pullsecret(ctx context.Context) (*corev1.Secret, bool, error) {
ps, err := r.kubernetescli.CoreV1().Secrets(pullSecretName.Namespace).Get(ctx, pullSecretName.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return &v1.Secret{
if kerrors.IsNotFound(err) {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName.Name,
Namespace: pullSecretName.Namespace,
},
Type: v1.SecretTypeDockerConfigJson,
Type: corev1.SecretTypeDockerConfigJson,
}, true, nil
}
if err != nil {
@@ -184,8 +183,8 @@ func (r *PullSecretReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&arov1alpha1.Cluster{}).
// https://github.com/kubernetes-sigs/controller-runtime/issues/1173
// equivalent to For(&v1.Secret{})., but can't call For multiple times on one builder
Watches(&source.Kind{Type: &v1.Secret{}}, &handler.EnqueueRequestForObject{}).
Owns(&v1.Secret{}).
Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}).
Owns(&corev1.Secret{}).
WithEventFilter(isPullSecret).
Named(controllers.PullSecretControllerName).
Complete(r)

@@ -8,7 +8,7 @@ import (
"testing"

"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -19,12 +19,12 @@ import (
)

func TestPullSecretReconciler(t *testing.T) {
newFakecli := func(s *v1.Secret, c *v1.Secret) *fake.Clientset {
newFakecli := func(s *corev1.Secret, c *corev1.Secret) *fake.Clientset {
c.ObjectMeta = metav1.ObjectMeta{
Name: operator.SecretName,
Namespace: operator.Namespace,
}
c.Type = v1.SecretTypeOpaque
c.Type = corev1.SecretTypeOpaque
if s == nil {
return fake.NewSimpleClientset(c)
}
@@ -34,7 +34,7 @@ func TestPullSecretReconciler(t *testing.T) {
Namespace: "openshift-config",
}
if s.Type == "" {
s.Type = v1.SecretTypeDockerConfigJson
s.Type = corev1.SecretTypeDockerConfigJson
}
return fake.NewSimpleClientset(s, c)
}
@@ -50,51 +50,51 @@ func TestPullSecretReconciler(t *testing.T) {
}{
{
name: "deleted pull secret",
fakecli: newFakecli(nil, &v1.Secret{Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
fakecli: newFakecli(nil, &corev1.Secret{Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
wantCreated: true,
},
{
name: "missing arosvc pull secret",
fakecli: newFakecli(&v1.Secret{}, &v1.Secret{Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
fakecli: newFakecli(&corev1.Secret{}, &corev1.Secret{Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
wantUpdated: true,
},
{
name: "modified arosvc pull secret",
fakecli: newFakecli(&v1.Secret{
fakecli: newFakecli(&corev1.Secret{
Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":""}}}`),
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":""}}}`),
},
}, &v1.Secret{
}, &corev1.Secret{
Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
wantUpdated: true,
},
{
name: "unparseable secret",
fakecli: newFakecli(&v1.Secret{
fakecli: newFakecli(&corev1.Secret{
Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`bad`),
corev1.DockerConfigJsonKey: []byte(`bad`),
},
}, &v1.Secret{Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}, &corev1.Secret{Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
wantUpdated: true,
},
{
name: "wrong secret type",
fakecli: newFakecli(&v1.Secret{
Type: v1.SecretTypeOpaque,
}, &v1.Secret{Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
fakecli: newFakecli(&corev1.Secret{
Type: corev1.SecretTypeOpaque,
}, &corev1.Secret{Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
wantCreated: true,
@@ -102,12 +102,12 @@ func TestPullSecretReconciler(t *testing.T) {
},
{
name: "no change",
fakecli: newFakecli(&v1.Secret{
fakecli: newFakecli(&corev1.Secret{
Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
},
}, &v1.Secret{Data: map[string][]byte{
v1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}, &corev1.Secret{Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`),
}}),
want: `{"auths":{"arosvc.azurecr.io":{"auth":"ZnJlZDplbnRlcg=="}}}`,
},
@@ -162,12 +162,12 @@ func TestPullSecretReconciler(t *testing.T) {
t.Error(err)
}

if s.Type != v1.SecretTypeDockerConfigJson {
if s.Type != corev1.SecretTypeDockerConfigJson {
t.Error(s.Type)
}

if string(s.Data[v1.DockerConfigJsonKey]) != tt.want {
t.Error(string(s.Data[v1.DockerConfigJsonKey]))
if string(s.Data[corev1.DockerConfigJsonKey]) != tt.want {
t.Error(string(s.Data[corev1.DockerConfigJsonKey]))
}
})
}

@@ -10,7 +10,7 @@ import (
projectv1 "github.com/openshift/api/project/v1"
securityv1 "github.com/openshift/api/security/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -51,7 +51,7 @@ func (r *RouteFixReconciler) resources(ctx context.Context, cluster *arov1alpha1
return nil, err
}
return []runtime.Object{
&v1.Namespace{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubeNamespace,
Annotations: map[string]string{projectv1.ProjectNodeSelector: ""},
@@ -67,12 +67,12 @@ func (r *RouteFixReconciler) resources(ctx context.Context, cluster *arov1alpha1
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "routefix"},
},
Template: v1.PodTemplateSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "routefix"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: kubeName,
Image: version.RouteFixImage(cluster.Spec.ACRDomain),
@@ -82,20 +82,20 @@ func (r *RouteFixReconciler) resources(ctx context.Context, cluster *arov1alpha1
shellScript,
},
// TODO: specify requests/limits
SecurityContext: &v1.SecurityContext{
SecurityContext: &corev1.SecurityContext{
Privileged: to.BoolPtr(true),
},
},
},
HostNetwork: true,
Tolerations: []v1.Toleration{
Tolerations: []corev1.Toleration{
{
Effect: v1.TaintEffectNoExecute,
Operator: v1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
Operator: corev1.TolerationOpExists,
},
{
Effect: v1.TaintEffectNoSchedule,
Operator: v1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
Operator: corev1.TolerationOpExists,
},
},
},

@@ -7,7 +7,7 @@ import (
"context"

"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"

@@ -36,7 +36,7 @@ func (*ifReload) Ensure(ctx context.Context) error {

func (i *ifReload) Remove(ctx context.Context) error {
err := i.cli.CoreV1().Namespaces().Delete(ctx, kubeNamespace, metav1.DeleteOptions{})
if errors.IsNotFound(err) {
if kerrors.IsNotFound(err) {
return nil
}
return err

@@ -10,7 +10,7 @@ import (

"github.com/golang/mock/gomock"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
fakemcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
mcofake "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ktesting "k8s.io/client-go/testing"
@@ -23,14 +23,14 @@ import (
func TestSystemreservedEnsure(t *testing.T) {
tests := []struct {
name string
mcocli *fakemcoclient.Clientset
mcocli *mcofake.Clientset
mocker func(mdh *mock_dynamichelper.MockInterface)
machineConfigPoolNeedsUpdate bool
wantErr bool
}{
{
name: "first time create",
mcocli: fakemcoclient.NewSimpleClientset(&mcv1.MachineConfigPool{
mcocli: mcofake.NewSimpleClientset(&mcv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{
Name: "worker",
},
@@ -42,7 +42,7 @@ func TestSystemreservedEnsure(t *testing.T) {
},
{
name: "nothing to be done",
mcocli: fakemcoclient.NewSimpleClientset(&mcv1.MachineConfigPool{
mcocli: mcofake.NewSimpleClientset(&mcv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{
Name: "worker",
Labels: map[string]string{labelName: labelValue},

@@ -11,7 +11,7 @@ import (

"github.com/golang/mock/gomock"
configv1 "github.com/openshift/api/config/v1"
fakeconfigclient "github.com/openshift/client-go/config/clientset/versioned/fake"
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -80,7 +80,7 @@ func TestWorkaroundReconciler(t *testing.T) {

mwa := mock_workaround.NewMockWorkaround(controller)
r := &WorkaroundReconciler{
configcli: fakeconfigclient.NewSimpleClientset(clusterVersion("4.4.10")),
configcli: configfake.NewSimpleClientset(clusterVersion("4.4.10")),
workarounds: []Workaround{mwa},
log: utillog.GetLogger(),
}

@ -13,10 +13,9 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
@ -38,7 +37,7 @@ import (
|
|||
"github.com/Azure/ARO-RP/pkg/util/ready"
|
||||
"github.com/Azure/ARO-RP/pkg/util/restconfig"
|
||||
"github.com/Azure/ARO-RP/pkg/util/subnet"
|
||||
"github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
"github.com/Azure/ARO-RP/pkg/util/version"
|
||||
)
|
||||
|
||||
|
@ -116,12 +115,12 @@ func (o *operator) resources() ([]runtime.Object, error) {
|
|||
}
|
||||
// then dynamic resources
|
||||
key, cert := o.env.ClusterGenevaLoggingSecret()
|
||||
gcsKeyBytes, err := tls.PrivateKeyAsBytes(key)
|
||||
gcsKeyBytes, err := utiltls.PrivateKeyAsBytes(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gcsCertBytes, err := tls.CertAsBytes(cert)
|
||||
gcsCertBytes, err := utiltls.CertAsBytes(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +161,7 @@ func (o *operator) resources() ([]runtime.Object, error) {
|
|||
Data: map[string][]byte{
|
||||
genevalogging.GenevaCertName: gcsCertBytes,
|
||||
genevalogging.GenevaKeyName: gcsKeyBytes,
|
||||
v1.DockerConfigJsonKey: []byte(ps),
|
||||
corev1.DockerConfigJsonKey: []byte(ps),
|
||||
},
|
||||
},
|
||||
&arov1alpha1.Cluster{
|
||||
|
@ -259,7 +258,7 @@ func (o *operator) CreateOrUpdate(ctx context.Context) error {
|
|||
// RESTMapping for APIVersion aro.openshift.io/v1alpha1 Kind
|
||||
// Cluster: no matches for kind "Cluster" in version
|
||||
// "aro.openshift.io/v1alpha1"
|
||||
return errors.IsForbidden(err) || errors.IsConflict(err)
|
||||
return kerrors.IsForbidden(err) || kerrors.IsConflict(err)
|
||||
}, func() error {
|
||||
cluster, err := o.arocli.AroV1alpha1().Clusters().Get(ctx, arov1alpha1.SingletonClusterName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
|
@ -300,11 +299,11 @@ func (o *operator) IsReady(ctx context.Context) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func isCRDEstablished(crd *extv1.CustomResourceDefinition) bool {
|
||||
m := make(map[extv1.CustomResourceDefinitionConditionType]extv1.ConditionStatus, len(crd.Status.Conditions))
|
||||
func isCRDEstablished(crd *extensionsv1.CustomResourceDefinition) bool {
|
||||
m := make(map[extensionsv1.CustomResourceDefinitionConditionType]extensionsv1.ConditionStatus, len(crd.Status.Conditions))
|
||||
for _, cond := range crd.Status.Conditions {
|
||||
m[cond.Type] = cond.Status
|
||||
}
|
||||
return m[extv1.Established] == extv1.ConditionTrue &&
|
||||
m[extv1.NamesAccepted] == extv1.ConditionTrue
|
||||
return m[extensionsv1.Established] == extensionsv1.ConditionTrue &&
|
||||
m[extensionsv1.NamesAccepted] == extensionsv1.ConditionTrue
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
"github.com/gorilla/mux"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/pkg/api/validate"
|
||||
|
@ -145,13 +145,13 @@ func (k *kubeconfig) internalServerError(w http.ResponseWriter, err error) {
|
|||
}
|
||||
|
||||
func (k *kubeconfig) makeKubeconfig(server, token string) ([]byte, error) {
|
||||
return json.MarshalIndent(&v1.Config{
|
||||
return json.MarshalIndent(&clientcmdv1.Config{
|
||||
APIVersion: "v1",
|
||||
Kind: "Config",
|
||||
Clusters: []v1.NamedCluster{
|
||||
Clusters: []clientcmdv1.NamedCluster{
|
||||
{
|
||||
Name: "cluster",
|
||||
Cluster: v1.Cluster{
|
||||
Cluster: clientcmdv1.Cluster{
|
||||
Server: server,
|
||||
CertificateAuthorityData: pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
|
@ -160,18 +160,18 @@ func (k *kubeconfig) makeKubeconfig(server, token string) ([]byte, error) {
|
|||
},
|
||||
},
|
||||
},
|
||||
AuthInfos: []v1.NamedAuthInfo{
|
||||
AuthInfos: []clientcmdv1.NamedAuthInfo{
|
||||
{
|
||||
Name: "user",
|
||||
AuthInfo: v1.AuthInfo{
|
||||
AuthInfo: clientcmdv1.AuthInfo{
|
||||
Token: token,
|
||||
},
|
||||
},
|
||||
},
|
||||
Contexts: []v1.NamedContext{
|
||||
Contexts: []clientcmdv1.NamedContext{
|
||||
{
|
||||
Name: "context",
|
||||
Context: v1.Context{
|
||||
Context: clientcmdv1.Context{
|
||||
Cluster: "cluster",
|
||||
Namespace: "default",
|
||||
AuthInfo: "user",
|
||||
|
|
|
@ -14,13 +14,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/pkg/api/validate"
|
||||
"github.com/Azure/ARO-RP/pkg/portal/middleware"
|
||||
"github.com/Azure/ARO-RP/pkg/portal/util/responsewriter"
|
||||
"github.com/Azure/ARO-RP/pkg/util/pem"
|
||||
utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
|
||||
"github.com/Azure/ARO-RP/pkg/util/restconfig"
|
||||
)
|
||||
|
||||
|
@ -108,7 +108,7 @@ func (k *kubeconfig) cli(ctx context.Context, resourceID string, elevated bool)
|
|||
return nil, fmt.Errorf("kubeconfig is nil")
|
||||
}
|
||||
|
||||
var kubeconfig *v1.Config
|
||||
var kubeconfig *clientcmdv1.Config
|
||||
err = yaml.Unmarshal(kc, &kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -118,12 +118,12 @@ func (k *kubeconfig) cli(ctx context.Context, resourceID string, elevated bool)
|
|||
b = append(b, kubeconfig.AuthInfos[0].AuthInfo.ClientKeyData...)
|
||||
b = append(b, kubeconfig.AuthInfos[0].AuthInfo.ClientCertificateData...)
|
||||
|
||||
clientKey, clientCerts, err := pem.Parse(b)
|
||||
clientKey, clientCerts, err := utilpem.Parse(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, caCerts, err := pem.Parse(kubeconfig.Clusters[0].Cluster.CertificateAuthorityData)
|
||||
_, caCerts, err := utilpem.Parse(kubeconfig.Clusters[0].Cluster.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ import (
|
|||
"github.com/golang/mock/gomock"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
|
||||
|
@ -60,11 +60,11 @@ func fakeServer(cacerts []*x509.Certificate, serverkey *rsa.PrivateKey, serverce
|
|||
}
|
||||
|
||||
func testKubeconfig(cacerts []*x509.Certificate, clientkey *rsa.PrivateKey, clientcerts []*x509.Certificate) ([]byte, error) {
|
||||
kc := &v1.Config{
|
||||
Clusters: []v1.NamedCluster{
|
||||
kc := &clientcmdv1.Config{
|
||||
Clusters: []clientcmdv1.NamedCluster{
|
||||
{},
|
||||
},
|
||||
AuthInfos: []v1.NamedAuthInfo{
|
||||
AuthInfos: []clientcmdv1.NamedAuthInfo{
|
||||
{},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ import (
|
|||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/httpstream"
|
||||
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
|
||||
|
@ -136,15 +136,15 @@ func fakeServer(cacerts []*x509.Certificate, serverkey *rsa.PrivateKey, serverce
|
|||
}
|
||||
|
||||
func testKubeconfig(cacerts []*x509.Certificate, clientkey *rsa.PrivateKey, clientcerts []*x509.Certificate) ([]byte, error) {
|
||||
kc := &v1.Config{
|
||||
Clusters: []v1.NamedCluster{
|
||||
kc := &clientcmdv1.Config{
|
||||
Clusters: []clientcmdv1.NamedCluster{
|
||||
{
|
||||
Cluster: v1.Cluster{
|
||||
Cluster: clientcmdv1.Cluster{
|
||||
Server: "https://kubernetes:6443",
|
||||
},
|
||||
},
|
||||
},
|
||||
AuthInfos: []v1.NamedAuthInfo{
|
||||
AuthInfos: []clientcmdv1.NamedAuthInfo{
|
||||
{},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
|
||||
mock_proxy "github.com/Azure/ARO-RP/pkg/util/mocks/proxy"
|
||||
"github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
testdatabase "github.com/Azure/ARO-RP/test/database"
|
||||
"github.com/Azure/ARO-RP/test/util/bufferedpipe"
|
||||
"github.com/Azure/ARO-RP/test/util/listener"
|
||||
|
@ -84,7 +84,7 @@ func fakeServer(clientKey *rsa.PublicKey) (*listener.Listener, error) {
|
|||
},
|
||||
}
|
||||
|
||||
key, _, err := tls.GenerateKeyAndCertificate("server", nil, nil, false, false)
|
||||
key, _, err := utiltls.GenerateKeyAndCertificate("server", nil, nil, false, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -143,12 +143,12 @@ func TestProxy(t *testing.T) {
|
|||
resourceID := "/subscriptions/" + subscriptionID + "/resourcegroups/" + resourceGroup + "/providers/microsoft.redhatopenshift/openshiftclusters/" + resourceName
|
||||
privateEndpointIP := "1.2.3.4"
|
||||
|
||||
hostKey, _, err := tls.GenerateKeyAndCertificate("proxy", nil, nil, false, false)
|
||||
hostKey, _, err := utiltls.GenerateKeyAndCertificate("proxy", nil, nil, false, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clusterKey, _, err := tls.GenerateKeyAndCertificate("cluster", nil, nil, false, false)
|
||||
clusterKey, _, err := utiltls.GenerateKeyAndCertificate("cluster", nil, nil, false, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
"github.com/Azure/ARO-RP/pkg/portal/util/responsewriter"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
|
||||
"github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
utiltls "github.com/Azure/ARO-RP/pkg/util/tls"
|
||||
testdatabase "github.com/Azure/ARO-RP/test/database"
|
||||
)
|
||||
|
||||
|
@ -33,7 +33,7 @@ func TestNew(t *testing.T) {
|
|||
password := "password"
|
||||
master := 0
|
||||
|
||||
hostKey, _, err := tls.GenerateKeyAndCertificate("proxy", nil, nil, false, false)
|
||||
hostKey, _, err := utiltls.GenerateKeyAndCertificate("proxy", nil, nil, false, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@ package graphrbac
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
azgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
@ -14,20 +14,20 @@ import (
|
|||
// ApplicationsClient is a minimal interface for azure ApplicationsClient
|
||||
type ApplicationsClient interface {
|
||||
ApplicationsClientAddons
|
||||
Create(ctx context.Context, parameters graphrbac.ApplicationCreateParameters) (result graphrbac.Application, err error)
|
||||
GetServicePrincipalsIDByAppID(ctx context.Context, applicationID string) (result graphrbac.ServicePrincipalObjectResult, err error)
|
||||
Create(ctx context.Context, parameters azgraphrbac.ApplicationCreateParameters) (result azgraphrbac.Application, err error)
|
||||
GetServicePrincipalsIDByAppID(ctx context.Context, applicationID string) (result azgraphrbac.ServicePrincipalObjectResult, err error)
|
||||
Delete(ctx context.Context, applicationObjectID string) (result autorest.Response, err error)
|
||||
}
|
||||
|
||||
type applicationsClient struct {
|
||||
graphrbac.ApplicationsClient
|
||||
azgraphrbac.ApplicationsClient
|
||||
}
|
||||
|
||||
var _ ApplicationsClient = &applicationsClient{}
|
||||
|
||||
// NewApplicationsClient creates a new ApplicationsClient
|
||||
func NewApplicationsClient(environment *azure.Environment, tenantID string, authorizer autorest.Authorizer) ApplicationsClient {
|
||||
client := graphrbac.NewApplicationsClientWithBaseURI(environment.GraphEndpoint, tenantID)
|
||||
client := azgraphrbac.NewApplicationsClientWithBaseURI(environment.GraphEndpoint, tenantID)
|
||||
client.Authorizer = authorizer
|
||||
|
||||
return &applicationsClient{
|
||||
|
|
|
@ -6,15 +6,15 @@ package graphrbac
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
azgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
)
|
||||
|
||||
// ApplicationsClientAddons is a minimal interface for azure ApplicationsClient
|
||||
type ApplicationsClientAddons interface {
|
||||
List(ctx context.Context, filter string) (result []graphrbac.Application, err error)
|
||||
List(ctx context.Context, filter string) (result []azgraphrbac.Application, err error)
|
||||
}
|
||||
|
||||
func (sc *applicationsClient) List(ctx context.Context, filter string) (result []graphrbac.Application, err error) {
|
||||
func (sc *applicationsClient) List(ctx context.Context, filter string) (result []azgraphrbac.Application, err error) {
|
||||
page, err := sc.ApplicationsClient.List(ctx, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -6,7 +6,7 @@ package graphrbac
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
azgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
@ -14,18 +14,18 @@ import (
|
|||
// ServicePrincipalClient is a minimal interface for azure ApplicationsClient
|
||||
type ServicePrincipalClient interface {
|
||||
ServicePrincipalClientAddons
|
||||
Create(ctx context.Context, parameters graphrbac.ServicePrincipalCreateParameters) (result graphrbac.ServicePrincipal, err error)
|
||||
Create(ctx context.Context, parameters azgraphrbac.ServicePrincipalCreateParameters) (result azgraphrbac.ServicePrincipal, err error)
|
||||
}
|
||||
|
||||
type servicePrincipalClient struct {
|
||||
graphrbac.ServicePrincipalsClient
|
||||
azgraphrbac.ServicePrincipalsClient
|
||||
}
|
||||
|
||||
var _ ServicePrincipalClient = &servicePrincipalClient{}
|
||||
|
||||
// NewServicePrincipalClient creates a new ServicePrincipalClient
|
||||
func NewServicePrincipalClient(environment *azure.Environment, tenantID string, authorizer autorest.Authorizer) ServicePrincipalClient {
|
||||
client := graphrbac.NewServicePrincipalsClientWithBaseURI(environment.GraphEndpoint, tenantID)
|
||||
client := azgraphrbac.NewServicePrincipalsClientWithBaseURI(environment.GraphEndpoint, tenantID)
|
||||
client.Authorizer = authorizer
|
||||
|
||||
return &servicePrincipalClient{
|
||||
|
|
|
@ -6,15 +6,15 @@ package graphrbac
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
azgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
)
|
||||
|
||||
// ServicePrincipalClientAddons is a minimal interface for azure ServicePrincipalClient
|
||||
type ServicePrincipalClientAddons interface {
|
||||
List(ctx context.Context, filter string) (result []graphrbac.ServicePrincipal, err error)
|
||||
List(ctx context.Context, filter string) (result []azgraphrbac.ServicePrincipal, err error)
|
||||
}
|
||||
|
||||
func (sc *servicePrincipalClient) List(ctx context.Context, filter string) (result []graphrbac.ServicePrincipal, err error) {
|
||||
func (sc *servicePrincipalClient) List(ctx context.Context, filter string) (result []azgraphrbac.ServicePrincipal, err error) {
|
||||
page, err := sc.ServicePrincipalsClient.List(ctx, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -6,30 +6,30 @@ package keyvault
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
|
||||
azkeyvault "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
// BaseClient is a minimal interface for azure BaseClient
|
||||
type BaseClient interface {
|
||||
CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters keyvault.CertificateCreateParameters) (result keyvault.CertificateOperation, err error)
|
||||
DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result keyvault.DeletedCertificateBundle, err error)
|
||||
GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result keyvault.CertificateOperation, err error)
|
||||
GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result keyvault.SecretBundle, err error)
|
||||
GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result keyvault.CertificateListResultPage, err error)
|
||||
SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters keyvault.SecretSetParameters) (result keyvault.SecretBundle, err error)
|
||||
CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters azkeyvault.CertificateCreateParameters) (result azkeyvault.CertificateOperation, err error)
|
||||
DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result azkeyvault.DeletedCertificateBundle, err error)
|
||||
GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result azkeyvault.CertificateOperation, err error)
|
||||
GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result azkeyvault.SecretBundle, err error)
|
||||
GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result azkeyvault.CertificateListResultPage, err error)
|
||||
SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters azkeyvault.SecretSetParameters) (result azkeyvault.SecretBundle, err error)
|
||||
BaseClientAddons
|
||||
}
|
||||
|
||||
type baseClient struct {
|
||||
keyvault.BaseClient
|
||||
azkeyvault.BaseClient
|
||||
}
|
||||
|
||||
var _ BaseClient = &baseClient{}
|
||||
|
||||
// New creates a new BaseClient
|
||||
func New(authorizer autorest.Authorizer) BaseClient {
|
||||
client := keyvault.New()
|
||||
client := azkeyvault.New()
|
||||
client.Authorizer = authorizer
|
||||
|
||||
return &baseClient{
|
||||
|
|
|
@ -6,15 +6,15 @@ package keyvault
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
|
||||
azkeyvault "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
|
||||
)
|
||||
|
||||
// BaseClientAddons contains addons for BaseClient
|
||||
type BaseClientAddons interface {
|
||||
GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (secrets []keyvault.SecretItem, err error)
|
||||
GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (secrets []azkeyvault.SecretItem, err error)
|
||||
}
|
||||
|
||||
func (c *baseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (secrets []keyvault.SecretItem, err error) {
|
||||
func (c *baseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (secrets []azkeyvault.SecretItem, err error) {
|
||||
page, err := c.BaseClient.GetSecrets(ctx, vaultBaseURL, maxresults)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -4,7 +4,7 @@ package insights
|
|||
// Licensed under the Apache License 2.0.
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
|
||||
mgmtinsights "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
@ -15,14 +15,14 @@ type ActivityLogsClient interface {
|
|||
}
|
||||
|
||||
type activityLogsClient struct {
|
||||
insights.ActivityLogsClient
|
||||
mgmtinsights.ActivityLogsClient
|
||||
}
|
||||
|
||||
var _ ActivityLogsClient = &activityLogsClient{}
|
||||
|
||||
// NewActivityLogsClient creates a new ActivityLogsClient
|
||||
func NewActivityLogsClient(environment *azure.Environment, subscriptionID string, authorizer autorest.Authorizer) ActivityLogsClient {
|
||||
client := insights.NewActivityLogsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client := mgmtinsights.NewActivityLogsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client.Authorizer = authorizer
|
||||
|
||||
return &activityLogsClient{
|
||||
|
|
|
@ -6,15 +6,15 @@ package insights
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
|
||||
mgmtinsights "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
|
||||
)
|
||||
|
||||
// ActivityLogsClientAddons contains addons for ActivityLogsClient
|
||||
type ActivityLogsClientAddons interface {
|
||||
List(ctx context.Context, filter string, selectParameter string) (result []insights.EventData, err error)
|
||||
List(ctx context.Context, filter string, selectParameter string) (result []mgmtinsights.EventData, err error)
|
||||
}
|
||||
|
||||
func (c *activityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result []insights.EventData, err error) {
|
||||
func (c *activityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result []mgmtinsights.EventData, err error) {
|
||||
page, err := c.ActivityLogsClient.List(ctx, filter, selectParameter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -12,28 +12,28 @@ import (
|
|||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtredhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
)
|
||||
|
||||
// OpenShiftClustersClient is a minimal interface for azure OpenshiftClustersClient
|
||||
type OpenShiftClustersClient interface {
|
||||
ListCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftClusterCredentials, err error)
|
||||
Get(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftCluster, err error)
|
||||
ListCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result mgmtredhatopenshift20200430.OpenShiftClusterCredentials, err error)
|
||||
Get(ctx context.Context, resourceGroupName string, resourceName string) (result mgmtredhatopenshift20200430.OpenShiftCluster, err error)
|
||||
OpenShiftClustersClientAddons
|
||||
}
|
||||
|
||||
type openShiftClustersClient struct {
|
||||
redhatopenshift.OpenShiftClustersClient
|
||||
mgmtredhatopenshift20200430.OpenShiftClustersClient
|
||||
}
|
||||
|
||||
var _ OpenShiftClustersClient = &openShiftClustersClient{}
|
||||
|
||||
// NewOpenShiftClustersClient creates a new OpenShiftClustersClient
|
||||
func NewOpenShiftClustersClient(environment *azure.Environment, subscriptionID string, authorizer autorest.Authorizer) OpenShiftClustersClient {
|
||||
var client redhatopenshift.OpenShiftClustersClient
|
||||
var client mgmtredhatopenshift20200430.OpenShiftClustersClient
|
||||
if deployment.NewMode() == deployment.Development {
|
||||
client = redhatopenshift.NewOpenShiftClustersClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client = mgmtredhatopenshift20200430.NewOpenShiftClustersClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client.Sender = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
|
@ -42,7 +42,7 @@ func NewOpenShiftClustersClient(environment *azure.Environment, subscriptionID s
|
|||
},
|
||||
}
|
||||
} else {
|
||||
client = redhatopenshift.NewOpenShiftClustersClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client = mgmtredhatopenshift20200430.NewOpenShiftClustersClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client.Authorizer = authorizer
|
||||
}
|
||||
client.PollingDelay = 10 * time.Second
|
||||
|
|
|
@ -6,18 +6,18 @@ package redhatopenshift
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtredhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
)
|
||||
|
||||
// OpenShiftClustersClientAddons contains addons for OpenShiftClustersClient
|
||||
type OpenShiftClustersClientAddons interface {
|
||||
CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftCluster) error
|
||||
CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20200430.OpenShiftCluster) error
|
||||
DeleteAndWait(ctx context.Context, resourceGroupName string, resourceName string) error
|
||||
List(ctx context.Context) (clusters []redhatopenshift.OpenShiftCluster, err error)
|
||||
ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []redhatopenshift.OpenShiftCluster, err error)
|
||||
List(ctx context.Context) (clusters []mgmtredhatopenshift20200430.OpenShiftCluster, err error)
|
||||
ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []mgmtredhatopenshift20200430.OpenShiftCluster, err error)
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftCluster) error {
|
||||
func (c *openShiftClustersClient) CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20200430.OpenShiftCluster) error {
|
||||
future, err := c.CreateOrUpdate(ctx, resourceGroupName, resourceName, parameters)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -35,7 +35,7 @@ func (c *openShiftClustersClient) DeleteAndWait(ctx context.Context, resourceGro
|
|||
return future.WaitForCompletionRef(ctx, c.Client)
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) List(ctx context.Context) (clusters []redhatopenshift.OpenShiftCluster, err error) {
|
||||
func (c *openShiftClustersClient) List(ctx context.Context) (clusters []mgmtredhatopenshift20200430.OpenShiftCluster, err error) {
|
||||
page, err := c.OpenShiftClustersClient.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -53,7 +53,7 @@ func (c *openShiftClustersClient) List(ctx context.Context) (clusters []redhatop
|
|||
return clusters, nil
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []redhatopenshift.OpenShiftCluster, err error) {
|
||||
func (c *openShiftClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []mgmtredhatopenshift20200430.OpenShiftCluster, err error) {
|
||||
page, err := c.OpenShiftClustersClient.ListByResourceGroup(ctx, resourceGroupName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtredhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
)
|
||||
|
||||
|
@ -20,16 +20,16 @@ type OperationsClient interface {
|
|||
}
|
||||
|
||||
type operationsClient struct {
|
||||
redhatopenshift.OperationsClient
|
||||
mgmtredhatopenshift20200430.OperationsClient
|
||||
}
|
||||
|
||||
var _ OperationsClient = &operationsClient{}
|
||||
|
||||
// NewOperationsClient creates a new OperationsClient
|
||||
func NewOperationsClient(environment *azure.Environment, subscriptionID string, authorizer autorest.Authorizer) OperationsClient {
|
||||
var client redhatopenshift.OperationsClient
|
||||
var client mgmtredhatopenshift20200430.OperationsClient
|
||||
if deployment.NewMode() == deployment.Development {
|
||||
client = redhatopenshift.NewOperationsClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client = mgmtredhatopenshift20200430.NewOperationsClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client.Sender = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
|
@ -38,7 +38,7 @@ func NewOperationsClient(environment *azure.Environment, subscriptionID string,
|
|||
},
|
||||
}
|
||||
} else {
|
||||
client = redhatopenshift.NewOperationsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client = mgmtredhatopenshift20200430.NewOperationsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client.Authorizer = authorizer
|
||||
}
|
||||
|
||||
|
|
|
@ -6,15 +6,15 @@ package redhatopenshift
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtredhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
)
|
||||
|
||||
// OperationsClientAddons contains addons for OperationsClient
|
||||
type OperationsClientAddons interface {
|
||||
List(ctx context.Context) (operations []redhatopenshift.Operation, err error)
|
||||
List(ctx context.Context) (operations []mgmtredhatopenshift20200430.Operation, err error)
|
||||
}
|
||||
|
||||
func (c *operationsClient) List(ctx context.Context) (operations []redhatopenshift.Operation, err error) {
|
||||
func (c *operationsClient) List(ctx context.Context) (operations []mgmtredhatopenshift20200430.Operation, err error) {
|
||||
page, err := c.OperationsClient.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -12,28 +12,28 @@ import (
|
|||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
mgmtredhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
)
|
||||
|
||||
// OpenShiftClustersClient is a minimal interface for azure OpenshiftClustersClient
|
||||
type OpenShiftClustersClient interface {
|
||||
ListCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftClusterCredentials, err error)
|
||||
Get(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftCluster, err error)
|
||||
ListCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result mgmtredhatopenshift20210131preview.OpenShiftClusterCredentials, err error)
|
||||
Get(ctx context.Context, resourceGroupName string, resourceName string) (result mgmtredhatopenshift20210131preview.OpenShiftCluster, err error)
|
||||
OpenShiftClustersClientAddons
|
||||
}
|
||||
|
||||
type openShiftClustersClient struct {
|
||||
redhatopenshift.OpenShiftClustersClient
|
||||
mgmtredhatopenshift20210131preview.OpenShiftClustersClient
|
||||
}
|
||||
|
||||
var _ OpenShiftClustersClient = &openShiftClustersClient{}
|
||||
|
||||
// NewOpenShiftClustersClient creates a new OpenShiftClustersClient
|
||||
func NewOpenShiftClustersClient(environment *azure.Environment, subscriptionID string, authorizer autorest.Authorizer) OpenShiftClustersClient {
|
||||
var client redhatopenshift.OpenShiftClustersClient
|
||||
var client mgmtredhatopenshift20210131preview.OpenShiftClustersClient
|
||||
if deployment.NewMode() == deployment.Development {
|
||||
client = redhatopenshift.NewOpenShiftClustersClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client = mgmtredhatopenshift20210131preview.NewOpenShiftClustersClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client.Sender = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
|
@ -42,7 +42,7 @@ func NewOpenShiftClustersClient(environment *azure.Environment, subscriptionID s
|
|||
},
|
||||
}
|
||||
} else {
|
||||
client = redhatopenshift.NewOpenShiftClustersClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client = mgmtredhatopenshift20210131preview.NewOpenShiftClustersClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client.Authorizer = authorizer
|
||||
}
|
||||
client.PollingDelay = 10 * time.Second
|
||||
|
|
|
@ -6,18 +6,18 @@ package redhatopenshift
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
mgmtredhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
)
|
||||
|
||||
// OpenShiftClustersClientAddons contains addons for OpenShiftClustersClient
|
||||
type OpenShiftClustersClientAddons interface {
|
||||
CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftCluster) error
|
||||
CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20210131preview.OpenShiftCluster) error
|
||||
DeleteAndWait(ctx context.Context, resourceGroupName string, resourceName string) error
|
||||
List(ctx context.Context) (clusters []redhatopenshift.OpenShiftCluster, err error)
|
||||
ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []redhatopenshift.OpenShiftCluster, err error)
|
||||
List(ctx context.Context) (clusters []mgmtredhatopenshift20210131preview.OpenShiftCluster, err error)
|
||||
ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []mgmtredhatopenshift20210131preview.OpenShiftCluster, err error)
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftCluster) error {
|
||||
func (c *openShiftClustersClient) CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20210131preview.OpenShiftCluster) error {
|
||||
future, err := c.CreateOrUpdate(ctx, resourceGroupName, resourceName, parameters)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -35,7 +35,7 @@ func (c *openShiftClustersClient) DeleteAndWait(ctx context.Context, resourceGro
|
|||
return future.WaitForCompletionRef(ctx, c.Client)
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) List(ctx context.Context) (clusters []redhatopenshift.OpenShiftCluster, err error) {
|
||||
func (c *openShiftClustersClient) List(ctx context.Context) (clusters []mgmtredhatopenshift20210131preview.OpenShiftCluster, err error) {
|
||||
page, err := c.OpenShiftClustersClient.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -53,7 +53,7 @@ func (c *openShiftClustersClient) List(ctx context.Context) (clusters []redhatop
|
|||
return clusters, nil
|
||||
}
|
||||
|
||||
func (c *openShiftClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []redhatopenshift.OpenShiftCluster, err error) {
|
||||
func (c *openShiftClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []mgmtredhatopenshift20210131preview.OpenShiftCluster, err error) {
|
||||
page, err := c.OpenShiftClustersClient.ListByResourceGroup(ctx, resourceGroupName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
mgmtredhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
)
|
||||
|
||||
|
@ -20,16 +20,16 @@ type OperationsClient interface {
|
|||
}
|
||||
|
||||
type operationsClient struct {
|
||||
redhatopenshift.OperationsClient
|
||||
mgmtredhatopenshift20210131preview.OperationsClient
|
||||
}
|
||||
|
||||
var _ OperationsClient = &operationsClient{}
|
||||
|
||||
// NewOperationsClient creates a new OperationsClient
|
||||
func NewOperationsClient(environment *azure.Environment, subscriptionID string, authorizer autorest.Authorizer) OperationsClient {
|
||||
var client redhatopenshift.OperationsClient
|
||||
var client mgmtredhatopenshift20210131preview.OperationsClient
|
||||
if deployment.NewMode() == deployment.Development {
|
||||
client = redhatopenshift.NewOperationsClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client = mgmtredhatopenshift20210131preview.NewOperationsClientWithBaseURI("https://localhost:8443", subscriptionID)
|
||||
client.Sender = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
|
@ -38,7 +38,7 @@ func NewOperationsClient(environment *azure.Environment, subscriptionID string,
|
|||
},
|
||||
}
|
||||
} else {
|
||||
client = redhatopenshift.NewOperationsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client = mgmtredhatopenshift20210131preview.NewOperationsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
|
||||
client.Authorizer = authorizer
|
||||
}
|
||||
|
||||
|
|
|
@ -6,15 +6,15 @@ package redhatopenshift
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
mgmtredhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
)
|
||||
|
||||
// OperationsClientAddons contains addons for OperationsClient
|
||||
type OperationsClientAddons interface {
|
||||
List(ctx context.Context) (operations []redhatopenshift.Operation, err error)
|
||||
List(ctx context.Context) (operations []mgmtredhatopenshift20210131preview.Operation, err error)
|
||||
}
|
||||
|
||||
func (c *operationsClient) List(ctx context.Context) (operations []redhatopenshift.Operation, err error) {
|
||||
func (c *operationsClient) List(ctx context.Context) (operations []mgmtredhatopenshift20210131preview.Operation, err error) {
|
||||
page, err := c.OperationsClient.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"net/http"
|
||||
"time"
|
||||
|
||||
mgmtgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
azgraphrbac "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/date"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
|
@ -38,10 +38,10 @@ func (c *Cluster) getServicePrincipal(ctx context.Context, appID string) (string
|
|||
func (c *Cluster) createApplication(ctx context.Context, displayName string) (string, string, error) {
|
||||
password := uuid.NewV4().String()
|
||||
|
||||
app, err := c.applications.Create(ctx, mgmtgraphrbac.ApplicationCreateParameters{
|
||||
app, err := c.applications.Create(ctx, azgraphrbac.ApplicationCreateParameters{
|
||||
DisplayName: &displayName,
|
||||
IdentifierUris: &[]string{"https://test.aro.azure.com/" + uuid.NewV4().String()},
|
||||
PasswordCredentials: &[]mgmtgraphrbac.PasswordCredential{
|
||||
PasswordCredentials: &[]azgraphrbac.PasswordCredential{
|
||||
{
|
||||
EndDate: &date.Time{Time: time.Now().AddDate(1, 0, 0)},
|
||||
Value: &password,
|
||||
|
@ -56,7 +56,7 @@ func (c *Cluster) createApplication(ctx context.Context, displayName string) (st
|
|||
}
|
||||
|
||||
func (c *Cluster) createServicePrincipal(ctx context.Context, appID string) (string, error) {
|
||||
var sp mgmtgraphrbac.ServicePrincipal
|
||||
var sp azgraphrbac.ServicePrincipal
|
||||
var err error
|
||||
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
|
||||
|
@ -66,7 +66,7 @@ func (c *Cluster) createServicePrincipal(ctx context.Context, appID string) (str
|
|||
// wait.PollImmediateUntil. Doing this will not propagate the latest error
|
||||
// to the user in case when wait exceeds the timeout
|
||||
_ = wait.PollImmediateUntil(10*time.Second, func() (bool, error) {
|
||||
sp, err = c.serviceprincipals.Create(ctx, mgmtgraphrbac.ServicePrincipalCreateParameters{
|
||||
sp, err = c.serviceprincipals.Create(ctx, azgraphrbac.ServicePrincipalCreateParameters{
|
||||
AppID: &appID,
|
||||
})
|
||||
if detailedErr, ok := err.(autorest.DetailedError); ok &&
|
||||
|
|
|
@ -26,9 +26,9 @@ import (
|
|||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
v20200430 "github.com/Azure/ARO-RP/pkg/api/v20200430"
|
||||
v2021131preview "github.com/Azure/ARO-RP/pkg/api/v20210131preview"
|
||||
mgmtopenshiftclustersv20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtopenshiftclustersv20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/api/v20210131preview"
|
||||
mgmtredhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2020-04-30/redhatopenshift"
|
||||
mgmtredhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2021-01-31-preview/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/deploy"
|
||||
"github.com/Azure/ARO-RP/pkg/deploy/generator"
|
||||
"github.com/Azure/ARO-RP/pkg/env"
|
||||
|
@ -37,8 +37,8 @@ import (
|
|||
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/authorization"
|
||||
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/features"
|
||||
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/network"
|
||||
openshiftclustersv20200430 "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/2020-04-30/redhatopenshift"
|
||||
openshiftclustersv20210131preview "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/2021-01-31-preview/redhatopenshift"
|
||||
redhatopenshift20200430 "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/2020-04-30/redhatopenshift"
|
||||
redhatopenshift20210131preview "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/2021-01-31-preview/redhatopenshift"
|
||||
"github.com/Azure/ARO-RP/pkg/util/deployment"
|
||||
"github.com/Azure/ARO-RP/pkg/util/rbac"
|
||||
)
|
||||
|
@ -53,8 +53,8 @@ type Cluster struct {
|
|||
groups features.ResourceGroupsClient
|
||||
applications graphrbac.ApplicationsClient
|
||||
serviceprincipals graphrbac.ServicePrincipalClient
|
||||
openshiftclustersv20200430 openshiftclustersv20200430.OpenShiftClustersClient
|
||||
openshiftclustersv20210131preview openshiftclustersv20210131preview.OpenShiftClustersClient
|
||||
openshiftclustersv20200430 redhatopenshift20200430.OpenShiftClustersClient
|
||||
openshiftclustersv20210131preview redhatopenshift20210131preview.OpenShiftClustersClient
|
||||
securitygroups network.SecurityGroupsClient
|
||||
subnets network.SubnetsClient
|
||||
routetables network.RouteTablesClient
|
||||
|
@ -109,8 +109,8 @@ func New(log *logrus.Entry, env env.Core, ci bool) (*Cluster, error) {
|
|||
|
||||
deployments: features.NewDeploymentsClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
groups: features.NewResourceGroupsClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
openshiftclustersv20200430: openshiftclustersv20200430.NewOpenShiftClustersClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
openshiftclustersv20210131preview: openshiftclustersv20210131preview.NewOpenShiftClustersClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
openshiftclustersv20200430: redhatopenshift20200430.NewOpenShiftClustersClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
openshiftclustersv20210131preview: redhatopenshift20210131preview.NewOpenShiftClustersClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
applications: graphrbac.NewApplicationsClient(env.Environment(), env.TenantID(), graphAuthorizer),
|
||||
serviceprincipals: graphrbac.NewServicePrincipalClient(env.Environment(), env.TenantID(), graphAuthorizer),
|
||||
securitygroups: network.NewSecurityGroupsClient(env.Environment(), env.SubscriptionID(), authorizer),
|
||||
|
@ -433,13 +433,13 @@ func (c *Cluster) createCluster(ctx context.Context, vnetResourceGroup, clusterN
|
|||
switch c.env.DeploymentMode() {
|
||||
case deployment.Development:
|
||||
oc.Properties.WorkerProfiles[0].VMSize = api.VMSizeStandardD2sV3
|
||||
ext := api.APIs[v2021131preview.APIVersion].OpenShiftClusterConverter().ToExternal(&oc)
|
||||
ext := api.APIs[v20210131preview.APIVersion].OpenShiftClusterConverter().ToExternal(&oc)
|
||||
data, err := json.Marshal(ext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ocExt := mgmtopenshiftclustersv20210131preview.OpenShiftCluster{}
|
||||
ocExt := mgmtredhatopenshift20210131preview.OpenShiftCluster{}
|
||||
err = json.Unmarshal(data, &ocExt)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -453,7 +453,7 @@ func (c *Cluster) createCluster(ctx context.Context, vnetResourceGroup, clusterN
|
|||
return err
|
||||
}
|
||||
|
||||
ocExt := mgmtopenshiftclustersv20200430.OpenShiftCluster{}
|
||||
ocExt := mgmtredhatopenshift20200430.OpenShiftCluster{}
|
||||
err = json.Unmarshal(data, &ocExt)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
|
||||
configv1 "github.com/openshift/api/config/v1"
|
||||
configclient "github.com/openshift/client-go/config/clientset/versioned"
|
||||
"github.com/openshift/client-go/config/clientset/versioned/fake"
|
||||
configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
|
@ -29,7 +29,7 @@ func TestClusterVersionEnricherTask(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "version object exists",
|
||||
client: fake.NewSimpleClientset(&configv1.ClusterVersion{
|
||||
client: configfake.NewSimpleClientset(&configv1.ClusterVersion{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "version"},
|
||||
Status: configv1.ClusterVersionStatus{
|
||||
Desired: configv1.Release{Version: "1.2.3"},
|
||||
|
@ -45,14 +45,14 @@ func TestClusterVersionEnricherTask(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "version object exists, but desired version is not set",
|
||||
client: fake.NewSimpleClientset(&configv1.ClusterVersion{
|
||||
client: configfake.NewSimpleClientset(&configv1.ClusterVersion{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "version"},
|
||||
}),
|
||||
wantOc: &api.OpenShiftCluster{},
|
||||
},
|
||||
{
|
||||
name: "version object does not exist",
|
||||
client: fake.NewSimpleClientset(),
|
||||
client: configfake.NewSimpleClientset(),
|
||||
wantOc: &api.OpenShiftCluster{},
|
||||
wantErr: `clusterversions.config.openshift.io "version" not found`,
|
||||
},
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
|
||||
operatorv1 "github.com/openshift/api/operator/v1"
|
||||
operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
|
||||
fakeopclient "github.com/openshift/client-go/operator/clientset/versioned/fake"
|
||||
operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -36,7 +36,7 @@ func TestIngressProfilesEnricherTask(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "default simplest case of ingress profile found",
|
||||
operatorcli: fakeopclient.NewSimpleClientset(
|
||||
operatorcli: operatorfake.NewSimpleClientset(
|
||||
&operatorv1.IngressController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
|
@ -77,7 +77,7 @@ func TestIngressProfilesEnricherTask(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "private ingress profile found",
|
||||
operatorcli: fakeopclient.NewSimpleClientset(
|
||||
operatorcli: operatorfake.NewSimpleClientset(
|
||||
&operatorv1.IngressController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "private",
|
||||
|
@ -125,7 +125,7 @@ func TestIngressProfilesEnricherTask(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "public ingress profile found",
|
||||
operatorcli: fakeopclient.NewSimpleClientset(
|
||||
operatorcli: operatorfake.NewSimpleClientset(
|
||||
&operatorv1.IngressController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "public",
|
||||
|
@ -173,7 +173,7 @@ func TestIngressProfilesEnricherTask(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "several ingress profiles found",
|
||||
operatorcli: fakeopclient.NewSimpleClientset(
|
||||
operatorcli: operatorfake.NewSimpleClientset(
|
||||
&operatorv1.IngressController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
|
@ -260,7 +260,7 @@ func TestIngressProfilesEnricherTask(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "no router service found",
|
||||
operatorcli: fakeopclient.NewSimpleClientset(
|
||||
operatorcli: operatorfake.NewSimpleClientset(
|
||||
&operatorv1.IngressController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "private",
|
||||
|
|
|
@ -13,11 +13,11 @@ import (
|
|||
"github.com/Azure/go-autorest/autorest/to"
|
||||
machinev1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
|
||||
maoclient "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned"
|
||||
"github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/fake"
|
||||
maofake "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/fake"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgotesting "k8s.io/client-go/testing"
|
||||
ktesting "k8s.io/client-go/testing"
|
||||
|
||||
"github.com/Azure/ARO-RP/pkg/api"
|
||||
"github.com/Azure/ARO-RP/test/util/cmp"
|
||||
|
@ -48,7 +48,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set objects exists - valid provider spec JSON",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset(
|
||||
return maofake.NewSimpleClientset(
|
||||
&machinev1beta1.MachineSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-worker-profile-1",
|
||||
|
@ -135,7 +135,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set objects exists - invalid provider spec JSON",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset(
|
||||
return maofake.NewSimpleClientset(
|
||||
&machinev1beta1.MachineSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-worker-profile-1",
|
||||
|
@ -165,7 +165,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set objects exists - provider spec is missing",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset(
|
||||
return maofake.NewSimpleClientset(
|
||||
&machinev1beta1.MachineSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-worker-profile-1",
|
||||
|
@ -184,7 +184,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set objects exists - provider spec is missing raw value",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset(
|
||||
return maofake.NewSimpleClientset(
|
||||
&machinev1beta1.MachineSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-worker-profile-1",
|
||||
|
@ -212,7 +212,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set objects do not exist",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset()
|
||||
return maofake.NewSimpleClientset()
|
||||
},
|
||||
wantOc: &api.OpenShiftCluster{
|
||||
ID: clusterID,
|
||||
|
@ -224,8 +224,8 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "machine set list request failed",
|
||||
client: func() maoclient.Interface {
|
||||
client := fake.NewSimpleClientset()
|
||||
client.PrependReactor("list", "machinesets", func(action clientgotesting.Action) (bool, runtime.Object, error) {
|
||||
client := maofake.NewSimpleClientset()
|
||||
client.PrependReactor("list", "machinesets", func(action ktesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("fake list error")
|
||||
})
|
||||
return client
|
||||
|
@ -238,7 +238,7 @@ func TestWorkerProfilesEnricherTask(t *testing.T) {
|
|||
{
|
||||
name: "invalid cluster object",
|
||||
client: func() maoclient.Interface {
|
||||
return fake.NewSimpleClientset()
|
||||
return maofake.NewSimpleClientset()
|
||||
},
|
||||
modifyOc: func(oc *api.OpenShiftCluster) {
|
||||
oc.ID = "invalid"
|
||||
|
|
|
@ -7,19 +7,19 @@ import (
|
|||
"crypto/x509"
|
||||
"math/big"
|
||||
|
||||
gocmp "github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// Diff is a wrapper for github.com/google/go-cmp/cmp.Diff with extra options
|
||||
func Diff(x, y interface{}, opts ...gocmp.Option) string {
|
||||
func Diff(x, y interface{}, opts ...cmp.Option) string {
|
||||
newOpts := append(
|
||||
opts,
|
||||
// FIXME: Remove x509CertComparer after upgrading to a Go version that includes https://github.com/golang/go/issues/28743
|
||||
gocmp.Comparer(x509CertComparer),
|
||||
gocmp.Comparer(bigIntComparer),
|
||||
cmp.Comparer(x509CertComparer),
|
||||
cmp.Comparer(bigIntComparer),
|
||||
)
|
||||
|
||||
return gocmp.Diff(x, y, newOpts...)
|
||||
return cmp.Diff(x, y, newOpts...)
|
||||
}
|
||||
|
||||
func x509CertComparer(x, y *x509.Certificate) bool {
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kversion "k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/discovery"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
utillog "github.com/Azure/ARO-RP/pkg/util/log"
|
||||
"github.com/Azure/ARO-RP/pkg/util/version"
|
||||
|
@ -161,7 +161,7 @@ type fakeDiscoveryClient struct {
|
|||
|
||||
var _ discovery.DiscoveryInterface = &fakeDiscoveryClient{}
|
||||
|
||||
func (c *fakeDiscoveryClient) RESTClient() restclient.Interface {
|
||||
func (c *fakeDiscoveryClient) RESTClient() rest.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -10,7 +10,7 @@ import (
 	"sort"
 
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -48,7 +48,7 @@ func Prepare(resources []runtime.Object) error {
 	return nil
 }
 
-func addWorkloadHashes(o *metav1.ObjectMeta, t *v1.PodTemplateSpec, configToHash map[string]string) {
+func addWorkloadHashes(o *metav1.ObjectMeta, t *corev1.PodTemplateSpec, configToHash map[string]string) {
 	for _, v := range t.Spec.Volumes {
 		if v.Secret != nil {
 			if hash, found := configToHash[keyFunc(schema.GroupKind{Kind: "Secret"}, o.Namespace, v.Secret.SecretName)]; found {
@@ -78,9 +78,9 @@ func hashWorkloadConfigs(resources []runtime.Object) error {
 	configToHash := map[string]string{}
 	for _, o := range resources {
 		switch o := o.(type) {
-		case *v1.Secret:
+		case *corev1.Secret:
 			configToHash[keyFunc(schema.GroupKind{Kind: "Secret"}, o.Namespace, o.Name)] = getHashSecret(o)
-		case *v1.ConfigMap:
+		case *corev1.ConfigMap:
 			configToHash[keyFunc(schema.GroupKind{Kind: "ConfigMap"}, o.Namespace, o.Name)] = getHashConfigMap(o)
 		}
 	}
@@ -104,7 +104,7 @@ func hashWorkloadConfigs(resources []runtime.Object) error {
 	return nil
 }
 
-func getHashSecret(o *v1.Secret) string {
+func getHashSecret(o *corev1.Secret) string {
 	keys := make([]string, 0, len(o.Data))
 	for key := range o.Data {
 		keys = append(keys, key)
@@ -119,7 +119,7 @@ func getHashSecret(o *v1.Secret) string {
 	return hex.EncodeToString(h.Sum(nil))
 }
 
-func getHashConfigMap(o *v1.ConfigMap) string {
+func getHashConfigMap(o *corev1.ConfigMap) string {
 	keys := make([]string, 0, len(o.Data))
 	for key := range o.Data {
 		keys = append(keys, key)
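For context (not part of the diff): getHashSecret and getHashConfigMap hash a workload's configuration in sorted-key order so the digest does not depend on Go's map iteration order. A minimal, self-contained sketch of that pattern, assuming a SHA-256 digest and a key=value framing; it is not the repository's exact implementation.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hashSecret returns a deterministic digest of a Secret's data: keys are
// sorted first, then each key/value pair is written into the hash.
func hashSecret(s *corev1.Secret) string {
	keys := make([]string, 0, len(s.Data))
	for key := range s.Data {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	h := sha256.New()
	for _, key := range keys {
		fmt.Fprintf(h, "%s=%s\n", key, s.Data[key])
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	s := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "certificates", Namespace: "openshift-azure-logging"},
		Data:       map[string][]byte{"stuff": []byte("9485958")},
	}
	fmt.Println(hashSecret(s))
}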
@@ -8,13 +8,13 @@ import (
 	"testing"
 
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
 func TestHashWorkloadConfigs(t *testing.T) {
-	sec := &v1.Secret{
+	sec := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "certificates",
 			Namespace: "openshift-azure-logging",
@@ -23,7 +23,7 @@ func TestHashWorkloadConfigs(t *testing.T) {
 			"stuff": []byte("9485958"),
 		},
 	}
-	cm := &v1.ConfigMap{
+	cm := &corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "fluent-config",
 			Namespace: "openshift-azure-logging",
@@ -41,33 +41,33 @@ func TestHashWorkloadConfigs(t *testing.T) {
 			Namespace: "openshift-azure-logging",
 		},
 		Spec: appsv1.DaemonSetSpec{
-			Template: v1.PodTemplateSpec{
-				Spec: v1.PodSpec{
-					Volumes: []v1.Volume{
+			Template: corev1.PodTemplateSpec{
+				Spec: corev1.PodSpec{
+					Volumes: []corev1.Volume{
 						{
 							Name: "certificates",
-							VolumeSource: v1.VolumeSource{
-								Secret: &v1.SecretVolumeSource{
+							VolumeSource: corev1.VolumeSource{
+								Secret: &corev1.SecretVolumeSource{
 									SecretName: "certificates",
 								},
 							},
 						},
 						{
 							Name: "fluent-config",
-							VolumeSource: v1.VolumeSource{
-								ConfigMap: &v1.ConfigMapVolumeSource{
-									LocalObjectReference: v1.LocalObjectReference{
+							VolumeSource: corev1.VolumeSource{
+								ConfigMap: &corev1.ConfigMapVolumeSource{
+									LocalObjectReference: corev1.LocalObjectReference{
 										Name: "fluent-config",
 									},
 								},
 							},
 						},
 					},
-					Containers: []v1.Container{
+					Containers: []corev1.Container{
 						{
 							Name: "fluentbit-audit",
 							Image: "fluentbitImage",
-							VolumeMounts: []v1.VolumeMount{
+							VolumeMounts: []corev1.VolumeMount{
 								{
 									Name: "fluent-config",
 									ReadOnly: true,
@@ -78,7 +78,7 @@ func TestHashWorkloadConfigs(t *testing.T) {
 						{
 							Name: "mdsd",
 							Image: "mdsdImage",
-							VolumeMounts: []v1.VolumeMount{
+							VolumeMounts: []corev1.VolumeMount{
 								{
 									Name: "certificates",
 									MountPath: "/etc/mdsd.d/secret",
@@ -13,14 +13,14 @@ import (
 	"strings"
 	"time"
 
-	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
+	azkeyvault "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/to"
 	"k8s.io/apimachinery/pkg/util/wait"
 
-	basekeyvault "github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
-	"github.com/Azure/ARO-RP/pkg/util/pem"
+	"github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
+	utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
 )
 
 type Eku string
@@ -41,14 +41,14 @@ type Manager interface {
 	EnsureCertificateDeleted(context.Context, string) error
 	GetBase64Secret(context.Context, string) ([]byte, error)
 	GetCertificateSecret(context.Context, string) (*rsa.PrivateKey, []*x509.Certificate, error)
-	GetSecret(context.Context, string) (keyvault.SecretBundle, error)
-	GetSecrets(context.Context) ([]keyvault.SecretItem, error)
-	SetSecret(context.Context, string, keyvault.SecretSetParameters) error
+	GetSecret(context.Context, string) (azkeyvault.SecretBundle, error)
+	GetSecrets(context.Context) ([]azkeyvault.SecretItem, error)
+	SetSecret(context.Context, string, azkeyvault.SecretSetParameters) error
 	WaitForCertificateOperation(context.Context, string) error
 }
 
 type manager struct {
-	kv basekeyvault.BaseClient
+	kv keyvault.BaseClient
 	keyvaultURI string
 }
 
@@ -57,7 +57,7 @@ type manager struct {
 // access a key vault.
 func NewManager(kvAuthorizer autorest.Authorizer, keyvaultURI string) Manager {
 	return &manager{
-		kv: basekeyvault.New(kvAuthorizer),
+		kv: keyvault.New(kvAuthorizer),
 		keyvaultURI: keyvaultURI,
 	}
 }
@@ -75,43 +75,43 @@ func (m *manager) CreateSignedCertificate(ctx context.Context, issuer Issuer, ce
 		shortCommonName = "reserved.aroapp.io"
 	}
 
-	op, err := m.kv.CreateCertificate(ctx, m.keyvaultURI, certificateName, keyvault.CertificateCreateParameters{
-		CertificatePolicy: &keyvault.CertificatePolicy{
-			KeyProperties: &keyvault.KeyProperties{
+	op, err := m.kv.CreateCertificate(ctx, m.keyvaultURI, certificateName, azkeyvault.CertificateCreateParameters{
+		CertificatePolicy: &azkeyvault.CertificatePolicy{
+			KeyProperties: &azkeyvault.KeyProperties{
 				Exportable: to.BoolPtr(true),
-				KeyType: keyvault.RSA,
+				KeyType: azkeyvault.RSA,
 				KeySize: to.Int32Ptr(2048),
 			},
-			SecretProperties: &keyvault.SecretProperties{
+			SecretProperties: &azkeyvault.SecretProperties{
 				ContentType: to.StringPtr("application/x-pem-file"),
 			},
-			X509CertificateProperties: &keyvault.X509CertificateProperties{
+			X509CertificateProperties: &azkeyvault.X509CertificateProperties{
 				Subject: to.StringPtr(pkix.Name{CommonName: shortCommonName}.String()),
 				Ekus: &[]string{
 					string(eku),
 				},
-				SubjectAlternativeNames: &keyvault.SubjectAlternativeNames{
+				SubjectAlternativeNames: &azkeyvault.SubjectAlternativeNames{
 					DNSNames: &[]string{
 						commonName,
 					},
 				},
-				KeyUsage: &[]keyvault.KeyUsageType{
-					keyvault.DigitalSignature,
-					keyvault.KeyEncipherment,
+				KeyUsage: &[]azkeyvault.KeyUsageType{
+					azkeyvault.DigitalSignature,
+					azkeyvault.KeyEncipherment,
 				},
 				ValidityInMonths: to.Int32Ptr(12),
 			},
-			LifetimeActions: &[]keyvault.LifetimeAction{
+			LifetimeActions: &[]azkeyvault.LifetimeAction{
 				{
-					Trigger: &keyvault.Trigger{
+					Trigger: &azkeyvault.Trigger{
 						DaysBeforeExpiry: to.Int32Ptr(365 - 90),
 					},
-					Action: &keyvault.Action{
-						ActionType: keyvault.AutoRenew,
+					Action: &azkeyvault.Action{
+						ActionType: azkeyvault.AutoRenew,
 					},
 				},
 			},
-			IssuerParameters: &keyvault.IssuerParameters{
+			IssuerParameters: &azkeyvault.IssuerParameters{
 				Name: to.StringPtr(string(issuer)),
 			},
 		},
@@ -152,7 +152,7 @@ func (m *manager) GetCertificateSecret(ctx context.Context, secretName string) (
 		return nil, nil, err
 	}
 
-	key, certs, err := pem.Parse([]byte(*bundle.Value))
+	key, certs, err := utilpem.Parse([]byte(*bundle.Value))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -168,15 +168,15 @@ func (m *manager) GetCertificateSecret(ctx context.Context, secretName string) (
 	return key, certs, nil
 }
 
-func (m *manager) GetSecret(ctx context.Context, secretName string) (keyvault.SecretBundle, error) {
+func (m *manager) GetSecret(ctx context.Context, secretName string) (azkeyvault.SecretBundle, error) {
 	return m.kv.GetSecret(ctx, m.keyvaultURI, secretName, "")
 }
 
-func (m *manager) GetSecrets(ctx context.Context) ([]keyvault.SecretItem, error) {
+func (m *manager) GetSecrets(ctx context.Context) ([]azkeyvault.SecretItem, error) {
 	return m.kv.GetSecrets(ctx, m.keyvaultURI, nil)
 }
 
-func (m *manager) SetSecret(ctx context.Context, secretName string, parameters keyvault.SecretSetParameters) error {
+func (m *manager) SetSecret(ctx context.Context, secretName string, parameters azkeyvault.SecretSetParameters) error {
 	_, err := m.kv.SetSecret(ctx, m.keyvaultURI, secretName, parameters)
 	return err
 }
@@ -196,7 +196,7 @@ func (m *manager) WaitForCertificateOperation(ctx context.Context, certificateNa
 	return err
 }
 
-func keyvaultError(err *keyvault.Error) string {
+func keyvaultError(err *azkeyvault.Error) string {
 	if err == nil {
 		return ""
 	}
@@ -225,7 +225,7 @@ func keyvaultError(err *keyvault.Error) string {
 	return sb.String()
 }
 
-func checkOperation(op *keyvault.CertificateOperation) (bool, error) {
+func checkOperation(op *azkeyvault.CertificateOperation) (bool, error) {
 	switch *op.Status {
 	case "inProgress":
 		return false, nil
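For context (not part of the diff): the azkeyvault alias exists because this file imports two packages that are both naturally named keyvault, the Azure SDK data-plane package and the repository's own azureclient/keyvault wrapper, so exactly one of them must be renamed; after this change the vendored SDK takes the alias. A minimal sketch of the two side by side; listSecrets is a hypothetical helper, not code from the repository.

package example

import (
	"context"

	// The vendored Azure SDK package takes the alias...
	azkeyvault "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"

	// ...while the in-repo client keeps its natural name.
	"github.com/Azure/ARO-RP/pkg/util/azureclient/keyvault"
)

// listSecrets mirrors the manager methods above: the in-repo BaseClient
// issues the call and the SDK package supplies the returned types.
func listSecrets(ctx context.Context, kv keyvault.BaseClient, vaultURI string) ([]azkeyvault.SecretItem, error) {
	return kv.GetSecrets(ctx, vaultURI, nil)
}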
@@ -9,7 +9,7 @@ import (
 	"net/http"
 
 	"github.com/sirupsen/logrus"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/rest"
 )
 
@@ -22,9 +22,9 @@ func DialContext(ctx context.Context, log *logrus.Entry, restconfig *rest.Config
 
 	// Connect the error stream, r/o
 	errorStream, err := spdyConn.CreateStream(http.Header{
-		v1.StreamType: []string{v1.StreamTypeError},
-		v1.PortHeader: []string{port},
-		v1.PortForwardRequestIDHeader: []string{"0"},
+		corev1.StreamType: []string{corev1.StreamTypeError},
+		corev1.PortHeader: []string{port},
+		corev1.PortForwardRequestIDHeader: []string{"0"},
 	})
 	if err != nil {
 		spdyConn.Close()
@@ -34,9 +34,9 @@ func DialContext(ctx context.Context, log *logrus.Entry, restconfig *rest.Config
 
 	// Connect the data stream, r/w
 	dataStream, err := spdyConn.CreateStream(http.Header{
-		v1.StreamType: []string{v1.StreamTypeData},
-		v1.PortHeader: []string{port},
-		v1.PortForwardRequestIDHeader: []string{"0"},
+		corev1.StreamType: []string{corev1.StreamTypeData},
+		corev1.PortHeader: []string{port},
+		corev1.PortForwardRequestIDHeader: []string{"0"},
 	})
 	if err != nil {
 		spdyConn.Close()
Some files were not shown because too many files changed in this diff.