Register cluster with hive at install time

Ulrich Schlueter 2022-05-31 14:09:11 +02:00 committed by Mikalai Radchuk
Parent 8f0a38c191
Commit 3d8654a1a7
12 changed files with 348 additions and 17 deletions

2
go.mod
View File

@@ -44,6 +44,7 @@ require (
github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3
github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2
github.com/openshift/hive v1.1.16
github.com/openshift/hive/apis v0.0.0
github.com/openshift/installer v0.16.1
github.com/openshift/library-go v0.0.0-20220405134141-226b07263a02
github.com/openshift/machine-config-operator v3.11.0+incompatible
@@ -259,7 +260,6 @@ require (
github.com/openshift/cluster-api-provider-ibmcloud v0.0.0-20211008100740-4d7907adbd6b // indirect
github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 // indirect
github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20211111151530-06177b773958 // indirect
-github.com/openshift/hive/apis v0.0.0 // indirect
github.com/ovirt/go-ovirt v0.0.0-20210308100159-ac0bcbc88d7c // indirect
github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect
github.com/pborman/uuid v1.2.1 // indirect

View File

@@ -108,3 +108,7 @@ allowedImportNames:
- ctrl
github.com/openshift/hive/pkg/client/clientset/versioned:
- hiveclient
github.com/openshift/hive/pkg/client/clientset/versioned/fake:
- hivefake
github.com/openshift/hive/apis/hive/v1:
- hivev1

View File

@@ -54,6 +54,7 @@ type OpenShiftClusterProperties struct {
RegistryProfiles []RegistryProfile `json:"registryProfiles,omitempty"`
ImageRegistryStorageAccountName string `json:"imageRegistryStorageAccountName,omitempty"`
InfraID string `json:"infraId,omitempty"`
HiveProfile HiveProfile `json:"hiveProfile,omitempty"`
}
// ProvisioningState represents a provisioning state.
@@ -295,3 +296,7 @@ type SystemData struct {
LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"`
LastModifiedAt *time.Time `json:"lastModifiedAt,omitempty"`
}
type HiveProfile struct {
Namespace string `json:"namespace,omitempty"`
}

View File

@@ -122,6 +122,9 @@ func (c *openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfa
}
}
out.Properties.HiveProfile = HiveProfile{
Namespace: oc.Properties.HiveProfile.Namespace,
}
out.SystemData = SystemData{
CreatedBy: oc.SystemData.CreatedBy,
CreatedAt: oc.SystemData.CreatedAt,
@@ -169,6 +172,7 @@ func (c *openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShi
}
out.Properties.ArchitectureVersion = api.ArchitectureVersion(oc.Properties.ArchitectureVersion)
out.Properties.InfraID = oc.Properties.InfraID
out.Properties.HiveProfile.Namespace = oc.Properties.HiveProfile.Namespace
out.Properties.ProvisioningState = api.ProvisioningState(oc.Properties.ProvisioningState)
out.Properties.LastProvisioningState = api.ProvisioningState(oc.Properties.LastProvisioningState)
out.Properties.FailedProvisioningState = api.ProvisioningState(oc.Properties.FailedProvisioningState)

View File

@@ -136,6 +136,7 @@ type OpenShiftClusterProperties struct {
InfraID string `json:"infraId,omitempty"`
SSHKey SecureBytes `json:"sshKey,omitempty"`
// AdminKubeconfig is installer generated kubeconfig. It is 10 year config,
// and should never be returned to the user.
AdminKubeconfig SecureBytes `json:"adminKubeconfig,omitempty"`
@@ -150,6 +151,8 @@ type OpenShiftClusterProperties struct {
UserAdminKubeconfig SecureBytes `json:"userAdminKubeconfig,omitempty"`
RegistryProfiles []*RegistryProfile `json:"registryProfiles,omitempty"`
HiveProfile HiveProfile `json:"hiveProfile,omitempty"`
}
// ProvisioningState represents a provisioning state
@@ -406,3 +409,10 @@ const (
// ArchitectureVersionV2: 4.5: 1 load balancer, 1 NSG
ArchitectureVersionV2
)
// HiveProfile represents the hive related data of a cluster
type HiveProfile struct {
MissingFields
Namespace string `json:"namespace,omitempty"`
}

View File

@@ -13,11 +13,13 @@ import (
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/sirupsen/logrus"
"k8s.io/client-go/rest"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/cluster"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/hive"
"github.com/Azure/ARO-RP/pkg/util/billing"
"github.com/Azure/ARO-RP/pkg/util/encryption"
utillog "github.com/Azure/ARO-RP/pkg/util/log"
@@ -27,7 +29,7 @@ import (
type openShiftClusterBackend struct {
*backend
-newManager func(context.Context, *logrus.Entry, env.Interface, database.OpenShiftClusters, database.Gateway, encryption.AEAD, billing.Manager, *api.OpenShiftClusterDocument, *api.SubscriptionDocument) (cluster.Interface, error)
newManager func(context.Context, *logrus.Entry, env.Interface, database.OpenShiftClusters, database.Gateway, encryption.AEAD, billing.Manager, *api.OpenShiftClusterDocument, *api.SubscriptionDocument, *rest.Config) (cluster.Interface, error)
}
func newOpenShiftClusterBackend(b *backend) *openShiftClusterBackend {
@@ -100,7 +102,12 @@ func (ocb *openShiftClusterBackend) handle(ctx context.Context, log *logrus.Entr
return err
}
-m, err := ocb.newManager(ctx, log, ocb.env, ocb.dbOpenShiftClusters, ocb.dbGateway, ocb.aead, ocb.billing, doc, subscriptionDoc)
hiveRestConfig, err := hive.HiveRestConfig()
if err != nil {
log.Error(err) // Don't fail because of hive
}
m, err := ocb.newManager(ctx, log, ocb.env, ocb.dbOpenShiftClusters, ocb.dbGateway, ocb.aead, ocb.billing, doc, subscriptionDoc, hiveRestConfig)
if err != nil {
return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateFailed, err)
}
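hive.HiveRestConfig is called above but its implementation is not part of this hunk. A minimal sketch of what such a helper could look like, assuming the hive shard kubeconfig path arrives via an environment variable (the variable name and error text are illustrative, not taken from this commit):

package hive

import (
    "errors"
    "os"

    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// HiveRestConfig returns a *rest.Config for the hive shard.
// Sketch only: the real implementation is not shown in this hunk.
func HiveRestConfig() (*rest.Config, error) {
    kubeconfigPath := os.Getenv("HIVE_KUBECONFIG_PATH") // assumed variable name
    if kubeconfigPath == "" {
        return nil, errors.New("hive kubeconfig path not configured")
    }
    return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
}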

View File

@@ -12,6 +12,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/sirupsen/logrus"
"k8s.io/client-go/rest"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/cluster"
@@ -292,7 +293,7 @@ func TestBackendTry(t *testing.T) {
t.Fatal(err)
}
-createManager := func(context.Context, *logrus.Entry, env.Interface, database.OpenShiftClusters, database.Gateway, encryption.AEAD, billing.Manager, *api.OpenShiftClusterDocument, *api.SubscriptionDocument) (cluster.Interface, error) {
createManager := func(context.Context, *logrus.Entry, env.Interface, database.OpenShiftClusters, database.Gateway, encryption.AEAD, billing.Manager, *api.OpenShiftClusterDocument, *api.SubscriptionDocument, *rest.Config) (cluster.Interface, error) {
return manager, nil
}

View File

@@ -17,11 +17,13 @@ import (
"github.com/sirupsen/logrus"
extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/cluster/graph"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/hive"
aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned"
"github.com/Azure/ARO-RP/pkg/operator/deploy"
"github.com/Azure/ARO-RP/pkg/util/azureclient/graphrbac"
@@ -92,12 +94,14 @@ type manager struct {
arocli aroclient.Interface
imageregistrycli imageregistryclient.Interface
hiveClusterManager hive.ClusterManager
aroOperatorDeployer deploy.Operator
}
// New returns a cluster manager
func New(ctx context.Context, log *logrus.Entry, _env env.Interface, db database.OpenShiftClusters, dbGateway database.Gateway, aead encryption.AEAD,
-billing billing.Manager, doc *api.OpenShiftClusterDocument, subscriptionDoc *api.SubscriptionDocument) (Interface, error) {
billing billing.Manager, doc *api.OpenShiftClusterDocument, subscriptionDoc *api.SubscriptionDocument, hiveConfig *rest.Config) (Interface, error) {
r, err := azure.ParseResourceID(doc.OpenShiftCluster.ID)
if err != nil {
return nil, err
@@ -120,6 +124,15 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Interface, db database
storage := storage.NewManager(_env, r.SubscriptionID, fpAuthorizer)
// TODO: always set hive once we have it everywhere in prod and dev
var hr hive.ClusterManager
if hiveConfig != nil {
hr, err = hive.NewClusterManagerFromConfig(hiveConfig)
if err != nil {
return nil, err
}
}
return &manager{
log: log,
env: _env,
@@ -153,5 +166,7 @@ func New(ctx context.Context, log *logrus.Entry, _env env.Interface, db database
storage: storage,
subnet: subnet.NewManager(_env.Environment(), r.SubscriptionID, fpAuthorizer),
graph: graph.NewManager(log, aead, storage),
hiveClusterManager: hr,
}, nil
}

View File

@@ -19,6 +19,7 @@ import (
"k8s.io/client-go/kubernetes"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/hive"
"github.com/Azure/ARO-RP/pkg/installer"
aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned"
"github.com/Azure/ARO-RP/pkg/operator/deploy"
@@ -128,6 +129,10 @@ func (m *manager) callInstaller(ctx context.Context) error {
// Install installs an ARO cluster
func (m *manager) Install(ctx context.Context) error {
var (
workloadCluster *hive.WorkloadCluster
)
steps := map[api.InstallPhase][]steps.Step{
api.InstallPhaseBootstrap: {
steps.AuthorizationRefreshingAction(m.fpAuthorizer, steps.Action(m.validateResources)),
@@ -184,6 +189,25 @@ func (m *manager) Install(ctx context.Context) error {
steps.Action(m.configureIngressCertificate),
steps.Condition(m.ingressControllerReady, 30*time.Minute, true),
steps.Action(m.configureDefaultStorageClass),
steps.Action(func(ctx context.Context) error {
var err error
workloadCluster, err = m.collectDataForHive(ctx)
if err != nil {
m.hiveClusterManager = nil
m.log.Infof("collectDataForHive: %s", err)
}
return nil // don't fail because of hive
}),
steps.Action(func(ctx context.Context) error {
err := m.registerWithHive(ctx, workloadCluster)
if err != nil {
m.hiveClusterManager = nil
m.log.Infof("registerWithHive: %s", err)
}
return nil // don't fail because of hive
}),
steps.Condition(m.verifyRegistration, 2*time.Minute, false), // don't fail because of hive
steps.Action(m.finishInstallation),
},
}
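The two inline closures and the non-failing condition above follow the same pattern: log the hive error, drop the hive cluster manager, and keep installing. A small refactoring sketch of that pattern, not part of this commit (the steps import path is assumed from the repository layout):

package cluster

import (
    "context"

    "github.com/sirupsen/logrus"

    "github.com/Azure/ARO-RP/pkg/util/steps"
)

// bestEffort wraps an error-returning action in a step that logs failures,
// runs an optional cleanup (e.g. disabling the hive cluster manager) and
// never fails the installation. Sketch only.
func bestEffort(log *logrus.Entry, name string, fn func(context.Context) error, onError func()) steps.Step {
    return steps.Action(func(ctx context.Context) error {
        if err := fn(ctx); err != nil {
            if onError != nil {
                onError()
            }
            log.Infof("%s: %s", name, err)
        }
        return nil // don't fail because of hive
    })
}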

View File

@@ -0,0 +1,99 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"errors"
"github.com/openshift/installer/pkg/asset/installconfig/azure"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/hive"
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (m *manager) collectDataForHive(ctx context.Context) (*hive.WorkloadCluster, error) {
m.log.Info("collecting registration data from the new cluster")
if m.hiveClusterManager == nil {
return nil, errors.New("no hive cluster manager, skipping")
}
// TODO: When hive supports first party principals we'll need to send both first party and cluster service principals
clusterSP := azure.Credentials{
TenantID: m.subscriptionDoc.Subscription.Properties.TenantID,
SubscriptionID: m.subscriptionDoc.ID,
ClientID: m.doc.OpenShiftCluster.Properties.ServicePrincipalProfile.ClientID,
ClientSecret: string(m.doc.OpenShiftCluster.Properties.ServicePrincipalProfile.ClientSecret),
}
clusterSPBytes, err := json.Marshal(clusterSP)
if err != nil {
return nil, err
}
resourceGroupName := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
hiveWorkloadCluster := &hive.WorkloadCluster{
SubscriptionID: m.subscriptionDoc.ID,
ClusterName: m.doc.OpenShiftCluster.Name,
ResourceGroupName: resourceGroupName,
Location: m.doc.OpenShiftCluster.Location,
InfraID: m.doc.OpenShiftCluster.Properties.InfraID,
ClusterID: m.doc.ID,
KubeConfig: string(m.doc.OpenShiftCluster.Properties.AROServiceKubeconfig),
ServicePrincipal: string(clusterSPBytes),
}
return hiveWorkloadCluster, nil
}
func (m *manager) registerWithHive(ctx context.Context, workloadCluster *hive.WorkloadCluster) error {
m.log.Info("registering with hive")
if m.hiveClusterManager == nil {
return errors.New("no hive cluster manager, skipping")
}
cd, err := m.hiveClusterManager.Register(ctx, workloadCluster)
if err != nil {
return err
}
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.HiveProfile = api.HiveProfile{
Namespace: cd.Namespace,
}
return nil
})
return err
}
// verifyRegistration is being run in steps.Condition with fail=false
// as we don't want to fail on Hive errors during the post-installation
// cluster adoption.
// We must return true in case of successful adoption.
// Returning false means that we want to continue waiting for adoption.
// Returning non-nil error means that we give up waiting.
func (m *manager) verifyRegistration(ctx context.Context) (bool, error) {
m.log.Info("verifying cluster registration in hive")
if m.hiveClusterManager == nil {
return false, errors.New("no hive cluster manager, skipping")
}
isConnected, reason, err := m.hiveClusterManager.IsConnected(ctx, m.doc.OpenShiftCluster.Properties.HiveProfile.Namespace)
if err != nil {
m.log.Infof("error getting hive registration status: %s", err)
return false, nil
}
if !isConnected {
m.log.Infof("hive is not able to connect to cluster %s", reason)
return false, nil
}
m.log.Info("cluster adopted successfully")
return true, nil
}
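The clusterSP payload marshalled in collectDataForHive ends up in the osServicePrincipal.json key of the secret built by ServicePrincipalSecret in pkg/hive/resources.go below. A small self-contained sketch of the payload shape; the JSON field names depend on the installer's azure.Credentials struct tags and are assumed here, not taken from this commit:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/openshift/installer/pkg/asset/installconfig/azure"
)

func main() {
    // Placeholder values; the Go field names match the ones used in collectDataForHive.
    b, err := json.Marshal(azure.Credentials{
        TenantID:       "tenant-id",
        SubscriptionID: "subscription-id",
        ClientID:       "client-id",
        ClientSecret:   "client-secret",
    })
    if err != nil {
        panic(err)
    }
    // Assuming the usual osServicePrincipal.json-style tags, this prints roughly:
    // {"subscriptionId":"subscription-id","clientId":"client-id","clientSecret":"client-secret","tenantId":"tenant-id"}
    fmt.Println(string(b))
}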

View File

@@ -4,36 +4,114 @@ package hive
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"github.com/gofrs/uuid"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hiveclient "github.com/openshift/hive/pkg/client/clientset/versioned"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
)
-type Interface interface {
type ClusterManager interface {
// We need to keep cluster SP updated with Hive, so we not only need to be able to create,
// but also be able to update resources.
// See relevant work item: https://msazure.visualstudio.com/AzureRedHatOpenShift/_workitems/edit/14895480
// We might be able to do this by using a dynamic client.
// Something similar to this: https://github.com/Azure/ARO-RP/pull/2145#discussion_r897915283
// TODO: Replace Register with CreateOrUpdate and remove the comment above
Register(ctx context.Context, workloadCluster *WorkloadCluster) (*hivev1.ClusterDeployment, error)
IsConnected(ctx context.Context, namespace string) (bool, string, error)
}
-type client struct {
// WorkloadCluster represents all data in hive pertaining to a single ARO cluster
type WorkloadCluster struct {
SubscriptionID string `json:"subscription,omitempty"`
ClusterName string `json:"name,omitempty"`
ResourceGroupName string `json:"resourceGroup,omitempty"`
Location string `json:"location,omitempty"`
InfraID string `json:"infraId,omitempty"`
ClusterID string `json:"clusterID,omitempty"`
KubeConfig string `json:"kubeconfig,omitempty"`
ServicePrincipal string `json:"serviceprincipal,omitempty"`
}
type clusterManager struct {
hiveClientset *hiveclient.Clientset
kubernetescli *kubernetes.Clientset
}
-func New(hiveClientset *hiveclient.Clientset, kubernetescli *kubernetes.Clientset) Interface {
func NewClusterManagerFromConfig(restConfig *rest.Config) (ClusterManager, error) {
-return &client{
-hiveClientset: hiveClientset,
-kubernetescli: kubernetescli,
-}
-}
-func NewFromConfig(restConfig *rest.Config) (Interface, error) {
hiveclientset, err := hiveclient.NewForConfig(restConfig)
if err != nil {
return nil, err
}
-hiveKubernetescli, err := kubernetes.NewForConfig(restConfig)
kubernetescli, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, err
}
-return New(hiveclientset, hiveKubernetescli), nil
return newClusterManager(hiveclientset, kubernetescli), nil
}
func newClusterManager(hiveClientset *hiveclient.Clientset, kubernetescli *kubernetes.Clientset) ClusterManager {
return &clusterManager{
hiveClientset: hiveClientset,
kubernetescli: kubernetescli,
}
}
func (hr *clusterManager) Register(ctx context.Context, workloadCluster *WorkloadCluster) (*hivev1.ClusterDeployment, error) {
var namespace string
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespace = "aro-" + uuid.Must(uuid.NewV4()).String()
csn := ClusterNamespace(namespace)
_, err := hr.kubernetescli.CoreV1().Namespaces().Create(ctx, csn, metav1.CreateOptions{})
return err
})
if err != nil {
return nil, err
}
kubesecret := KubeAdminSecret(namespace, []byte(workloadCluster.KubeConfig))
_, err = hr.kubernetescli.CoreV1().Secrets(namespace).Create(ctx, kubesecret, metav1.CreateOptions{})
if err != nil {
return nil, err
}
spsecret := ServicePrincipalSecret(namespace, []byte(workloadCluster.ServicePrincipal))
_, err = hr.kubernetescli.CoreV1().Secrets(namespace).Create(ctx, spsecret, metav1.CreateOptions{})
if err != nil {
return nil, err
}
cds := ClusterDeployment(namespace, workloadCluster.ClusterName, workloadCluster.ClusterID, workloadCluster.InfraID, workloadCluster.Location)
return hr.hiveClientset.HiveV1().ClusterDeployments(namespace).Create(ctx, cds, metav1.CreateOptions{})
}
func (hr *clusterManager) IsConnected(ctx context.Context, namespace string) (bool, string, error) {
cd, err := hr.hiveClientset.HiveV1().ClusterDeployments(namespace).Get(ctx, clusterDeploymentName, metav1.GetOptions{})
if err != nil {
return false, "", err
}
// Looking for the UnreachableCondition in the list of conditions
// the order is not stable, but the condition is expected to be present
for _, condition := range cd.Status.Conditions {
if condition.Type == hivev1.UnreachableCondition {
// checking for false, meaning not unreachable, so is reachable
isReachable := condition.Status != corev1.ConditionTrue
return isReachable, condition.Message, nil
}
}
// we should never arrive here (famous last words)
return false, "", fmt.Errorf("could not find UnreachableCondition")
} }
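A minimal usage sketch of the ClusterManager API introduced above, e.g. from a local test harness; the kubeconfig path and the WorkloadCluster values are placeholders:

package main

import (
    "context"
    "log"

    "k8s.io/client-go/tools/clientcmd"

    "github.com/Azure/ARO-RP/pkg/hive"
)

func main() {
    // Placeholder: any kubeconfig that points at the hive shard.
    restConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/hive.kubeconfig")
    if err != nil {
        log.Fatal(err)
    }

    cm, err := hive.NewClusterManagerFromConfig(restConfig)
    if err != nil {
        log.Fatal(err)
    }

    ctx := context.Background()

    // Register creates the namespace, the admin kubeconfig and service
    // principal secrets, and the ClusterDeployment for the workload cluster.
    cd, err := cm.Register(ctx, &hive.WorkloadCluster{
        SubscriptionID: "00000000-0000-0000-0000-000000000000", // placeholder values
        ClusterName:    "example-cluster",
        Location:       "eastus",
        InfraID:        "example-abcde",
        ClusterID:      "11111111-1111-1111-1111-111111111111",
        KubeConfig:     "<admin kubeconfig contents>",
    })
    if err != nil {
        log.Fatal(err)
    }

    // IsConnected reports whether hive's UnreachableCondition shows the
    // registered cluster as reachable.
    connected, reason, err := cm.IsConnected(ctx, cd.Namespace)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("connected=%v reason=%q", connected, reason)
}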

84
pkg/hive/resources.go Normal file
View File

@@ -0,0 +1,84 @@
package hive
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
hivev1 "github.com/openshift/hive/apis/hive/v1"
"github.com/openshift/hive/apis/hive/v1/azure"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
clusterDeploymentName = "cluster"
kubesecretName = "admin-kube-secret"
servicePrincipalSecretname = "serviceprincipal-secret"
)
func ClusterNamespace(namespace string) *corev1.Namespace {
return &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
}
func KubeAdminSecret(namespace string, kubeConfig []byte) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: kubesecretName,
Namespace: namespace,
},
Data: map[string][]byte{
"kubeconfig": kubeConfig,
},
Type: corev1.SecretTypeOpaque,
}
}
func ServicePrincipalSecret(namespace string, secret []byte) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: servicePrincipalSecretname,
Namespace: namespace,
},
Data: map[string][]byte{
"osServicePrincipal.json": secret,
},
Type: corev1.SecretTypeOpaque,
}
}
func ClusterDeployment(namespace string, clusterName string, clusterID string, infraID string, location string) *hivev1.ClusterDeployment {
return &hivev1.ClusterDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: clusterDeploymentName,
Namespace: namespace,
},
Spec: hivev1.ClusterDeploymentSpec{
BaseDomain: "",
ClusterName: clusterName,
Installed: true,
ClusterMetadata: &hivev1.ClusterMetadata{
AdminKubeconfigSecretRef: corev1.LocalObjectReference{
Name: kubesecretName,
},
ClusterID: clusterID,
InfraID: infraID,
},
Platform: hivev1.Platform{
Azure: &azure.Platform{
BaseDomainResourceGroupName: "",
Region: location,
CredentialsSecretRef: corev1.LocalObjectReference{
Name: servicePrincipalSecretname,
},
},
},
PreserveOnDelete: true,
ManageDNS: false,
},
}
}