Nils Elde 2020-09-25 17:00:51 -04:00
Parent 70cb0d94fe
Commit 79d78250b2
35 changed files with 336 additions and 336 deletions

View file

@ -17,10 +17,10 @@ import (
"github.com/Azure/ARO-RP/pkg/util/azureerrors"
)
func (i *manager) deployARMTemplate(ctx context.Context, rg string, tName string, t *arm.Template, params map[string]interface{}) error {
i.log.Printf("deploying %s template", tName)
func (m *manager) deployARMTemplate(ctx context.Context, rg string, tName string, t *arm.Template, params map[string]interface{}) error {
m.log.Printf("deploying %s template", tName)
err := i.deployments.CreateOrUpdateAndWait(ctx, rg, deploymentName, mgmtfeatures.Deployment{
err := m.deployments.CreateOrUpdateAndWait(ctx, rg, deploymentName, mgmtfeatures.Deployment{
Properties: &mgmtfeatures.DeploymentProperties{
Template: t,
Parameters: params,
@ -29,8 +29,8 @@ func (i *manager) deployARMTemplate(ctx context.Context, rg string, tName string
})
if azureerrors.IsDeploymentActiveError(err) {
i.log.Printf("waiting for %s template to be deployed", tName)
err = i.deployments.Wait(ctx, rg, deploymentName)
m.log.Printf("waiting for %s template to be deployed", tName)
err = m.deployments.Wait(ctx, rg, deploymentName)
}
if azureerrors.HasAuthorizationFailedError(err) ||
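
deployARMTemplate's control flow is easy to miss in the rename noise: if ARM reports that a deployment with the same name is already running, the manager waits for it to finish instead of failing outright. A minimal, self-contained sketch of that flow, assuming a hypothetical deployer interface and a sentinel error in place of the real deployments client and azureerrors helpers:

package main

import (
	"context"
	"errors"
	"fmt"
)

// errDeploymentActive stands in for the Azure "DeploymentActive" service
// error that azureerrors.IsDeploymentActiveError detects.
var errDeploymentActive = errors.New("DeploymentActive")

// deployer is a hypothetical subset of the ARM deployments client.
type deployer interface {
	CreateOrUpdateAndWait(ctx context.Context, resourceGroup, name string) error
	Wait(ctx context.Context, resourceGroup, name string) error
}

// deploy mirrors deployARMTemplate's control flow: if a deployment with the
// same name is already in flight, wait for it to finish instead of failing.
func deploy(ctx context.Context, d deployer, resourceGroup, name string) error {
	err := d.CreateOrUpdateAndWait(ctx, resourceGroup, name)
	if errors.Is(err, errDeploymentActive) {
		fmt.Printf("waiting for %s template to be deployed\n", name)
		err = d.Wait(ctx, resourceGroup, name)
	}
	return err
}

// fakeDeployer simulates colliding with an already-running deployment.
type fakeDeployer struct{}

func (fakeDeployer) CreateOrUpdateAndWait(ctx context.Context, rg, name string) error {
	return errDeploymentActive
}

func (fakeDeployer) Wait(ctx context.Context, rg, name string) error { return nil }

func main() {
	fmt.Println(deploy(context.Background(), fakeDeployer{}, "rg", "resources"))
}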

View file

@ -9,16 +9,16 @@ import (
"github.com/Azure/ARO-RP/pkg/operator/deploy"
)
func (i *manager) ensureAROOperator(ctx context.Context) error {
dep, err := deploy.New(i.log, i.env, i.doc.OpenShiftCluster, i.kubernetescli, i.extcli, i.arocli)
func (m *manager) ensureAROOperator(ctx context.Context) error {
dep, err := deploy.New(m.log, m.env, m.doc.OpenShiftCluster, m.kubernetescli, m.extcli, m.arocli)
if err != nil {
return err
}
return dep.CreateOrUpdate()
}
func (i *manager) aroDeploymentReady(ctx context.Context) (bool, error) {
dep, err := deploy.New(i.log, i.env, i.doc.OpenShiftCluster, i.kubernetescli, i.extcli, i.arocli)
func (m *manager) aroDeploymentReady(ctx context.Context) (bool, error) {
dep, err := deploy.New(m.log, m.env, m.doc.OpenShiftCluster, m.kubernetescli, m.extcli, m.arocli)
if err != nil {
return false, err
}

View file

@ -7,6 +7,6 @@ import (
"context"
)
func (i *manager) ensureBillingRecord(ctx context.Context) error {
return i.billing.Ensure(ctx, i.doc)
func (m *manager) ensureBillingRecord(ctx context.Context) error {
return m.billing.Ensure(ctx, m.doc)
}

View file

@ -47,12 +47,12 @@ func TestEnsureBillingEntry(t *testing.T) {
billing := mock_billing.NewMockManager(controller)
tt.mocks(billing)
i := &manager{
m := &manager{
doc: &api.OpenShiftClusterDocument{},
billing: billing,
}
err := i.ensureBillingRecord(ctx)
err := m.ensureBillingRecord(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)

View file

@ -16,11 +16,11 @@ import (
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (i *manager) getBlobService(ctx context.Context, p mgmtstorage.Permissions, r mgmtstorage.SignedResourceTypes) (*azstorage.BlobStorageClient, error) {
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
func (m *manager) getBlobService(ctx context.Context, p mgmtstorage.Permissions, r mgmtstorage.SignedResourceTypes) (*azstorage.BlobStorageClient, error) {
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
t := time.Now().UTC().Truncate(time.Second)
res, err := i.accounts.ListAccountSAS(ctx, resourceGroup, "cluster"+i.doc.OpenShiftCluster.Properties.StorageSuffix, mgmtstorage.AccountSasParameters{
res, err := m.accounts.ListAccountSAS(ctx, resourceGroup, "cluster"+m.doc.OpenShiftCluster.Properties.StorageSuffix, mgmtstorage.AccountSasParameters{
Services: "b",
ResourceTypes: r,
Permissions: p,
@ -37,7 +37,7 @@ func (i *manager) getBlobService(ctx context.Context, p mgmtstorage.Permissions,
return nil, err
}
c := azstorage.NewAccountSASClient("cluster"+i.doc.OpenShiftCluster.Properties.StorageSuffix, v, azure.PublicCloud).GetBlobService()
c := azstorage.NewAccountSASClient("cluster"+m.doc.OpenShiftCluster.Properties.StorageSuffix, v, azure.PublicCloud).GetBlobService()
return &c, nil
}

View file

@ -14,34 +14,34 @@ import (
// condition functions should return an error only if it's not retryable
// if a condition function encounters a retryable error it should return false, nil.
func (i *manager) bootstrapConfigMapReady(ctx context.Context) (bool, error) {
cm, err := i.kubernetescli.CoreV1().ConfigMaps("kube-system").Get("bootstrap", metav1.GetOptions{})
func (m *manager) bootstrapConfigMapReady(ctx context.Context) (bool, error) {
cm, err := m.kubernetescli.CoreV1().ConfigMaps("kube-system").Get("bootstrap", metav1.GetOptions{})
return err == nil && cm.Data["status"] == "complete", nil
}
func (i *manager) apiServersReady(ctx context.Context) (bool, error) {
apiserver, err := i.configcli.ConfigV1().ClusterOperators().Get("kube-apiserver", metav1.GetOptions{})
func (m *manager) apiServersReady(ctx context.Context) (bool, error) {
apiserver, err := m.configcli.ConfigV1().ClusterOperators().Get("kube-apiserver", metav1.GetOptions{})
if err != nil {
return false, nil
}
return isOperatorAvailable(apiserver), nil
}
func (i *manager) operatorConsoleExists(ctx context.Context) (bool, error) {
_, err := i.operatorcli.OperatorV1().Consoles().Get(consoleapi.ConfigResourceName, metav1.GetOptions{})
func (m *manager) operatorConsoleExists(ctx context.Context) (bool, error) {
_, err := m.operatorcli.OperatorV1().Consoles().Get(consoleapi.ConfigResourceName, metav1.GetOptions{})
return err == nil, nil
}
func (i *manager) operatorConsoleReady(ctx context.Context) (bool, error) {
consoleOperator, err := i.configcli.ConfigV1().ClusterOperators().Get("console", metav1.GetOptions{})
func (m *manager) operatorConsoleReady(ctx context.Context) (bool, error) {
consoleOperator, err := m.configcli.ConfigV1().ClusterOperators().Get("console", metav1.GetOptions{})
if err != nil {
return false, nil
}
return isOperatorAvailable(consoleOperator), nil
}
func (i *manager) clusterVersionReady(ctx context.Context) (bool, error) {
cv, err := i.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
func (m *manager) clusterVersionReady(ctx context.Context) (bool, error) {
cv, err := m.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
if err == nil {
for _, cond := range cv.Status.Conditions {
if cond.Type == configv1.OperatorAvailable && cond.Status == configv1.ConditionTrue {
@ -52,8 +52,8 @@ func (i *manager) clusterVersionReady(ctx context.Context) (bool, error) {
return false, nil
}
func (i *manager) ingressControllerReady(ctx context.Context) (bool, error) {
ingressOperator, err := i.configcli.ConfigV1().ClusterOperators().Get("ingress", metav1.GetOptions{})
func (m *manager) ingressControllerReady(ctx context.Context) (bool, error) {
ingressOperator, err := m.configcli.ConfigV1().ClusterOperators().Get("ingress", metav1.GetOptions{})
if err != nil {
return false, nil
}
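
The comment at the top of this file states the contract for condition functions: return (false, nil) on retryable failures and reserve a non-nil error for fatal cases, so the step runner keeps polling. A minimal sketch of a poller honoring that contract, using an illustrative pollCondition helper rather than the real steps.Condition implementation:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type conditionFunc func(ctx context.Context) (bool, error)

// pollCondition retries cond once a second until it reports ready, returns a
// non-nil (fatal) error, or the timeout elapses.
func pollCondition(ctx context.Context, cond conditionFunc, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	for {
		ready, err := cond(ctx)
		if err != nil {
			return err // fatal: the contract says retryable failures are (false, nil)
		}
		if ready {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.New("timed out waiting for condition")
		case <-time.After(time.Second):
		}
	}
}

func main() {
	calls := 0
	// Reports "not ready" twice (retryable), then ready on the third poll.
	err := pollCondition(context.Background(), func(ctx context.Context) (bool, error) {
		calls++
		return calls >= 3, nil
	}, 10*time.Second)
	fmt.Println(err)
}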

View file

@ -49,7 +49,7 @@ func TestBootstrapConfigMapReady(t *testing.T) {
want: true,
},
} {
i := &manager{
m := &manager{
kubernetescli: k8sfake.NewSimpleClientset(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: tt.configMapName,
@ -60,7 +60,7 @@ func TestBootstrapConfigMapReady(t *testing.T) {
},
}),
}
ready, err := i.bootstrapConfigMapReady(ctx)
ready, err := m.bootstrapConfigMapReady(ctx)
if err != nil {
t.Error(errMustBeNilMsg)
}
@ -87,14 +87,14 @@ func TestOperatorConsoleExists(t *testing.T) {
want: true,
},
} {
i := &manager{
m := &manager{
operatorcli: operatorfake.NewSimpleClientset(&operatorv1.Console{
ObjectMeta: metav1.ObjectMeta{
Name: tt.consoleName,
},
}),
}
ready, err := i.operatorConsoleExists(ctx)
ready, err := m.operatorConsoleExists(ctx)
if err != nil {
t.Error(errMustBeNilMsg)
}
@ -182,7 +182,7 @@ func TestClusterVersionReady(t *testing.T) {
want: true,
},
} {
i := &manager{
m := &manager{
configcli: configfake.NewSimpleClientset(&configv1.ClusterVersion{
ObjectMeta: metav1.ObjectMeta{
Name: tt.version,
@ -197,7 +197,7 @@ func TestClusterVersionReady(t *testing.T) {
},
}),
}
ready, err := i.clusterVersionReady(ctx)
ready, err := m.clusterVersionReady(ctx)
if err != nil {
t.Error(errMustBeNilMsg)
}

View file

@ -12,17 +12,17 @@ import (
"k8s.io/client-go/util/retry"
)
func (i *manager) updateConsoleBranding(ctx context.Context) error {
i.log.Print("updating console-operator branding")
func (m *manager) updateConsoleBranding(ctx context.Context) error {
m.log.Print("updating console-operator branding")
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
operatorConfig, err := i.operatorcli.OperatorV1().Consoles().Get(consoleapi.ConfigResourceName, metav1.GetOptions{})
operatorConfig, err := m.operatorcli.OperatorV1().Consoles().Get(consoleapi.ConfigResourceName, metav1.GetOptions{})
if err != nil {
return err
}
operatorConfig.Spec.Customization.Brand = operatorv1.BrandAzure
_, err = i.operatorcli.OperatorV1().Consoles().Update(operatorConfig)
_, err = m.operatorcli.OperatorV1().Consoles().Update(operatorConfig)
return err
})
}
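
updateConsoleBranding, disableUpdates, and fixPullSecret all share the same client-go idiom: a get-modify-update closure wrapped in retry.RetryOnConflict, so a concurrent writer bumping the object's resourceVersion simply triggers a retry against a fresh copy. A minimal sketch against a fake clientset, using the pre-0.18 client-go call signatures (no context argument) that this codebase uses:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/util/retry"
)

func main() {
	cli := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-config", Name: "demo"},
	})

	// Get-modify-update under RetryOnConflict: if another writer updates the
	// object between our Get and Update, the Update fails with a conflict
	// error and the closure runs again with a fresh copy.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cm, err := cli.CoreV1().ConfigMaps("openshift-config").Get("demo", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["brand"] = "azure"
		_, err = cli.CoreV1().ConfigMaps("openshift-config").Update(cm)
		return err
	})
	fmt.Println(err)
}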

View file

@ -18,7 +18,7 @@ func TestUpdateConsoleBranding(t *testing.T) {
consoleName := "cluster"
i := &manager{
m := &manager{
log: logrus.NewEntry(logrus.StandardLogger()),
operatorcli: fake.NewSimpleClientset(&operatorv1.Console{
ObjectMeta: metav1.ObjectMeta{
@ -37,12 +37,12 @@ func TestUpdateConsoleBranding(t *testing.T) {
}),
}
err := i.updateConsoleBranding(ctx)
err := m.updateConsoleBranding(ctx)
if err != nil {
t.Error(err)
}
console, err := i.operatorcli.OperatorV1().Consoles().Get(consoleName, metav1.GetOptions{})
console, err := m.operatorcli.OperatorV1().Consoles().Get(consoleName, metav1.GetOptions{})
if err != nil {
t.Error(err)
}

View file

@ -23,8 +23,8 @@ import (
"github.com/Azure/ARO-RP/pkg/util/subnet"
)
func (i *manager) deployResourceTemplate(ctx context.Context) error {
g, err := i.loadGraph(ctx)
func (m *manager) deployResourceTemplate(ctx context.Context) error {
g, err := m.loadGraph(ctx)
if err != nil {
return err
}
@ -32,14 +32,14 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
installConfig := g[reflect.TypeOf(&installconfig.InstallConfig{})].(*installconfig.InstallConfig)
machineMaster := g[reflect.TypeOf(&machine.Master{})].(*machine.Master)
infraID := i.doc.OpenShiftCluster.Properties.InfraID
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro" // TODO: remove after deploy
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
vnetID, _, err := subnet.Split(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID)
vnetID, _, err := subnet.Split(m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID)
if err != nil {
return err
}
@ -181,7 +181,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
{
PrivateLinkServiceIPConfigurationProperties: &mgmtnetwork.PrivateLinkServiceIPConfigurationProperties{
Subnet: &mgmtnetwork.Subnet{
ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
ID: to.StringPtr(m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
},
},
Name: to.StringPtr(infraID + "-pls-nic"),
@ -189,12 +189,12 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
},
Visibility: &mgmtnetwork.PrivateLinkServicePropertiesVisibility{
Subscriptions: &[]string{
i.env.SubscriptionID(),
m.env.SubscriptionID(),
},
},
AutoApproval: &mgmtnetwork.PrivateLinkServicePropertiesAutoApproval{
Subscriptions: &[]string{
i.env.SubscriptionID(),
m.env.SubscriptionID(),
},
},
},
@ -221,7 +221,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
},
APIVersion: azureclient.APIVersions["Microsoft.Network"],
},
i.apiServerPublicLoadBalancer(installConfig.Config.Azure.Region),
m.apiServerPublicLoadBalancer(installConfig.Config.Azure.Region),
{
Resource: &mgmtnetwork.LoadBalancer{
Sku: &mgmtnetwork.LoadBalancerSku{
@ -233,7 +233,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
FrontendIPConfigurationPropertiesFormat: &mgmtnetwork.FrontendIPConfigurationPropertiesFormat{
PrivateIPAllocationMethod: mgmtnetwork.Dynamic,
Subnet: &mgmtnetwork.Subnet{
ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
ID: to.StringPtr(m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
},
},
Name: to.StringPtr("internal-lb-ip-v4"),
@ -329,7 +329,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
},
},
Subnet: &mgmtnetwork.Subnet{
ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
ID: to.StringPtr(m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
},
},
Name: to.StringPtr("bootstrap-nic-ip-v4"),
@ -361,7 +361,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
},
},
Subnet: &mgmtnetwork.Subnet{
ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
ID: to.StringPtr(m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
},
},
Name: to.StringPtr("pipConfig"),
@ -409,7 +409,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
ComputerName: to.StringPtr(infraID + "-bootstrap-vm"),
AdminUsername: to.StringPtr("core"),
AdminPassword: to.StringPtr("NotActuallyApplied!"),
CustomData: to.StringPtr(`[base64(concat('{"ignition":{"version":"2.2.0","config":{"replace":{"source":"https://cluster` + i.doc.OpenShiftCluster.Properties.StorageSuffix + `.blob.core.windows.net/ignition/bootstrap.ign?', listAccountSas(resourceId('Microsoft.Storage/storageAccounts', 'cluster` + i.doc.OpenShiftCluster.Properties.StorageSuffix + `'), '2019-04-01', parameters('sas')).accountSasToken, '"}}}}'))]`),
CustomData: to.StringPtr(`[base64(concat('{"ignition":{"version":"2.2.0","config":{"replace":{"source":"https://cluster` + m.doc.OpenShiftCluster.Properties.StorageSuffix + `.blob.core.windows.net/ignition/bootstrap.ign?', listAccountSas(resourceId('Microsoft.Storage/storageAccounts', 'cluster` + m.doc.OpenShiftCluster.Properties.StorageSuffix + `'), '2019-04-01', parameters('sas')).accountSasToken, '"}}}}'))]`),
LinuxConfiguration: &mgmtcompute.LinuxConfiguration{
DisablePasswordAuthentication: to.BoolPtr(false),
},
@ -424,7 +424,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
DiagnosticsProfile: &mgmtcompute.DiagnosticsProfile{
BootDiagnostics: &mgmtcompute.BootDiagnostics{
Enabled: to.BoolPtr(true),
StorageURI: to.StringPtr("https://cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix + ".blob.core.windows.net/"),
StorageURI: to.StringPtr("https://cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix + ".blob.core.windows.net/"),
},
},
},
@ -480,7 +480,7 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
DiagnosticsProfile: &mgmtcompute.DiagnosticsProfile{
BootDiagnostics: &mgmtcompute.BootDiagnostics{
Enabled: to.BoolPtr(true),
StorageURI: to.StringPtr("https://cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix + ".blob.core.windows.net/"),
StorageURI: to.StringPtr("https://cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix + ".blob.core.windows.net/"),
},
},
},
@ -565,11 +565,11 @@ func (i *manager) deployResourceTemplate(ctx context.Context) error {
},
},
}
return i.deployARMTemplate(ctx, resourceGroup, "resources", t, map[string]interface{}{
return m.deployARMTemplate(ctx, resourceGroup, "resources", t, map[string]interface{}{
"sas": map[string]interface{}{
"value": map[string]interface{}{
"signedStart": i.doc.OpenShiftCluster.Properties.Install.Now.Format(time.RFC3339),
"signedExpiry": i.doc.OpenShiftCluster.Properties.Install.Now.Add(24 * time.Hour).Format(time.RFC3339),
"signedStart": m.doc.OpenShiftCluster.Properties.Install.Now.Format(time.RFC3339),
"signedExpiry": m.doc.OpenShiftCluster.Properties.Install.Now.Add(24 * time.Hour).Format(time.RFC3339),
"signedPermission": "rl",
"signedResourceTypes": "o",
"signedServices": "b",

View file

@ -40,15 +40,15 @@ import (
"github.com/Azure/ARO-RP/pkg/util/subnet"
)
func (i *manager) createDNS(ctx context.Context) error {
return i.dns.Create(ctx, i.doc.OpenShiftCluster)
func (m *manager) createDNS(ctx context.Context) error {
return m.dns.Create(ctx, m.doc.OpenShiftCluster)
}
func (i *manager) clusterSPObjectID(ctx context.Context) (string, error) {
func (m *manager) clusterSPObjectID(ctx context.Context) (string, error) {
var clusterSPObjectID string
spp := &i.doc.OpenShiftCluster.Properties.ServicePrincipalProfile
spp := &m.doc.OpenShiftCluster.Properties.ServicePrincipalProfile
token, err := aad.GetToken(ctx, i.log, i.doc.OpenShiftCluster, azure.PublicCloud.GraphEndpoint)
token, err := aad.GetToken(ctx, m.log, m.doc.OpenShiftCluster, azure.PublicCloud.GraphEndpoint)
if err != nil {
return "", err
}
@ -66,7 +66,7 @@ func (i *manager) clusterSPObjectID(ctx context.Context) (string, error) {
res, err = applications.GetServicePrincipalsIDByAppID(ctx, spp.ClientID)
if err != nil {
if strings.Contains(err.Error(), "Authorization_IdentityNotFound") {
i.log.Info(err)
m.log.Info(err)
return false, nil
}
return false, err
@ -79,15 +79,15 @@ func (i *manager) clusterSPObjectID(ctx context.Context) (string, error) {
return clusterSPObjectID, err
}
func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *installconfig.InstallConfig, platformCreds *installconfig.PlatformCreds, image *releaseimage.Image, bootstrapLoggingConfig *bootstraplogging.Config) error {
if i.doc.OpenShiftCluster.Properties.InfraID == "" {
func (m *manager) deployStorageTemplate(ctx context.Context, installConfig *installconfig.InstallConfig, platformCreds *installconfig.PlatformCreds, image *releaseimage.Image, bootstrapLoggingConfig *bootstraplogging.Config) error {
if m.doc.OpenShiftCluster.Properties.InfraID == "" {
clusterID := &installconfig.ClusterID{}
err := clusterID.Generate(asset.Parents{
reflect.TypeOf(installConfig): &installconfig.InstallConfig{
Config: &types.InstallConfig{
ObjectMeta: metav1.ObjectMeta{
Name: strings.ToLower(i.doc.OpenShiftCluster.Name),
Name: strings.ToLower(m.doc.OpenShiftCluster.Name),
},
},
},
@ -96,7 +96,7 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
return err
}
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.InfraID = clusterID.InfraID
return nil
})
@ -104,19 +104,19 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
return err
}
}
infraID := i.doc.OpenShiftCluster.Properties.InfraID
infraID := m.doc.OpenShiftCluster.Properties.InfraID
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
i.log.Print("creating resource group")
m.log.Print("creating resource group")
group := mgmtfeatures.ResourceGroup{
Location: &installConfig.Config.Azure.Region,
ManagedBy: to.StringPtr(i.doc.OpenShiftCluster.ID),
ManagedBy: to.StringPtr(m.doc.OpenShiftCluster.ID),
}
if i.env.DeploymentMode() == deployment.Development {
if m.env.DeploymentMode() == deployment.Development {
group.ManagedBy = nil
}
_, err := i.groups.CreateOrUpdate(ctx, resourceGroup, group)
_, err := m.groups.CreateOrUpdate(ctx, resourceGroup, group)
if requestErr, ok := err.(*azure.RequestError); ok &&
requestErr.ServiceError != nil && requestErr.ServiceError.Code == "RequestDisallowedByPolicy" {
// if request was disallowed by policy, inform user so they can take appropriate action
@ -138,12 +138,12 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
return err
}
err = i.env.CreateARMResourceGroupRoleAssignment(ctx, i.fpAuthorizer, resourceGroup)
err = m.env.CreateARMResourceGroupRoleAssignment(ctx, m.fpAuthorizer, resourceGroup)
if err != nil {
return err
}
clusterSPObjectID, err := i.clusterSPObjectID(ctx)
clusterSPObjectID, err := m.clusterSPObjectID(ctx)
if err != nil {
return err
}
@ -170,7 +170,7 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
Sku: &mgmtstorage.Sku{
Name: "Standard_LRS",
},
Name: to.StringPtr("cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix),
Name: to.StringPtr("cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix),
Location: &installConfig.Config.Azure.Region,
Type: to.StringPtr("Microsoft.Storage/storageAccounts"),
},
@ -178,25 +178,25 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
},
{
Resource: &mgmtstorage.BlobContainer{
Name: to.StringPtr("cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix + "/default/ignition"),
Name: to.StringPtr("cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix + "/default/ignition"),
Type: to.StringPtr("Microsoft.Storage/storageAccounts/blobServices/containers"),
},
APIVersion: azureclient.APIVersions["Microsoft.Storage"],
DependsOn: []string{
"Microsoft.Storage/storageAccounts/cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix,
"Microsoft.Storage/storageAccounts/cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix,
},
},
{
Resource: &mgmtstorage.BlobContainer{
Name: to.StringPtr("cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix + "/default/aro"),
Name: to.StringPtr("cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix + "/default/aro"),
Type: to.StringPtr("Microsoft.Storage/storageAccounts/blobServices/containers"),
},
APIVersion: azureclient.APIVersions["Microsoft.Storage"],
DependsOn: []string{
"Microsoft.Storage/storageAccounts/cluster" + i.doc.OpenShiftCluster.Properties.StorageSuffix,
"Microsoft.Storage/storageAccounts/cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix,
},
},
i.apiServerNSG(installConfig.Config.Azure.Region),
m.apiServerNSG(installConfig.Config.Azure.Region),
{
Resource: &mgmtnetwork.SecurityGroup{
Name: to.StringPtr(infraID + subnet.NSGNodeSuffix),
@ -208,22 +208,22 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
},
}
if i.env.DeploymentMode() == deployment.Production {
t.Resources = append(t.Resources, i.denyAssignments(clusterSPObjectID))
if m.env.DeploymentMode() == deployment.Production {
t.Resources = append(t.Resources, m.denyAssignments(clusterSPObjectID))
}
err = i.deployARMTemplate(ctx, resourceGroup, "storage", t, nil)
err = m.deployARMTemplate(ctx, resourceGroup, "storage", t, nil)
if err != nil {
return err
}
exists, err := i.graphExists(ctx)
exists, err := m.graphExists(ctx)
if err != nil || exists {
return err
}
clusterID := &installconfig.ClusterID{
UUID: i.doc.ID,
UUID: m.doc.ID,
InfraID: infraID,
}
@ -235,7 +235,7 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
reflect.TypeOf(bootstrapLoggingConfig): bootstrapLoggingConfig,
}
i.log.Print("resolving graph")
m.log.Print("resolving graph")
for _, a := range targets.Cluster {
_, err := g.resolve(a)
if err != nil {
@ -244,15 +244,15 @@ func (i *manager) deployStorageTemplate(ctx context.Context, installConfig *inst
}
// the graph is quite big so we store it in a storage account instead of in cosmosdb
return i.saveGraph(ctx, g)
return m.saveGraph(ctx, g)
}
func (i *manager) denyAssignments(clusterSPObjectID string) *arm.Resource {
func (m *manager) denyAssignments(clusterSPObjectID string) *arm.Resource {
notActions := []string{
"Microsoft.Network/networkSecurityGroups/join/action",
}
if feature.IsRegisteredForFeature(i.subscriptionDoc.Subscription.Properties, "Microsoft.RedHatOpenShift/EnableSnapshots") {
if feature.IsRegisteredForFeature(m.subscriptionDoc.Subscription.Properties, "Microsoft.RedHatOpenShift/EnableSnapshots") {
notActions = append(notActions, []string{
"Microsoft.Compute/disks/beginGetAccess/action",
"Microsoft.Compute/disks/endGetAccess/action",
@ -280,7 +280,7 @@ func (i *manager) denyAssignments(clusterSPObjectID string) *arm.Resource {
NotActions: &notActions,
},
},
Scope: &i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID,
Scope: &m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID,
Principals: &[]mgmtauthorization.Principal{
{
ID: to.StringPtr("00000000-0000-0000-0000-000000000000"),
@ -300,15 +300,15 @@ func (i *manager) denyAssignments(clusterSPObjectID string) *arm.Resource {
}
}
func (i *manager) deploySnapshotUpgradeTemplate(ctx context.Context) error {
if i.env.DeploymentMode() != deployment.Production {
func (m *manager) deploySnapshotUpgradeTemplate(ctx context.Context) error {
if m.env.DeploymentMode() != deployment.Production {
// only need this upgrade in production, where there are DenyAssignments
return nil
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
clusterSPObjectID, err := i.clusterSPObjectID(ctx)
clusterSPObjectID, err := m.clusterSPObjectID(ctx)
if err != nil {
return err
}
@ -316,27 +316,27 @@ func (i *manager) deploySnapshotUpgradeTemplate(ctx context.Context) error {
t := &arm.Template{
Schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
ContentVersion: "1.0.0.0",
Resources: []*arm.Resource{i.denyAssignments(clusterSPObjectID)},
Resources: []*arm.Resource{m.denyAssignments(clusterSPObjectID)},
}
return i.deployARMTemplate(ctx, resourceGroup, "storage", t, nil)
return m.deployARMTemplate(ctx, resourceGroup, "storage", t, nil)
}
func (i *manager) attachNSGsAndPatch(ctx context.Context) error {
g, err := i.loadGraph(ctx)
func (m *manager) attachNSGsAndPatch(ctx context.Context) error {
g, err := m.loadGraph(ctx)
if err != nil {
return err
}
for _, subnetID := range []string{
i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID,
i.doc.OpenShiftCluster.Properties.WorkerProfiles[0].SubnetID,
m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID,
m.doc.OpenShiftCluster.Properties.WorkerProfiles[0].SubnetID,
} {
i.log.Printf("attaching network security group to subnet %s", subnetID)
m.log.Printf("attaching network security group to subnet %s", subnetID)
// TODO: there is probably an undesirable race condition here - check if etags can help.
s, err := i.subnet.Get(ctx, subnetID)
s, err := m.subnet.Get(ctx, subnetID)
if err != nil {
return err
}
@ -345,7 +345,7 @@ func (i *manager) attachNSGsAndPatch(ctx context.Context) error {
s.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
}
nsgID, err := subnet.NetworkSecurityGroupID(i.doc.OpenShiftCluster, subnetID)
nsgID, err := subnet.NetworkSecurityGroupID(m.doc.OpenShiftCluster, subnetID)
if err != nil {
return err
}
@ -366,19 +366,19 @@ func (i *manager) attachNSGsAndPatch(ctx context.Context) error {
ID: to.StringPtr(nsgID),
}
err = i.subnet.CreateOrUpdate(ctx, subnetID, s)
err = m.subnet.CreateOrUpdate(ctx, subnetID, s)
if err != nil {
return err
}
}
adminInternalClient := g[reflect.TypeOf(&kubeconfig.AdminInternalClient{})].(*kubeconfig.AdminInternalClient)
aroServiceInternalClient, err := i.generateAROServiceKubeconfig(g)
aroServiceInternalClient, err := m.generateAROServiceKubeconfig(g)
if err != nil {
return err
}
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
// used for the SAS token with which the bootstrap node retrieves its
// ignition payload
var t time.Time
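
Several steps in this file persist state through m.db.PatchWithLease: re-read the latest cluster document, apply the caller's mutation, write the result back, and keep m.doc pointed at the fresh copy. A minimal sketch with a hypothetical in-memory store; the real implementation also holds a lease on the Cosmos DB document and retries on write conflicts:

package main

import (
	"context"
	"errors"
	"fmt"
)

// clusterDoc and store are hypothetical stand-ins for the cluster document
// and its database.
type clusterDoc struct {
	Key     string
	InfraID string
}

type store struct{ docs map[string]*clusterDoc }

func (s *store) patchWithLease(ctx context.Context, key string, mutate func(*clusterDoc) error) (*clusterDoc, error) {
	cur, ok := s.docs[key]
	if !ok {
		return nil, errors.New("document not found")
	}
	next := *cur // mutate a copy so a failed mutation leaves the stored doc intact
	if err := mutate(&next); err != nil {
		return nil, err
	}
	s.docs[key] = &next
	return &next, nil
}

func main() {
	s := &store{docs: map[string]*clusterDoc{"k": {Key: "k"}}}
	// Mirrors deployStorageTemplate persisting the generated infra ID
	// (the ID value here is purely illustrative).
	doc, err := s.patchWithLease(context.Background(), "k", func(d *clusterDoc) error {
		d.InfraID = "aro-2kjf8"
		return nil
	})
	fmt.Println(doc.InfraID, err)
}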

View file

@ -41,7 +41,7 @@ func TestDenyAssignments(t *testing.T) {
},
} {
t.Run(tt.name, func(t *testing.T) {
i := &manager{
m := &manager{
doc: &api.OpenShiftClusterDocument{
OpenShiftCluster: &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
@ -64,7 +64,7 @@ func TestDenyAssignments(t *testing.T) {
},
},
}
exceptionsToDeniedActions := *(*((i.denyAssignments("testing").Resource).(*mgmtauthorization.DenyAssignment).
exceptionsToDeniedActions := *(*((m.denyAssignments("testing").Resource).(*mgmtauthorization.DenyAssignment).
DenyAssignmentProperties.Permissions))[0].NotActions
if !reflect.DeepEqual(exceptionsToDeniedActions, tt.want) {

View file

@ -10,9 +10,9 @@ import (
"k8s.io/client-go/util/retry"
)
func (i *manager) disableUpdates(ctx context.Context) error {
func (m *manager) disableUpdates(ctx context.Context) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
cv, err := i.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
cv, err := m.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
if err != nil {
return err
}
@ -20,7 +20,7 @@ func (i *manager) disableUpdates(ctx context.Context) error {
cv.Spec.Upstream = ""
cv.Spec.Channel = ""
_, err = i.configcli.ConfigV1().ClusterVersions().Update(cv)
_, err = m.configcli.ConfigV1().ClusterVersions().Update(cv)
return err
})
}

View file

@ -17,7 +17,7 @@ func TestDisableUpdates(t *testing.T) {
versionName := "version"
i := &manager{
m := &manager{
configcli: fake.NewSimpleClientset(&v1.ClusterVersion{
ObjectMeta: metav1.ObjectMeta{
Name: versionName,
@ -29,12 +29,12 @@ func TestDisableUpdates(t *testing.T) {
}),
}
err := i.disableUpdates(ctx)
err := m.disableUpdates(ctx)
if err != nil {
t.Error(err)
}
cv, err := i.configcli.ConfigV1().ClusterVersions().Get(versionName, metav1.GetOptions{})
cv, err := m.configcli.ConfigV1().ClusterVersions().Get(versionName, metav1.GetOptions{})
if err != nil {
t.Error(err)
}

View file

@ -18,13 +18,13 @@ import (
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (i *manager) fixLBProbeConfig(ctx context.Context, resourceGroup, lbName string) error {
mcsCertIsMalformed, err := i.mcsCertIsMalformed()
func (m *manager) fixLBProbeConfig(ctx context.Context, resourceGroup, lbName string) error {
mcsCertIsMalformed, err := m.mcsCertIsMalformed()
if err != nil {
return err
}
lb, err := i.loadbalancers.Get(ctx, resourceGroup, lbName, "")
lb, err := m.loadbalancers.Get(ctx, resourceGroup, lbName, "")
if err != nil {
return err
}
@ -68,22 +68,22 @@ loop:
return nil
}
return i.loadbalancers.CreateOrUpdateAndWait(ctx, resourceGroup, lbName, lb)
return m.loadbalancers.CreateOrUpdateAndWait(ctx, resourceGroup, lbName, lb)
}
func (i *manager) fixLBProbes(ctx context.Context) error {
infraID := i.doc.OpenShiftCluster.Properties.InfraID
func (m *manager) fixLBProbes(ctx context.Context) error {
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro"
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
for _, lbName := range []string{
infraID + "-public-lb",
infraID + "-internal-lb",
} {
err := i.fixLBProbeConfig(ctx, resourceGroup, lbName)
err := m.fixLBProbeConfig(ctx, resourceGroup, lbName)
if err != nil {
return err
}
@ -96,8 +96,8 @@ func (i *manager) fixLBProbes(ctx context.Context) error {
// authority key identifier equals the subject key identifier, which is
// non-compliant and is rejected by Azure SLB. This provisioning error was
// fixed in 4a7415a4 but clusters pre-dating the fix still exist.
func (i *manager) mcsCertIsMalformed() (bool, error) {
s, err := i.kubernetescli.CoreV1().Secrets("openshift-machine-config-operator").Get("machine-config-server-tls", metav1.GetOptions{})
func (m *manager) mcsCertIsMalformed() (bool, error) {
s, err := m.kubernetescli.CoreV1().Secrets("openshift-machine-config-operator").Get("machine-config-server-tls", metav1.GetOptions{})
if err != nil {
return false, err
}

View file

@ -468,7 +468,7 @@ func TestFixLBProbes(t *testing.T) {
loadbalancersClient := mock_network.NewMockLoadBalancersClient(controller)
tt.mocks(loadbalancersClient)
i := &manager{
m := &manager{
kubernetescli: kubernetescli,
loadbalancers: loadbalancersClient,
doc: &api.OpenShiftClusterDocument{
@ -483,7 +483,7 @@ func TestFixLBProbes(t *testing.T) {
},
}
err := i.fixLBProbes(ctx)
err := m.fixLBProbes(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)

View file

@ -13,19 +13,19 @@ import (
"github.com/Azure/ARO-RP/pkg/util/subnet"
)
func (i *manager) fixNSG(ctx context.Context) error {
if i.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
func (m *manager) fixNSG(ctx context.Context) error {
if m.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
return nil
}
infraID := i.doc.OpenShiftCluster.Properties.InfraID
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro"
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
nsg, err := i.securitygroups.Get(ctx, resourceGroup, infraID+subnet.NSGControlPlaneSuffix, "")
nsg, err := m.securitygroups.Get(ctx, resourceGroup, infraID+subnet.NSGControlPlaneSuffix, "")
if err != nil {
return err
}
@ -54,5 +54,5 @@ func (i *manager) fixNSG(ctx context.Context) error {
nsg.SecurityRules = &rules
return i.securitygroups.CreateOrUpdateAndWait(ctx, resourceGroup, infraID+subnet.NSGControlPlaneSuffix, nsg)
return m.securitygroups.CreateOrUpdateAndWait(ctx, resourceGroup, infraID+subnet.NSGControlPlaneSuffix, nsg)
}

View file

@ -78,7 +78,7 @@ func TestFixNSG(t *testing.T) {
tt.mocks(securitygroupsClient)
}
i := &manager{
m := &manager{
securitygroups: securitygroupsClient,
doc: &api.OpenShiftClusterDocument{
OpenShiftCluster: &api.OpenShiftCluster{
@ -95,7 +95,7 @@ func TestFixNSG(t *testing.T) {
},
}
err := i.fixNSG(ctx)
err := m.fixNSG(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)

View file

@ -13,17 +13,17 @@ import (
"github.com/Azure/ARO-RP/pkg/util/pullsecret"
)
func (i *manager) fixPullSecret(ctx context.Context) error {
func (m *manager) fixPullSecret(ctx context.Context) error {
// TODO: this function does not currently reapply a pull secret in
// development mode.
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
ps, err := i.kubernetescli.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
ps, err := m.kubernetescli.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
if err != nil {
return err
}
pullSecret, changed, err := pullsecret.SetRegistryProfiles(string(ps.Data[v1.DockerConfigJsonKey]), i.doc.OpenShiftCluster.Properties.RegistryProfiles...)
pullSecret, changed, err := pullsecret.SetRegistryProfiles(string(ps.Data[v1.DockerConfigJsonKey]), m.doc.OpenShiftCluster.Properties.RegistryProfiles...)
if err != nil {
return err
}
@ -34,7 +34,7 @@ func (i *manager) fixPullSecret(ctx context.Context) error {
ps.Data[v1.DockerConfigJsonKey] = []byte(pullSecret)
_, err = i.kubernetescli.CoreV1().Secrets("openshift-config").Update(ps)
_, err = m.kubernetescli.CoreV1().Secrets("openshift-config").Update(ps)
return err
})
}

View file

@ -82,7 +82,7 @@ func TestFixPullSecret(t *testing.T) {
return false, nil, nil
})
i := &manager{
m := &manager{
kubernetescli: fakecli,
doc: &api.OpenShiftClusterDocument{
OpenShiftCluster: &api.OpenShiftCluster{
@ -93,7 +93,7 @@ func TestFixPullSecret(t *testing.T) {
},
}
err := i.fixPullSecret(ctx)
err := m.fixPullSecret(ctx)
if err != nil {
t.Error(err)
}
@ -102,7 +102,7 @@ func TestFixPullSecret(t *testing.T) {
t.Fatal(updated)
}
s, err := i.kubernetescli.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
s, err := m.kubernetescli.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
if err != nil {
t.Error(err)
}

View file

@ -12,14 +12,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (i *manager) gatherFailureLogs(ctx context.Context) {
func (m *manager) gatherFailureLogs(ctx context.Context) {
for _, f := range []func(context.Context) (interface{}, error){
i.logClusterVersion,
i.logClusterOperators,
m.logClusterVersion,
m.logClusterOperators,
} {
o, err := f(ctx)
if err != nil {
i.log.Error(err)
m.log.Error(err)
continue
}
if o == nil {
@ -28,26 +28,26 @@ func (i *manager) gatherFailureLogs(ctx context.Context) {
b, err := json.Marshal(o)
if err != nil {
i.log.Error(err)
m.log.Error(err)
continue
}
i.log.Printf("%s: %s", runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(), string(b))
m.log.Printf("%s: %s", runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(), string(b))
}
}
func (i *manager) logClusterVersion(ctx context.Context) (interface{}, error) {
if i.configcli == nil {
func (m *manager) logClusterVersion(ctx context.Context) (interface{}, error) {
if m.configcli == nil {
return nil, nil
}
return i.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
return m.configcli.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
}
func (i *manager) logClusterOperators(ctx context.Context) (interface{}, error) {
if i.configcli == nil {
func (m *manager) logClusterOperators(ctx context.Context) (interface{}, error) {
if m.configcli == nil {
return nil, nil
}
return i.configcli.ConfigV1().ClusterOperators().List(metav1.ListOptions{})
return m.configcli.ConfigV1().ClusterOperators().List(metav1.ListOptions{})
}
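
gatherFailureLogs is best-effort by design: each collector may return (nil, nil) when its client is not yet initialized, errors are logged and skipped, and runtime.FuncForPC recovers the collector's name for the log line. A minimal runnable sketch of that loop with two illustrative collectors:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"runtime"
)

// Two illustrative collectors in the same shape as logClusterVersion and
// logClusterOperators: best-effort, returning nil when nothing is available.
func clusterVersion(ctx context.Context) (interface{}, error) {
	return map[string]string{"version": "4.3.x"}, nil
}

func clusterOperators(ctx context.Context) (interface{}, error) {
	return nil, nil // e.g. configcli not initialized yet
}

func main() {
	ctx := context.Background()
	for _, f := range []func(context.Context) (interface{}, error){
		clusterVersion,
		clusterOperators,
	} {
		o, err := f(ctx)
		if err != nil {
			fmt.Println(err)
			continue
		}
		if o == nil {
			continue
		}
		b, err := json.Marshal(o)
		if err != nil {
			fmt.Println(err)
			continue
		}
		// runtime.FuncForPC recovers the collector's name for the log line.
		name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
		fmt.Printf("%s: %s\n", name, b)
	}
}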

View file

@ -153,10 +153,10 @@ func (g graph) resolve(a asset.Asset) (asset.Asset, error) {
return g[reflect.TypeOf(a)], nil
}
func (i *manager) graphExists(ctx context.Context) (bool, error) {
i.log.Print("checking if graph exists")
func (m *manager) graphExists(ctx context.Context) (bool, error) {
m.log.Print("checking if graph exists")
blobService, err := i.getBlobService(ctx, mgmtstorage.Permissions("r"), mgmtstorage.SignedResourceTypesO)
blobService, err := m.getBlobService(ctx, mgmtstorage.Permissions("r"), mgmtstorage.SignedResourceTypesO)
if err != nil {
return false, err
}
@ -165,10 +165,10 @@ func (i *manager) graphExists(ctx context.Context) (bool, error) {
return aro.GetBlobReference("graph").Exists()
}
func (i *manager) loadGraph(ctx context.Context) (graph, error) {
i.log.Print("load graph")
func (m *manager) loadGraph(ctx context.Context) (graph, error) {
m.log.Print("load graph")
blobService, err := i.getBlobService(ctx, mgmtstorage.Permissions("r"), mgmtstorage.SignedResourceTypesO)
blobService, err := m.getBlobService(ctx, mgmtstorage.Permissions("r"), mgmtstorage.SignedResourceTypesO)
if err != nil {
return nil, err
}
@ -186,7 +186,7 @@ func (i *manager) loadGraph(ctx context.Context) (graph, error) {
return nil, err
}
output, err := i.cipher.Decrypt(encrypted)
output, err := m.cipher.Decrypt(encrypted)
if err != nil {
return nil, err
}
@ -200,10 +200,10 @@ func (i *manager) loadGraph(ctx context.Context) (graph, error) {
return g, nil
}
func (i *manager) saveGraph(ctx context.Context, g graph) error {
i.log.Print("save graph")
func (m *manager) saveGraph(ctx context.Context, g graph) error {
m.log.Print("save graph")
blobService, err := i.getBlobService(ctx, mgmtstorage.Permissions("cw"), mgmtstorage.SignedResourceTypesO)
blobService, err := m.getBlobService(ctx, mgmtstorage.Permissions("cw"), mgmtstorage.SignedResourceTypesO)
if err != nil {
return err
}
@ -221,7 +221,7 @@ func (i *manager) saveGraph(ctx context.Context, g graph) error {
return err
}
output, err := i.cipher.Encrypt(b)
output, err := m.cipher.Encrypt(b)
if err != nil {
return err
}
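
saveGraph and loadGraph bracket the installer's asset graph with encryption: marshal to JSON, encrypt with m.cipher, write the blob, and reverse the steps on load. A minimal sketch of the round trip, using AES-GCM as an assumed stand-in for the real cipher, whose implementation this diff does not show:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

// encrypt/decrypt are an assumed AES-GCM stand-in for m.cipher; the nonce is
// prepended to the ciphertext, as is conventional for GCM.
func encrypt(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

func decrypt(key, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(data) < gcm.NonceSize() {
		return nil, errors.New("ciphertext too short")
	}
	nonce, ciphertext := data[:gcm.NonceSize()], data[gcm.NonceSize():]
	return gcm.Open(nil, nonce, ciphertext, nil)
}

func main() {
	key := make([]byte, 32) // AES-256 key; the real key comes from the RP's secrets
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}
	graph := map[string]string{"installconfig": "sample"} // stand-in for the asset graph
	b, err := json.Marshal(graph)
	if err != nil {
		panic(err)
	}
	blob, err := encrypt(key, b) // what saveGraph writes to the aro/graph blob
	if err != nil {
		panic(err)
	}
	out, err := decrypt(key, blob) // what loadGraph reads back and unmarshals
	fmt.Println(string(out), err)
}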

View file

@ -26,93 +26,93 @@ import (
)
// AdminUpgrade performs an admin upgrade of an ARO cluster
func (i *manager) AdminUpgrade(ctx context.Context) error {
func (m *manager) AdminUpgrade(ctx context.Context) error {
steps := []steps.Step{
steps.Action(i.initializeKubernetesClients), // must be first
steps.Action(i.deploySnapshotUpgradeTemplate),
steps.Action(i.startVMs),
steps.Condition(i.apiServersReady, 30*time.Minute),
steps.Action(i.ensureBillingRecord), // belt and braces
steps.Action(i.fixLBProbes),
steps.Action(i.fixNSG),
steps.Action(i.fixPullSecret), // TODO(mj): Remove when operator deployed
steps.Action(i.ensureRouteFix),
steps.Action(i.ensureAROOperator),
steps.Condition(i.aroDeploymentReady, 10*time.Minute),
steps.Action(i.upgradeCertificates),
steps.Action(i.configureAPIServerCertificate),
steps.Action(i.configureIngressCertificate),
steps.Action(i.addResourceProviderVersion), // Run this last so we capture the resource provider only once the upgrade has been fully performed
steps.Action(m.initializeKubernetesClients), // must be first
steps.Action(m.deploySnapshotUpgradeTemplate),
steps.Action(m.startVMs),
steps.Condition(m.apiServersReady, 30*time.Minute),
steps.Action(m.ensureBillingRecord), // belt and braces
steps.Action(m.fixLBProbes),
steps.Action(m.fixNSG),
steps.Action(m.fixPullSecret), // TODO(mj): Remove when operator deployed
steps.Action(m.ensureRouteFix),
steps.Action(m.ensureAROOperator),
steps.Condition(m.aroDeploymentReady, 10*time.Minute),
steps.Action(m.upgradeCertificates),
steps.Action(m.configureAPIServerCertificate),
steps.Action(m.configureIngressCertificate),
steps.Action(m.addResourceProviderVersion), // Run this last so we capture the resource provider only once the upgrade has been fully performed
}
return i.runSteps(ctx, steps)
return m.runSteps(ctx, steps)
}
// Install installs an ARO cluster
func (i *manager) Install(ctx context.Context, installConfig *installconfig.InstallConfig, platformCreds *installconfig.PlatformCreds, image *releaseimage.Image, bootstrapLoggingConfig *bootstraplogging.Config) error {
func (m *manager) Install(ctx context.Context, installConfig *installconfig.InstallConfig, platformCreds *installconfig.PlatformCreds, image *releaseimage.Image, bootstrapLoggingConfig *bootstraplogging.Config) error {
steps := map[api.InstallPhase][]steps.Step{
api.InstallPhaseBootstrap: {
steps.Action(i.createDNS),
steps.AuthorizationRefreshingAction(i.fpAuthorizer, steps.Action(func(ctx context.Context) error {
return i.deployStorageTemplate(ctx, installConfig, platformCreds, image, bootstrapLoggingConfig)
steps.Action(m.createDNS),
steps.AuthorizationRefreshingAction(m.fpAuthorizer, steps.Action(func(ctx context.Context) error {
return m.deployStorageTemplate(ctx, installConfig, platformCreds, image, bootstrapLoggingConfig)
})),
steps.AuthorizationRefreshingAction(i.fpAuthorizer, steps.Action(i.attachNSGsAndPatch)),
steps.Action(i.ensureBillingRecord),
steps.AuthorizationRefreshingAction(i.fpAuthorizer, steps.Action(i.deployResourceTemplate)),
steps.Action(i.createPrivateEndpoint),
steps.Action(i.updateAPIIP),
steps.Action(i.createCertificates),
steps.Action(i.initializeKubernetesClients),
steps.Condition(i.bootstrapConfigMapReady, 30*time.Minute),
steps.Action(i.ensureRouteFix),
steps.Action(i.ensureAROOperator),
steps.Action(i.incrInstallPhase),
steps.AuthorizationRefreshingAction(m.fpAuthorizer, steps.Action(m.attachNSGsAndPatch)),
steps.Action(m.ensureBillingRecord),
steps.AuthorizationRefreshingAction(m.fpAuthorizer, steps.Action(m.deployResourceTemplate)),
steps.Action(m.createPrivateEndpoint),
steps.Action(m.updateAPIIP),
steps.Action(m.createCertificates),
steps.Action(m.initializeKubernetesClients),
steps.Condition(m.bootstrapConfigMapReady, 30*time.Minute),
steps.Action(m.ensureRouteFix),
steps.Action(m.ensureAROOperator),
steps.Action(m.incrInstallPhase),
},
api.InstallPhaseRemoveBootstrap: {
steps.Action(i.initializeKubernetesClients),
steps.Action(i.removeBootstrap),
steps.Action(i.removeBootstrapIgnition),
steps.Action(i.configureAPIServerCertificate),
steps.Condition(i.apiServersReady, 30*time.Minute),
steps.Condition(i.operatorConsoleExists, 30*time.Minute),
steps.Action(i.updateConsoleBranding),
steps.Condition(i.operatorConsoleReady, 30*time.Minute),
steps.Condition(i.clusterVersionReady, 30*time.Minute),
steps.Condition(i.aroDeploymentReady, 10*time.Minute),
steps.Action(i.disableUpdates),
steps.Action(i.disableSamples),
steps.Action(i.disableOperatorHubSources),
steps.Action(i.updateRouterIP),
steps.Action(i.configureIngressCertificate),
steps.Condition(i.ingressControllerReady, 30*time.Minute),
steps.Action(i.finishInstallation),
steps.Action(i.addResourceProviderVersion),
steps.Action(m.initializeKubernetesClients),
steps.Action(m.removeBootstrap),
steps.Action(m.removeBootstrapIgnition),
steps.Action(m.configureAPIServerCertificate),
steps.Condition(m.apiServersReady, 30*time.Minute),
steps.Condition(m.operatorConsoleExists, 30*time.Minute),
steps.Action(m.updateConsoleBranding),
steps.Condition(m.operatorConsoleReady, 30*time.Minute),
steps.Condition(m.clusterVersionReady, 30*time.Minute),
steps.Condition(m.aroDeploymentReady, 10*time.Minute),
steps.Action(m.disableUpdates),
steps.Action(m.disableSamples),
steps.Action(m.disableOperatorHubSources),
steps.Action(m.updateRouterIP),
steps.Action(m.configureIngressCertificate),
steps.Condition(m.ingressControllerReady, 30*time.Minute),
steps.Action(m.finishInstallation),
steps.Action(m.addResourceProviderVersion),
},
}
err := i.startInstallation(ctx)
err := m.startInstallation(ctx)
if err != nil {
return err
}
if steps[i.doc.OpenShiftCluster.Properties.Install.Phase] == nil {
return fmt.Errorf("unrecognised phase %s", i.doc.OpenShiftCluster.Properties.Install.Phase)
if steps[m.doc.OpenShiftCluster.Properties.Install.Phase] == nil {
return fmt.Errorf("unrecognised phase %s", m.doc.OpenShiftCluster.Properties.Install.Phase)
}
i.log.Printf("starting phase %s", i.doc.OpenShiftCluster.Properties.Install.Phase)
return i.runSteps(ctx, steps[i.doc.OpenShiftCluster.Properties.Install.Phase])
m.log.Printf("starting phase %s", m.doc.OpenShiftCluster.Properties.Install.Phase)
return m.runSteps(ctx, steps[m.doc.OpenShiftCluster.Properties.Install.Phase])
}
func (i *manager) runSteps(ctx context.Context, s []steps.Step) error {
err := steps.Run(ctx, i.log, 10*time.Second, s)
func (m *manager) runSteps(ctx context.Context, s []steps.Step) error {
err := steps.Run(ctx, m.log, 10*time.Second, s)
if err != nil {
i.gatherFailureLogs(ctx)
m.gatherFailureLogs(ctx)
}
return err
}
func (i *manager) startInstallation(ctx context.Context) error {
func (m *manager) startInstallation(ctx context.Context) error {
var err error
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
if doc.OpenShiftCluster.Properties.Install == nil {
doc.OpenShiftCluster.Properties.Install = &api.Install{}
}
@ -121,18 +121,18 @@ func (i *manager) startInstallation(ctx context.Context) error {
return err
}
func (i *manager) incrInstallPhase(ctx context.Context) error {
func (m *manager) incrInstallPhase(ctx context.Context) error {
var err error
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.Install.Phase++
return nil
})
return err
}
func (i *manager) finishInstallation(ctx context.Context) error {
func (m *manager) finishInstallation(ctx context.Context) error {
var err error
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.Install = nil
return nil
})
@ -141,51 +141,51 @@ func (i *manager) finishInstallation(ctx context.Context) error {
// initializeKubernetesClients initializes clients which are used
// once the cluster is up later on in the install process.
func (i *manager) initializeKubernetesClients(ctx context.Context) error {
restConfig, err := restconfig.RestConfig(i.env, i.doc.OpenShiftCluster)
func (m *manager) initializeKubernetesClients(ctx context.Context) error {
restConfig, err := restconfig.RestConfig(m.env, m.doc.OpenShiftCluster)
if err != nil {
return err
}
i.kubernetescli, err = kubernetes.NewForConfig(restConfig)
m.kubernetescli, err = kubernetes.NewForConfig(restConfig)
if err != nil {
return err
}
i.extcli, err = extensionsclient.NewForConfig(restConfig)
m.extcli, err = extensionsclient.NewForConfig(restConfig)
if err != nil {
return err
}
i.operatorcli, err = operatorclient.NewForConfig(restConfig)
m.operatorcli, err = operatorclient.NewForConfig(restConfig)
if err != nil {
return err
}
i.securitycli, err = securityclient.NewForConfig(restConfig)
m.securitycli, err = securityclient.NewForConfig(restConfig)
if err != nil {
return err
}
i.samplescli, err = samplesclient.NewForConfig(restConfig)
m.samplescli, err = samplesclient.NewForConfig(restConfig)
if err != nil {
return err
}
i.arocli, err = aroclient.NewForConfig(restConfig)
m.arocli, err = aroclient.NewForConfig(restConfig)
if err != nil {
return err
}
i.configcli, err = configclient.NewForConfig(restConfig)
m.configcli, err = configclient.NewForConfig(restConfig)
return err
}
// addResourceProviderVersion sets the deploying resource provider version in
// the cluster document for deployment-tracking purposes.
func (i *manager) addResourceProviderVersion(ctx context.Context) error {
func (m *manager) addResourceProviderVersion(ctx context.Context) error {
var err error
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.ProvisionedBy = version.GitCommit
return nil
})
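
Install and AdminUpgrade both reduce to ordered slices of steps: Actions run once, Conditions poll until ready or a timeout. A minimal sketch of a step runner in that shape, with illustrative types rather than the real pkg/util/steps API:

package main

import (
	"context"
	"fmt"
	"time"
)

// step, action, and condition are illustrative shapes, not the real
// pkg/util/steps types.
type step interface {
	run(ctx context.Context) error
}

// action runs exactly once; its error aborts the phase.
type action func(ctx context.Context) error

func (a action) run(ctx context.Context) error { return a(ctx) }

// condition polls once a second until ready, a fatal error, or timeout.
type condition struct {
	f       func(ctx context.Context) (bool, error)
	timeout time.Duration
}

func (c condition) run(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()
	for {
		ready, err := c.f(ctx)
		if err != nil || ready {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}

func runSteps(ctx context.Context, steps []step) error {
	for _, s := range steps {
		if err := s.run(ctx); err != nil {
			return err // the real runner also gathers failure logs here
		}
	}
	return nil
}

func main() {
	phase := []step{
		action(func(ctx context.Context) error { fmt.Println("createDNS"); return nil }),
		condition{f: func(ctx context.Context) (bool, error) { return true, nil }, timeout: time.Minute},
	}
	fmt.Println(runSteps(context.Background(), phase))
}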

View file

@ -124,12 +124,12 @@ func TestStepRunnerWithInstaller(t *testing.T) {
} {
t.Run(tt.name, func(t *testing.T) {
h, log := test_log.NewCapturingLogger()
i := &manager{
m := &manager{
log: log,
configcli: tt.configcli,
}
err := i.runSteps(ctx, tt.steps)
err := m.runSteps(ctx, tt.steps)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)
@ -230,12 +230,12 @@ func TestDeployARMTemplate(t *testing.T) {
deploymentsClient := mock_features.NewMockDeploymentsClient(controller)
tt.mocks(deploymentsClient)
i := &manager{
m := &manager{
log: logrus.NewEntry(logrus.StandardLogger()),
deployments: deploymentsClient,
}
err := i.deployARMTemplate(ctx, resourceGroup, "test", armTemplate, params)
err := m.deployARMTemplate(ctx, resourceGroup, "test", armTemplate, params)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
@ -292,11 +292,11 @@ func TestAddResourceProviderVersion(t *testing.T) {
return docFromDatabase, err
})
i := &manager{
m := &manager{
doc: clusterdoc,
db: openshiftClusters,
}
err = i.addResourceProviderVersion(ctx)
err = m.addResourceProviderVersion(ctx)
if err != nil {
t.Error(err)
return

View file

@ -16,8 +16,8 @@ import (
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (i *manager) updateRouterIP(ctx context.Context) error {
g, err := i.loadGraph(ctx)
func (m *manager) updateRouterIP(ctx context.Context) error {
g, err := m.loadGraph(ctx)
if err != nil {
return err
}
@ -25,7 +25,7 @@ func (i *manager) updateRouterIP(ctx context.Context) error {
installConfig := g[reflect.TypeOf(&installconfig.InstallConfig{})].(*installconfig.InstallConfig)
kubeadminPassword := g[reflect.TypeOf(&password.KubeadminPassword{})].(*password.KubeadminPassword)
svc, err := i.kubernetescli.CoreV1().Services("openshift-ingress").Get("router-default", metav1.GetOptions{})
svc, err := m.kubernetescli.CoreV1().Services("openshift-ingress").Get("router-default", metav1.GetOptions{})
if err != nil {
return err
}
@ -36,12 +36,12 @@ func (i *manager) updateRouterIP(ctx context.Context) error {
routerIP := svc.Status.LoadBalancer.Ingress[0].IP
err = i.dns.CreateOrUpdateRouter(ctx, i.doc.OpenShiftCluster, routerIP)
err = m.dns.CreateOrUpdateRouter(ctx, m.doc.OpenShiftCluster, routerIP)
if err != nil {
return err
}
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.APIServerProfile.URL = "https://api." + installConfig.Config.ObjectMeta.Name + "." + installConfig.Config.BaseDomain + ":6443/"
doc.OpenShiftCluster.Properties.IngressProfiles[0].IP = routerIP
doc.OpenShiftCluster.Properties.ConsoleProfile.URL = "https://console-openshift-console.apps." + installConfig.Config.ObjectMeta.Name + "." + installConfig.Config.BaseDomain + "/"
@ -51,39 +51,39 @@ func (i *manager) updateRouterIP(ctx context.Context) error {
return err
}
func (i *manager) updateAPIIP(ctx context.Context) error {
infraID := i.doc.OpenShiftCluster.Properties.InfraID
func (m *manager) updateAPIIP(ctx context.Context) error {
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro" // TODO: remove after deploy
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
var ipAddress string
if i.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
ip, err := i.publicipaddresses.Get(ctx, resourceGroup, infraID+"-pip-v4", "")
if m.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
ip, err := m.publicipaddresses.Get(ctx, resourceGroup, infraID+"-pip-v4", "")
if err != nil {
return err
}
ipAddress = *ip.IPAddress
} else {
lb, err := i.loadbalancers.Get(ctx, resourceGroup, infraID+"-internal-lb", "")
lb, err := m.loadbalancers.Get(ctx, resourceGroup, infraID+"-internal-lb", "")
if err != nil {
return err
}
ipAddress = *((*lb.FrontendIPConfigurations)[0].PrivateIPAddress)
}
err := i.dns.Update(ctx, i.doc.OpenShiftCluster, ipAddress)
err := m.dns.Update(ctx, m.doc.OpenShiftCluster, ipAddress)
if err != nil {
return err
}
privateEndpointIP, err := i.privateendpoint.GetIP(ctx, i.doc)
privateEndpointIP, err := m.privateendpoint.GetIP(ctx, m.doc)
if err != nil {
return err
}
i.doc, err = i.db.PatchWithLease(ctx, i.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
doc.OpenShiftCluster.Properties.NetworkProfile.PrivateEndpointIP = privateEndpointIP
doc.OpenShiftCluster.Properties.APIServerProfile.IP = ipAddress
return nil
@ -91,6 +91,6 @@ func (i *manager) updateAPIIP(ctx context.Context) error {
return err
}
func (i *manager) createPrivateEndpoint(ctx context.Context) error {
return i.privateendpoint.Create(ctx, i.doc)
func (m *manager) createPrivateEndpoint(ctx context.Context) error {
return m.privateendpoint.Create(ctx, m.doc)
}

View file

@ -17,7 +17,7 @@ import (
// generateAROServiceKubeconfig generates additional admin credentials and kubeconfig
// based on admin kubeconfig found in graph
func (i *manager) generateAROServiceKubeconfig(g graph) (*kubeconfig.AdminInternalClient, error) {
func (m *manager) generateAROServiceKubeconfig(g graph) (*kubeconfig.AdminInternalClient, error) {
ca := g[reflect.TypeOf(&tls.AdminKubeConfigSignerCertKey{})].(*tls.AdminKubeConfigSignerCertKey)
cfg := &tls.CertCfg{
Subject: pkix.Name{CommonName: "system:aro-service", Organization: []string{"system:masters"}},

View file

@ -65,9 +65,9 @@ func TestGenerateAROServiceKubeconfig(t *testing.T) {
CurrentContext: serviceName,
}
i := &manager{}
m := &manager{}
aroServiceInternalClient, err := i.generateAROServiceKubeconfig(g)
aroServiceInternalClient, err := m.generateAROServiceKubeconfig(g)
if err != nil {
t.Fatal(err)
}

View file

@ -12,8 +12,8 @@ import (
"github.com/Azure/ARO-RP/pkg/util/azureclient"
)
func (i *manager) apiServerPublicLoadBalancer(location string) *arm.Resource {
infraID := i.doc.OpenShiftCluster.Properties.InfraID
func (m *manager) apiServerPublicLoadBalancer(location string) *arm.Resource {
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro" // TODO: remove after deploy
}
@ -61,7 +61,7 @@ func (i *manager) apiServerPublicLoadBalancer(location string) *arm.Resource {
Location: &location,
}
if i.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
if m.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
lb.LoadBalancingRules = &[]mgmtnetwork.LoadBalancingRule{
{
LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{

View file

@@ -13,8 +13,8 @@ import (
"github.com/Azure/ARO-RP/pkg/util/subnet"
)
func (i *manager) apiServerNSG(location string) *arm.Resource {
infraID := i.doc.OpenShiftCluster.Properties.InfraID
func (m *manager) apiServerNSG(location string) *arm.Resource {
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro" // TODO: remove after deploy
}
@@ -26,7 +26,7 @@ func (i *manager) apiServerNSG(location string) *arm.Resource {
Location: &location,
}
if i.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
if m.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
nsg.SecurityRules = &[]mgmtnetwork.SecurityRule{
{
SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
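
The rule body is elided from this hunk; for public clusters it is an inbound allow for the API server port. A hedged sketch of what such a rule looks like with the Azure network SDK (the SDK version in the import path, the rule name, the priority, and the port are assumptions here, not values read from this diff):

package main

import (
	"fmt"

	mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// An inbound TCP allow on 6443, the shape an apiserver NSG rule takes.
	rule := mgmtnetwork.SecurityRule{
		SecurityRulePropertiesFormat: &mgmtnetwork.SecurityRulePropertiesFormat{
			Protocol:                 mgmtnetwork.SecurityRuleProtocolTCP,
			SourcePortRange:          to.StringPtr("*"),
			DestinationPortRange:     to.StringPtr("6443"),
			SourceAddressPrefix:      to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("*"),
			Access:                   mgmtnetwork.SecurityRuleAccessAllow,
			Priority:                 to.Int32Ptr(120),
			Direction:                mgmtnetwork.SecurityRuleDirectionInbound,
		},
		Name: to.StringPtr("apiserver_in"),
	}
	fmt.Println(*rule.Name)
}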

View file

@@ -12,33 +12,33 @@ import (
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (i *manager) removeBootstrap(ctx context.Context) error {
infraID := i.doc.OpenShiftCluster.Properties.InfraID
func (m *manager) removeBootstrap(ctx context.Context) error {
infraID := m.doc.OpenShiftCluster.Properties.InfraID
if infraID == "" {
infraID = "aro" // TODO: remove after deploy
}
resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
i.log.Print("removing bootstrap vm")
err := i.virtualmachines.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap")
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
m.log.Print("removing bootstrap vm")
err := m.virtualmachines.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap")
if err != nil {
return err
}
i.log.Print("removing bootstrap disk")
err = i.disks.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap_OSDisk")
m.log.Print("removing bootstrap disk")
err = m.disks.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap_OSDisk")
if err != nil {
return err
}
i.log.Print("removing bootstrap nic")
return i.interfaces.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap-nic")
m.log.Print("removing bootstrap nic")
return m.interfaces.DeleteAndWait(ctx, resourceGroup, infraID+"-bootstrap-nic")
}
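
removeBootstrap is three delete-and-wait calls in a fixed order: the VM must go first, since Azure only releases its OS disk and NIC once the VM is gone. The same shape expressed as a data-driven loop, sketched with no-op stand-ins for the DeleteAndWait calls:

package main

import (
	"context"
	"fmt"
)

// step pairs a log message with a teardown call; the no-op functions in
// main stand in for the virtualmachines/disks/interfaces DeleteAndWait calls.
type step struct {
	msg string
	fn  func(context.Context) error
}

func teardown(ctx context.Context, steps []step) error {
	for _, s := range steps {
		fmt.Println(s.msg)
		if err := s.fn(ctx); err != nil {
			return err // stop at the first failure, as above
		}
	}
	return nil
}

func main() {
	noop := func(context.Context) error { return nil }
	err := teardown(context.Background(), []step{
		{"removing bootstrap vm", noop},
		{"removing bootstrap disk", noop},
		{"removing bootstrap nic", noop},
	})
	fmt.Println(err)
}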
func (i *manager) removeBootstrapIgnition(ctx context.Context) error {
i.log.Print("remove ignition config")
func (m *manager) removeBootstrapIgnition(ctx context.Context) error {
m.log.Print("remove ignition config")
blobService, err := i.getBlobService(ctx, mgmtstorage.Permissions("d"), mgmtstorage.SignedResourceTypesC)
blobService, err := m.getBlobService(ctx, mgmtstorage.Permissions("d"), mgmtstorage.SignedResourceTypesC)
if err != nil {
return err
}
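
The rest of removeBootstrapIgnition is elided from this hunk; it uses the delete-only SAS ("d") obtained here to remove the bootstrap ignition data. A hedged sketch of a container delete against the same azstorage client; the container name is an illustration, not taken from this diff:

package ignition // illustrative package name

import (
	azstorage "github.com/Azure/azure-sdk-for-go/storage"
)

// deleteIgnitionContainer removes the container holding the bootstrap
// ignition config; DeleteIfExists is a no-op when it is already gone.
func deleteIgnitionContainer(blobService *azstorage.BlobStorageClient) error {
	c := blobService.GetContainerReference("ignition") // hypothetical name
	_, err := c.DeleteIfExists(nil)
	return err
}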

View file

@@ -9,7 +9,7 @@ import (
"github.com/Azure/ARO-RP/pkg/routefix"
)
func (i *manager) ensureRouteFix(ctx context.Context) error {
rf := routefix.New(i.log, i.env, i.kubernetescli, i.securitycli)
func (m *manager) ensureRouteFix(ctx context.Context) error {
rf := routefix.New(m.log, m.env, m.kubernetescli, m.securitycli)
return rf.CreateOrUpdate(ctx)
}

View file

@@ -16,37 +16,37 @@ import (
)
// disableSamples disables the samples if there's no appropriate pull secret
func (i *manager) disableSamples(ctx context.Context) error {
if i.env.DeploymentMode() != deployment.Development &&
i.doc.OpenShiftCluster.Properties.ClusterProfile.PullSecret != "" {
func (m *manager) disableSamples(ctx context.Context) error {
if m.env.DeploymentMode() != deployment.Development &&
m.doc.OpenShiftCluster.Properties.ClusterProfile.PullSecret != "" {
return nil
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
c, err := i.samplescli.SamplesV1().Configs().Get("cluster", metav1.GetOptions{})
c, err := m.samplescli.SamplesV1().Configs().Get("cluster", metav1.GetOptions{})
if err != nil {
return err
}
c.Spec.ManagementState = operatorv1.Removed
_, err = i.samplescli.SamplesV1().Configs().Update(c)
_, err = m.samplescli.SamplesV1().Configs().Update(c)
return err
})
}
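
retry.RetryOnConflict re-runs the get-modify-update closure whenever the API server answers 409 Conflict, which is why the Get sits inside the closure: each retry must start from the latest resourceVersion. A self-contained sketch of the pattern with a simulated conflict in place of the samples client:

package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		attempts++
		if attempts == 1 {
			// First write loses the race; RetryOnConflict tries again.
			return kerrors.NewConflict(schema.GroupResource{Resource: "configs"}, "cluster", nil)
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}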
// disableOperatorHubSources disables operator hub sources if there's no
// appropriate pull secret
func (i *manager) disableOperatorHubSources(ctx context.Context) error {
if i.env.DeploymentMode() != deployment.Development &&
i.doc.OpenShiftCluster.Properties.ClusterProfile.PullSecret != "" {
func (m *manager) disableOperatorHubSources(ctx context.Context) error {
if m.env.DeploymentMode() != deployment.Development &&
m.doc.OpenShiftCluster.Properties.ClusterProfile.PullSecret != "" {
return nil
}
// https://bugzilla.redhat.com/show_bug.cgi?id=1815649
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
c := &configv1.OperatorHub{}
err := i.configcli.ConfigV1().RESTClient().Get().
err := m.configcli.ConfigV1().RESTClient().Get().
Resource("operatorhubs").
Name("cluster").
VersionedParams(&metav1.GetOptions{}, configscheme.ParameterCodec).
@@ -75,7 +75,7 @@ func (i *manager) disableOperatorHubSources(ctx context.Context) error {
}
c.Spec.Sources = sources
err = i.configcli.ConfigV1().RESTClient().Put().
err = m.configcli.ConfigV1().RESTClient().Put().
Resource("operatorhubs").
Name("cluster").
Body(c).

View file

@@ -14,9 +14,9 @@ import (
)
// startVMs checks cluster VMs power state and starts deallocated and stopped VMs, if any
func (i *manager) startVMs(ctx context.Context) error {
resourceGroupName := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
vms, err := i.virtualmachines.List(ctx, resourceGroupName)
func (m *manager) startVMs(ctx context.Context) error {
resourceGroupName := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
vms, err := m.virtualmachines.List(ctx, resourceGroupName)
if err != nil {
return err
}
@@ -26,7 +26,7 @@ func (i *manager) startVMs(ctx context.Context) error {
for idx, vm := range vms {
idx, vm := idx, vm // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() (err error) {
vms[idx], err = i.virtualmachines.Get(groupCtx, resourceGroupName, *vm.Name, mgmtcompute.InstanceView)
vms[idx], err = m.virtualmachines.Get(groupCtx, resourceGroupName, *vm.Name, mgmtcompute.InstanceView)
return
})
}
@@ -66,7 +66,7 @@ func (i *manager) startVMs(ctx context.Context) error {
for _, vm := range vmsToStart {
vm := vm // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
return i.virtualmachines.StartAndWait(groupCtx, resourceGroupName, *vm.Name)
return m.virtualmachines.StartAndWait(groupCtx, resourceGroupName, *vm.Name)
})
}
return g.Wait()
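
Both loops in startVMs use the same errgroup fan-out: one goroutine per VM, the first error cancels the group context, and g.Wait() joins them and returns that first error. The per-iteration rebind (vm := vm) is what keeps each goroutine from sharing the loop variable. A self-contained sketch with a sleep standing in for StartAndWait:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	names := []string{"master-0", "master-1", "master-2"}
	g, ctx := errgroup.WithContext(context.Background())
	for _, name := range names {
		name := name // https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			select {
			case <-time.After(10 * time.Millisecond): // stand-in for StartAndWait
				fmt.Println("started", name)
				return nil
			case <-ctx.Done(): // another goroutine failed first
				return ctx.Err()
			}
		})
	}
	fmt.Println(g.Wait())
}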

View file

@@ -207,7 +207,7 @@ func TestStartVMs(t *testing.T) {
tt.mock(vmClient)
i := &manager{
m := &manager{
virtualmachines: vmClient,
doc: &api.OpenShiftClusterDocument{
OpenShiftCluster: &api.OpenShiftCluster{
@@ -220,7 +220,7 @@
},
}
err := i.startVMs(ctx)
err := m.startVMs(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)

View file

@@ -21,12 +21,12 @@ import (
utilpem "github.com/Azure/ARO-RP/pkg/util/pem"
)
func (i *manager) createCertificates(ctx context.Context) error {
if i.env.DeploymentMode() == deployment.Development {
func (m *manager) createCertificates(ctx context.Context) error {
if m.env.DeploymentMode() == deployment.Development {
return nil
}
managedDomain, err := dns.ManagedDomain(i.env, i.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
managedDomain, err := dns.ManagedDomain(m.env, m.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
if err != nil {
return err
}
@@ -40,26 +40,26 @@ func (i *manager) createCertificates(ctx context.Context) error {
commonName string
}{
{
certificateName: i.doc.ID + "-apiserver",
certificateName: m.doc.ID + "-apiserver",
commonName: "api." + managedDomain,
},
{
certificateName: i.doc.ID + "-ingress",
certificateName: m.doc.ID + "-ingress",
commonName: "*.apps." + managedDomain,
},
}
for _, c := range certs {
i.log.Printf("creating certificate %s", c.certificateName)
err = i.keyvault.CreateSignedCertificate(ctx, keyvault.IssuerDigicert, c.certificateName, c.commonName, keyvault.EkuServerAuth)
m.log.Printf("creating certificate %s", c.certificateName)
err = m.keyvault.CreateSignedCertificate(ctx, keyvault.IssuerDigicert, c.certificateName, c.commonName, keyvault.EkuServerAuth)
if err != nil {
return err
}
}
for _, c := range certs {
i.log.Printf("waiting for certificate %s", c.certificateName)
err = i.keyvault.WaitForCertificateOperation(ctx, c.certificateName)
m.log.Printf("waiting for certificate %s", c.certificateName)
err = m.keyvault.WaitForCertificateOperation(ctx, c.certificateName)
if err != nil {
return err
}
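
Note the two separate loops: both certificate orders are placed before either is waited on, so both issuances can progress while the code blocks on the first. The shape, reduced to a runnable sketch with string IDs in place of Key Vault operations:

package main

import "fmt"

func main() {
	certs := []string{"<doc-id>-apiserver", "<doc-id>-ingress"}
	for _, c := range certs {
		fmt.Println("creating certificate", c) // start issuance, don't block
	}
	for _, c := range certs {
		fmt.Println("waiting for certificate", c) // block only once all are in flight
	}
}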
@@ -68,12 +68,12 @@ func (i *manager) createCertificates(ctx context.Context) error {
return nil
}
func (i *manager) upgradeCertificates(ctx context.Context) error {
if i.env.DeploymentMode() == deployment.Development {
func (m *manager) upgradeCertificates(ctx context.Context) error {
if m.env.DeploymentMode() == deployment.Development {
return nil
}
managedDomain, err := dns.ManagedDomain(i.env, i.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
managedDomain, err := dns.ManagedDomain(m.env, m.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
if err != nil {
return err
}
@@ -82,9 +82,9 @@ func (i *manager) upgradeCertificates(ctx context.Context) error {
return nil
}
for _, c := range []string{i.doc.ID + "-apiserver", i.doc.ID + "-ingress"} {
i.log.Printf("upgrading certificate %s", c)
err = i.keyvault.UpgradeCertificatePolicy(ctx, c)
for _, c := range []string{m.doc.ID + "-apiserver", m.doc.ID + "-ingress"} {
m.log.Printf("upgrading certificate %s", c)
err = m.keyvault.UpgradeCertificatePolicy(ctx, c)
if err != nil {
return err
}
@@ -93,8 +93,8 @@ func (i *manager) upgradeCertificates(ctx context.Context) error {
return nil
}
func (i *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInterface, certificateName string) error {
bundle, err := i.keyvault.GetSecret(ctx, certificateName)
func (m *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInterface, certificateName string) error {
bundle, err := m.keyvault.GetSecret(ctx, certificateName)
if err != nil {
return err
}
@@ -144,12 +144,12 @@ func (i *manager) ensureSecret(ctx context.Context, secrets coreclient.SecretInterface, certificateName string) error {
return err
}
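
The body of ensureSecret is elided above; its job is to land the Key Vault bundle in the cluster as a kubernetes.io/tls Secret with tls.key and tls.crt entries. A hedged sketch of that target shape; the PEM extraction from the bundle (see the utilpem import above) is omitted, and the placeholder bytes are illustrations:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// tlsSecret builds the kind of Secret ensureSecret ultimately writes.
func tlsSecret(name string, keyPEM, certPEM []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Type:       corev1.SecretTypeTLS,
		Data: map[string][]byte{
			corev1.TLSPrivateKeyKey: keyPEM,  // "tls.key"
			corev1.TLSCertKey:       certPEM, // "tls.crt"
		},
	}
}

func main() {
	s := tlsSecret("example-apiserver", []byte("key pem"), []byte("cert pem"))
	fmt.Println(s.Name, s.Type)
}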
func (i *manager) configureAPIServerCertificate(ctx context.Context) error {
if i.env.DeploymentMode() == deployment.Development {
func (m *manager) configureAPIServerCertificate(ctx context.Context) error {
if m.env.DeploymentMode() == deployment.Development {
return nil
}
managedDomain, err := dns.ManagedDomain(i.env, i.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
managedDomain, err := dns.ManagedDomain(m.env, m.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
if err != nil {
return err
}
@@ -158,13 +158,13 @@ func (i *manager) configureAPIServerCertificate(ctx context.Context) error {
return nil
}
err = i.ensureSecret(ctx, i.kubernetescli.CoreV1().Secrets("openshift-config"), i.doc.ID+"-apiserver")
err = m.ensureSecret(ctx, m.kubernetescli.CoreV1().Secrets("openshift-config"), m.doc.ID+"-apiserver")
if err != nil {
return err
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
apiserver, err := i.configcli.ConfigV1().APIServers().Get("cluster", metav1.GetOptions{})
apiserver, err := m.configcli.ConfigV1().APIServers().Get("cluster", metav1.GetOptions{})
if err != nil {
return err
}
@@ -175,22 +175,22 @@ func (i *manager) configureAPIServerCertificate(ctx context.Context) error {
"api." + managedDomain,
},
ServingCertificate: configv1.SecretNameReference{
Name: i.doc.ID + "-apiserver",
Name: m.doc.ID + "-apiserver",
},
},
}
_, err = i.configcli.ConfigV1().APIServers().Update(apiserver)
_, err = m.configcli.ConfigV1().APIServers().Update(apiserver)
return err
})
}
func (i *manager) configureIngressCertificate(ctx context.Context) error {
if i.env.DeploymentMode() == deployment.Development {
func (m *manager) configureIngressCertificate(ctx context.Context) error {
if m.env.DeploymentMode() == deployment.Development {
return nil
}
managedDomain, err := dns.ManagedDomain(i.env, i.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
managedDomain, err := dns.ManagedDomain(m.env, m.doc.OpenShiftCluster.Properties.ClusterProfile.Domain)
if err != nil {
return err
}
@@ -199,22 +199,22 @@ func (i *manager) configureIngressCertificate(ctx context.Context) error {
return nil
}
err = i.ensureSecret(ctx, i.kubernetescli.CoreV1().Secrets("openshift-ingress"), i.doc.ID+"-ingress")
err = m.ensureSecret(ctx, m.kubernetescli.CoreV1().Secrets("openshift-ingress"), m.doc.ID+"-ingress")
if err != nil {
return err
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
ic, err := i.operatorcli.OperatorV1().IngressControllers("openshift-ingress-operator").Get("default", metav1.GetOptions{})
ic, err := m.operatorcli.OperatorV1().IngressControllers("openshift-ingress-operator").Get("default", metav1.GetOptions{})
if err != nil {
return err
}
ic.Spec.DefaultCertificate = &v1.LocalObjectReference{
Name: i.doc.ID + "-ingress",
Name: m.doc.ID + "-ingress",
}
_, err = i.operatorcli.OperatorV1().IngressControllers("openshift-ingress-operator").Update(ic)
_, err = m.operatorcli.OperatorV1().IngressControllers("openshift-ingress-operator").Update(ic)
return err
})
}