Merge pull request #3211 from bennerv/network-acl-fix-for-storageaccounts

Network acl fix for storageaccounts on Create or AdminUpdate
Christoph Blecker 2023-10-14 14:48:35 -07:00 (committed by GitHub)
Parents ca8a3a0c17 e3b1ebb71d
Commit f131c2564e
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 701 additions and 104 deletions
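For orientation before the per-file changes: in both the installer's deployBaseResourceTemplate path and the aro-operator storageaccounts controller, this PR only adds a cluster subnet to the storage accounts' network ACLs when that subnet has the Microsoft.Storage service endpoint enabled for the cluster's location or for "*". The following is a minimal, self-contained sketch of that predicate; the serviceEndpoint type and the hasStorageServiceEndpoint helper are illustrative stand-ins, not the actual ARO-RP code.

package main

import (
	"fmt"
	"strings"
)

// serviceEndpoint is a simplified stand-in for the Azure SDK's
// mgmtnetwork.ServiceEndpointPropertiesFormat used in the real code.
type serviceEndpoint struct {
	Service   string
	Locations []string
}

// hasStorageServiceEndpoint reports whether any endpoint on the subnet is
// Microsoft.Storage and covers the cluster's location (or all locations, "*").
func hasStorageServiceEndpoint(endpoints []serviceEndpoint, clusterLocation string) bool {
	for _, e := range endpoints {
		if !strings.EqualFold(e.Service, "Microsoft.Storage") {
			continue
		}
		for _, loc := range e.Locations {
			if loc == "*" || strings.EqualFold(loc, clusterLocation) {
				return true
			}
		}
	}
	return false
}

func main() {
	eps := []serviceEndpoint{{Service: "Microsoft.Storage", Locations: []string{"eastus"}}}
	fmt.Println(hasStorageServiceEndpoint(eps, "eastus"))  // true
	fmt.Println(hasStorageServiceEndpoint(eps, "westus2")) // false
}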

View File

@ -25,6 +25,8 @@ import (
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
const storageServiceEndpoint = "Microsoft.Storage"
func (m *manager) createDNS(ctx context.Context) error {
return m.dns.Create(ctx, m.doc.OpenShiftCluster)
}
@ -132,11 +134,16 @@ func (m *manager) deployBaseResourceTemplate(ctx context.Context) error {
clusterStorageAccountName := "cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix
azureRegion := strings.ToLower(m.doc.OpenShiftCluster.Location) // Used in k8s object names, so must pass DNS-1123 validation
ocpSubnets, err := m.subnetsWithServiceEndpoint(ctx, storageServiceEndpoint)
if err != nil {
return err
}
resources := []*arm.Resource{
-m.storageAccount(clusterStorageAccountName, azureRegion, true),
+m.storageAccount(clusterStorageAccountName, azureRegion, ocpSubnets, true),
m.storageAccountBlobContainer(clusterStorageAccountName, "ignition"),
m.storageAccountBlobContainer(clusterStorageAccountName, "aro"),
-m.storageAccount(m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName, azureRegion, true),
+m.storageAccount(m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName, azureRegion, ocpSubnets, true),
m.storageAccountBlobContainer(m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName, "image-registry"),
m.clusterNSG(infraID, azureRegion),
m.clusterServicePrincipalRBAC(),
@ -178,6 +185,49 @@ func (m *manager) deployBaseResourceTemplate(ctx context.Context) error {
return arm.DeployTemplate(ctx, m.log, m.deployments, resourceGroup, "storage", t, nil)
}
// subnetsWithServiceEndpoint returns a unique slice of subnet resource IDs that have the corresponding
// service endpoint
func (m *manager) subnetsWithServiceEndpoint(ctx context.Context, serviceEndpoint string) ([]string, error) {
subnetsMap := map[string]struct{}{}
subnetsMap[m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID] = struct{}{}
workerProfiles, _ := api.GetEnrichedWorkerProfiles(m.doc.OpenShiftCluster.Properties)
for _, v := range workerProfiles {
// skip worker profiles with empty subnet IDs instead of failing; an empty subnet ID is not valid
if v.SubnetID == "" {
continue
}
subnetsMap[strings.ToLower(v.SubnetID)] = struct{}{}
}
subnets := []string{}
for subnetId := range subnetsMap {
// We purposefully fail if we can't fetch the subnet as the FPSP most likely
// lost read permission over the subnet.
subnet, err := m.subnet.Get(ctx, subnetId)
if err != nil {
return nil, err
}
if subnet.SubnetPropertiesFormat == nil || subnet.ServiceEndpoints == nil {
continue
}
for _, endpoint := range *subnet.ServiceEndpoints {
if endpoint.Service != nil && strings.EqualFold(*endpoint.Service, serviceEndpoint) && endpoint.Locations != nil {
for _, loc := range *endpoint.Locations {
if loc == "*" || strings.EqualFold(loc, m.doc.OpenShiftCluster.Location) {
subnets = append(subnets, subnetId)
}
}
}
}
}
return subnets, nil
}
func (m *manager) attachNSGs(ctx context.Context) error {
if m.doc.OpenShiftCluster.Properties.NetworkProfile.PreconfiguredNSG == api.PreconfiguredNSGEnabled {
return nil

View File

@ -77,7 +77,7 @@ func (m *manager) clusterServicePrincipalRBAC() *arm.Resource {
// storageAccount returns a storage account resource.
// Legacy storage accounts (public) are not encrypted and cannot be retrofitted.
// The encrypted flag controls this behavior on update/create.
-func (m *manager) storageAccount(name, region string, encrypted bool) *arm.Resource {
+func (m *manager) storageAccount(name, region string, ocpSubnets []string, encrypted bool) *arm.Resource {
virtualNetworkRules := []mgmtstorage.VirtualNetworkRule{
{
VirtualNetworkResourceID: to.StringPtr("/subscriptions/" + m.env.SubscriptionID() + "/resourceGroups/" + m.env.ResourceGroup() + "/providers/Microsoft.Network/virtualNetworks/rp-pe-vnet-001/subnets/rp-pe-subnet"),
@ -89,21 +89,12 @@ func (m *manager) storageAccount(name, region string, encrypted bool) *arm.Resou
},
}
-// Virtual network rules to allow the cluster subnets to directly reach the storage accounts
-// are only needed when egress lockdown is not enabled.
-if !m.doc.OpenShiftCluster.Properties.FeatureProfile.GatewayEnabled {
-workerProfiles, _ := api.GetEnrichedWorkerProfiles(m.doc.OpenShiftCluster.Properties)
-workerSubnetId := workerProfiles[0].SubnetID
-virtualNetworkRules = append(virtualNetworkRules, []mgmtstorage.VirtualNetworkRule{
-{
-VirtualNetworkResourceID: &m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID,
-Action: mgmtstorage.Allow,
-},
-{
-VirtualNetworkResourceID: &workerSubnetId,
-Action: mgmtstorage.Allow,
-},
-}...)
-}
+// add OCP subnets which have Microsoft.Storage service endpoint enabled
+for _, subnet := range ocpSubnets {
+virtualNetworkRules = append(virtualNetworkRules, mgmtstorage.VirtualNetworkRule{
+VirtualNetworkResourceID: to.StringPtr(subnet),
+Action: mgmtstorage.Allow,
+})
+}
// when installing via Hive we need to allow Hive to persist the installConfig graph in the cluster's storage account

View File

@ -8,6 +8,8 @@ import (
"errors"
"fmt"
"net/http"
"reflect"
"sort"
"strings"
"testing"
@ -400,3 +402,207 @@ func TestEnsureInfraID(t *testing.T) {
})
}
}
func TestSubnetsWithServiceEndpoints(t *testing.T) {
ctx := context.Background()
masterSubnet := strings.ToLower("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/master-subnet")
workerSubnetFormatString := strings.ToLower("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/%s")
resourceID := "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName"
serviceEndpoint := "Microsoft.Storage"
location := "eastus"
for _, tt := range []struct {
name string
mocks func(subnet *mock_subnet.MockManager)
workerSubnets []string
wantSubnets []string
wantErr string
}{
{
name: "no service endpoints set returns empty string slice",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{}, nil)
},
wantSubnets: []string{},
},
{
name: "master subnet has service endpoint, but incorrect location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Service: &serviceEndpoint,
Locations: &[]string{
"bad-location",
},
},
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
wantSubnets: []string{},
},
{
name: "master subnet has service endpoint with correct location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Service: &serviceEndpoint,
Locations: &[]string{
location,
},
},
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
wantSubnets: []string{masterSubnet},
},
{
name: "master subnet has service endpoint with all location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Service: &serviceEndpoint,
Locations: &[]string{
"*",
},
},
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
wantSubnets: []string{masterSubnet},
},
{
name: "all subnets have service endpoint with correct locations",
mocks: func(subnet *mock_subnet.MockManager) {
subnetWithServiceEndpoint := &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Service: &serviceEndpoint,
Locations: &[]string{
"*",
},
},
},
},
}
subnet.EXPECT().Get(ctx, masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
wantSubnets: []string{
masterSubnet,
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
},
{
name: "mixed subnets with service endpoint",
mocks: func(subnet *mock_subnet.MockManager) {
subnetWithServiceEndpoint := &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Service: &serviceEndpoint,
Locations: &[]string{
location,
},
},
},
},
}
subnet.EXPECT().Get(ctx, masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-002")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-002"),
"",
},
wantSubnets: []string{
masterSubnet,
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
},
},
{
name: "Get subnet returns error",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(nil, errors.New("generic error"))
},
workerSubnets: []string{},
wantErr: "generic error",
},
} {
t.Run(tt.name, func(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
subnet := mock_subnet.NewMockManager(controller)
tt.mocks(subnet)
workerProfiles := []api.WorkerProfile{}
if tt.workerSubnets != nil {
for _, subnet := range tt.workerSubnets {
workerProfiles = append(workerProfiles, api.WorkerProfile{
SubnetID: subnet,
})
}
}
m := &manager{
doc: &api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "FoobarCluster",
Location: location,
Properties: api.OpenShiftClusterProperties{
MasterProfile: api.MasterProfile{
SubnetID: masterSubnet,
},
WorkerProfiles: workerProfiles,
},
},
},
subnet: subnet,
}
subnets, err := m.subnetsWithServiceEndpoint(ctx, serviceEndpoint)
utilerror.AssertErrorMessage(t, err, tt.wantErr)
// sort slices for ordering
sort.Strings(subnets)
sort.Strings(tt.wantSubnets)
if !reflect.DeepEqual(subnets, tt.wantSubnets) {
t.Errorf("got: %v, wanted %v", subnets, tt.wantSubnets)
}
})
}
}

View File

@ -18,12 +18,11 @@ import (
// The encryption flag is set to false/disabled for legacy storage accounts.
func (m *manager) migrateStorageAccounts(ctx context.Context) error {
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
-workerProfiles, _ := api.GetEnrichedWorkerProfiles(m.doc.OpenShiftCluster.Properties)
-if len(workerProfiles) == 0 {
-m.log.Error("skipping migrateStorageAccounts due to missing WorkerProfiles.")
-return nil
-}
+ocpSubnets, err := m.subnetsWithServiceEndpoint(ctx, storageServiceEndpoint)
+if err != nil {
+return err
+}
clusterStorageAccountName := "cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix
registryStorageAccountName := m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName
@ -31,8 +30,8 @@ func (m *manager) migrateStorageAccounts(ctx context.Context) error {
Schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
ContentVersion: "1.0.0.0",
Resources: []*arm.Resource{
-m.storageAccount(clusterStorageAccountName, m.doc.OpenShiftCluster.Location, false),
-m.storageAccount(registryStorageAccountName, m.doc.OpenShiftCluster.Location, false),
+m.storageAccount(clusterStorageAccountName, m.doc.OpenShiftCluster.Location, ocpSubnets, false),
+m.storageAccount(registryStorageAccountName, m.doc.OpenShiftCluster.Location, ocpSubnets, false),
},
}

View File

@ -48,6 +48,7 @@ type reconcileManager struct {
client client.Client
kubeSubnets subnet.KubeManager
subnets subnet.Manager
storage storage.AccountsClient
}
@ -103,6 +104,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
client: r.client,
kubeSubnets: subnet.NewKubeManager(r.client, resource.SubscriptionID),
subnets: subnet.NewManager(&azEnv, resource.SubscriptionID, authorizer),
storage: storage.NewAccountsClient(&azEnv, resource.SubscriptionID, authorizer),
}

View File

@ -13,30 +13,52 @@ import (
imageregistryv1 "github.com/openshift/api/imageregistry/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/Azure/ARO-RP/pkg/operator"
"github.com/Azure/ARO-RP/pkg/util/stringutils"
)
func (r *reconcileManager) reconcileAccounts(ctx context.Context) error {
location := r.instance.Spec.Location
resourceGroup := stringutils.LastTokenByte(r.instance.Spec.ClusterResourceGroupID, '/')
serviceSubnets := r.instance.Spec.ServiceSubnets
-// Only include the master and worker subnets in the storage accounts' virtual
-// network rules if egress lockdown is not enabled.
-if !operator.GatewayEnabled(r.instance) {
-subnets, err := r.kubeSubnets.List(ctx)
+subnets, err := r.kubeSubnets.List(ctx)
if err != nil {
return err
}
// Check each of the cluster subnets for the Microsoft.Storage service endpoint. If the subnet has
// the service endpoint, it needs to be included in the storage account vnet rules.
for _, subnet := range subnets {
mgmtSubnet, err := r.subnets.Get(ctx, subnet.ResourceID)
if err != nil {
return err
}
-for _, subnet := range subnets {
-serviceSubnets = append(serviceSubnets, subnet.ResourceID)
if mgmtSubnet.SubnetPropertiesFormat != nil && mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints != nil {
for _, serviceEndpoint := range *mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints {
isStorageEndpoint := (serviceEndpoint.Service != nil) && (*serviceEndpoint.Service == "Microsoft.Storage")
matchesClusterLocation := false
if serviceEndpoint.Locations != nil {
for _, l := range *serviceEndpoint.Locations {
if l == "*" || l == location {
matchesClusterLocation = true
break
}
}
}
if isStorageEndpoint && matchesClusterLocation {
serviceSubnets = append(serviceSubnets, subnet.ResourceID)
break
}
}
}
}
rc := &imageregistryv1.Config{}
-err := r.client.Get(ctx, types.NamespacedName{Name: "cluster"}, rc)
+err = r.client.Get(ctx, types.NamespacedName{Name: "cluster"}, rc)
if err != nil {
return err
}
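To make the controller change above concrete, here is a minimal sketch, using the same Azure SDK storage types the tests in this PR import, of how the subnets that pass the service endpoint check end up as virtual network rules on a storage account update. The subnet IDs are placeholders and the program only prints the resulting update object; it illustrates the data shape, not the controller's exact code.

package main

import (
	"fmt"

	mgmtstorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// serviceSubnets stands in for the subnet IDs that passed the
	// Microsoft.Storage service endpoint check; the IDs are placeholders.
	serviceSubnets := []string{
		"/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/master",
		"/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/worker",
	}

	// Each allowed subnet becomes a VirtualNetworkRule with Action "Allow".
	rules := make([]mgmtstorage.VirtualNetworkRule, 0, len(serviceSubnets))
	for _, s := range serviceSubnets {
		rules = append(rules, mgmtstorage.VirtualNetworkRule{
			VirtualNetworkResourceID: to.StringPtr(s),
			Action:                   mgmtstorage.Allow,
		})
	}

	// The rules are wrapped in a NetworkRuleSet inside an account update,
	// matching the shape asserted in the controller tests below.
	update := mgmtstorage.AccountUpdateParameters{
		AccountPropertiesUpdateParameters: &mgmtstorage.AccountPropertiesUpdateParameters{
			NetworkRuleSet: &mgmtstorage.NetworkRuleSet{
				VirtualNetworkRules: &rules,
			},
		},
	}
	fmt.Printf("%+v\n", update)
}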

View File

@ -8,6 +8,7 @@ import (
"strconv"
"testing"
mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-08-01/network"
mgmtstorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
@ -16,6 +17,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/api"
apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet"
arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"
mock_storage "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/storage"
mock_subnet "github.com/Azure/ARO-RP/pkg/util/mocks/subnet"
@ -24,13 +27,16 @@ import (
)
var (
location = "eastus"
subscriptionId = "0000000-0000-0000-0000-000000000000"
clusterResourceGroupName = "aro-iljrzb5a"
clusterResourceGroupId = "/subscriptions/" + subscriptionId + "/resourcegroups/" + clusterResourceGroupName
infraId = "abcd"
vnetResourceGroup = "vnet-rg"
vnetName = "vnet"
subnetNameWorker = "worker"
subnetNameMaster = "master"
nsgv1MasterResourceId = clusterResourceGroupId + "/providers/Microsoft.Network/networkSecurityGroups/" + infraId + apisubnet.NSGControlPlaneSuffixV1
storageSuffix = "random-suffix"
clusterStorageAccountName = "cluster" + storageSuffix
@ -44,6 +50,7 @@ func getValidClusterInstance(operatorFlag bool) *arov1alpha1.Cluster {
return &arov1alpha1.Cluster{
Spec: arov1alpha1.ClusterSpec{
ClusterResourceGroupID: clusterResourceGroupId,
Location: location,
StorageSuffix: storageSuffix,
OperatorFlags: arov1alpha1.OperatorFlags{
controllerEnabled: strconv.FormatBool(operatorFlag),
@ -70,12 +77,32 @@ func getValidAccount(virtualNetworkResourceIDs []string) *mgmtstorage.Account {
return account
}
func getValidSubnet(resourceId string) *mgmtnetwork.Subnet {
s := &mgmtnetwork.Subnet{
ID: to.StringPtr(resourceId),
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr(nsgv1MasterResourceId),
},
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{},
},
}
for _, endpoint := range api.SubnetsEndpoints {
*s.SubnetPropertiesFormat.ServiceEndpoints = append(*s.SubnetPropertiesFormat.ServiceEndpoints, mgmtnetwork.ServiceEndpointPropertiesFormat{
Service: to.StringPtr(endpoint),
Locations: &[]string{location},
ProvisioningState: mgmtnetwork.Succeeded,
})
}
return s
}
func TestReconcileManager(t *testing.T) {
log := logrus.NewEntry(logrus.StandardLogger())
for _, tt := range []struct {
name string
-mocks func(*mock_storage.MockAccountsClient, *mock_subnet.MockKubeManager)
+mocks func(*mock_storage.MockAccountsClient, *mock_subnet.MockKubeManager, *mock_subnet.MockManager)
instance func(*arov1alpha1.Cluster)
operatorFlag bool
wantErr error
@ -83,7 +110,14 @@ func TestReconcileManager(t *testing.T) {
{
name: "Operator Flag enabled - nothing to do",
operatorFlag: true,
-mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager) {
+mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
@ -105,7 +139,14 @@ func TestReconcileManager(t *testing.T) {
{
name: "Operator Flag disabled - nothing to do",
operatorFlag: false,
-mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager) {
+mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
@ -127,7 +168,14 @@ func TestReconcileManager(t *testing.T) {
{
name: "Operator Flag enabled - all rules to all accounts",
operatorFlag: true,
-mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager) {
+mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
@ -165,15 +213,129 @@ func TestReconcileManager(t *testing.T) {
},
},
{
name: "Operator Flag enabled - nothing to do because egress lockdown is enabled",
name: "Operator flag enabled - worker subnet rule to all accounts because storage service endpoint on worker subnet",
operatorFlag: true,
instance: func(cluster *arov1alpha1.Cluster) {
cluster.Spec.GatewayDomains = []string{"somegatewaydomain.com"}
},
-mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager) {
+mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
masterSubnet.ServiceEndpoints = nil
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
ResourceID: resourceIdMaster,
IsMaster: true,
},
{
ResourceID: resourceIdWorker,
IsMaster: false,
},
}, nil)
// storage objects in azure
result := getValidAccount([]string{})
updated := mgmtstorage.AccountUpdateParameters{
AccountPropertiesUpdateParameters: &mgmtstorage.AccountPropertiesUpdateParameters{
NetworkRuleSet: getValidAccount([]string{resourceIdWorker}).NetworkRuleSet,
},
}
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, clusterStorageAccountName, gomock.Any()).Return(*result, nil)
storage.EXPECT().Update(gomock.Any(), clusterResourceGroupName, clusterStorageAccountName, updated)
// we can't reuse the objects from above because of how gomock handles them:
// the mock functions modify them, so they no longer compare equal
result = getValidAccount([]string{})
updated = mgmtstorage.AccountUpdateParameters{
AccountPropertiesUpdateParameters: &mgmtstorage.AccountPropertiesUpdateParameters{
NetworkRuleSet: getValidAccount([]string{resourceIdWorker}).NetworkRuleSet,
},
}
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, registryStorageAccountName, gomock.Any()).Return(*result, nil)
storage.EXPECT().Update(gomock.Any(), clusterResourceGroupName, registryStorageAccountName, updated)
},
},
{
name: "Operator flag enabled - nothing to do because no service endpoints",
operatorFlag: true,
mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
masterSubnet.ServiceEndpoints = nil
workerSubnet.ServiceEndpoints = nil
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
ResourceID: resourceIdMaster,
IsMaster: true,
},
{
ResourceID: resourceIdWorker,
IsMaster: false,
},
}, nil)
// storage objects in azure
result := getValidAccount([]string{})
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, clusterStorageAccountName, gomock.Any()).Return(*result, nil)
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, registryStorageAccountName, gomock.Any()).Return(*result, nil)
},
},
{
name: "Operator flag enabled - nothing to do because the storage endpoint is there but the location does not match the cluster",
operatorFlag: true,
mocks: func(storage *mock_storage.MockAccountsClient, kubeSubnet *mock_subnet.MockKubeManager, mgmtSubnet *mock_subnet.MockManager) {
// Azure subnets
masterSubnet := getValidSubnet(resourceIdMaster)
workerSubnet := getValidSubnet(resourceIdWorker)
newMasterServiceEndpoints := []mgmtnetwork.ServiceEndpointPropertiesFormat{}
for _, se := range *masterSubnet.ServiceEndpoints {
se.Locations = &[]string{"not_a_real_place"}
newMasterServiceEndpoints = append(newMasterServiceEndpoints, se)
}
masterSubnet.ServiceEndpoints = &newMasterServiceEndpoints
newWorkerServiceEndpoints := []mgmtnetwork.ServiceEndpointPropertiesFormat{}
for _, se := range *workerSubnet.ServiceEndpoints {
se.Locations = &[]string{"not_a_real_place"}
newWorkerServiceEndpoints = append(newWorkerServiceEndpoints, se)
}
workerSubnet.ServiceEndpoints = &newWorkerServiceEndpoints
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdMaster).Return(masterSubnet, nil)
mgmtSubnet.EXPECT().Get(gomock.Any(), resourceIdWorker).Return(workerSubnet, nil)
// cluster subnets
kubeSubnet.EXPECT().List(gomock.Any()).Return([]subnet.Subnet{
{
ResourceID: resourceIdMaster,
IsMaster: true,
},
{
ResourceID: resourceIdWorker,
IsMaster: false,
},
}, nil)
// storage objects in azure
result := getValidAccount([]string{})
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, clusterStorageAccountName, gomock.Any()).Return(*result, nil)
storage.EXPECT().GetProperties(gomock.Any(), clusterResourceGroupName, registryStorageAccountName, gomock.Any()).Return(*result, nil)
},
@ -185,9 +347,10 @@ func TestReconcileManager(t *testing.T) {
storage := mock_storage.NewMockAccountsClient(controller)
kubeSubnet := mock_subnet.NewMockKubeManager(controller)
subnet := mock_subnet.NewMockManager(controller)
if tt.mocks != nil {
-tt.mocks(storage, kubeSubnet)
+tt.mocks(storage, kubeSubnet, subnet)
}
instance := getValidClusterInstance(tt.operatorFlag)
@ -214,6 +377,7 @@ func TestReconcileManager(t *testing.T) {
instance: instance,
subscriptionID: subscriptionId,
storage: storage,
subnets: subnet,
kubeSubnets: kubeSubnet,
client: clientFake,
}

View File

@ -5,10 +5,16 @@ package e2e
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-08-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@ -16,20 +22,20 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet"
"github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2022-09-04/redhatopenshift"
"github.com/Azure/ARO-RP/pkg/util/ready"
"github.com/Azure/ARO-RP/pkg/util/stringutils"
"github.com/Azure/ARO-RP/pkg/util/version"
"github.com/Azure/ARO-RP/test/util/project"
)
const (
testNamespace = "test-e2e"
)
-var _ = Describe("Cluster", func() {
+var _ = Describe("Cluster", Serial, func() {
var p project.Project
-var _ = BeforeEach(func(ctx context.Context) {
+BeforeEach(func(ctx context.Context) {
By("creating a test namespace")
testNamespace := fmt.Sprintf("test-e2e-%d", GinkgoParallelProcess())
p = project.NewProject(clients.Kubernetes, clients.Project, testNamespace)
err := p.Create(ctx)
Expect(err).NotTo(HaveOccurred(), "Failed to create test namespace")
@ -37,49 +43,189 @@ var _ = Describe("Cluster", func() {
By("verifying the namespace is ready")
Eventually(func(ctx context.Context) error {
return p.Verify(ctx)
-}).WithContext(ctx).Should(BeNil())
+}).WithContext(ctx).WithTimeout(5 * time.Minute).Should(BeNil())
DeferCleanup(func(ctx context.Context) {
By("deleting a test namespace")
err := p.Delete(ctx)
Expect(err).NotTo(HaveOccurred(), "Failed to delete test namespace")
By("verifying the namespace is deleted")
Eventually(func(ctx context.Context) error {
return p.VerifyProjectIsDeleted(ctx)
}).WithContext(ctx).WithTimeout(5 * time.Minute).Should(BeNil())
})
})
-var _ = AfterEach(func(ctx context.Context) {
-By("deleting a test namespace")
-err := p.Delete(ctx)
-Expect(err).NotTo(HaveOccurred(), "Failed to delete test namespace")
Context("can run a stateful set", func() {
It("which is using Azure Disk storage", func(ctx context.Context) {
By("creating stateful set")
oc, _ := clients.OpenshiftClusters.Get(ctx, vnetResourceGroup, clusterName)
installVersion, _ := version.ParseVersion(*oc.ClusterProfile.Version)
By("verifying the namespace is deleted")
Eventually(func(ctx context.Context) error {
return p.VerifyProjectIsDeleted(ctx)
}).WithContext(ctx).Should(BeNil())
})
storageClass := "managed-csi"
It("can run a stateful set which is using Azure Disk storage", func(ctx context.Context) {
if installVersion.Lt(version.NewVersion(4, 11)) {
storageClass = "managed-premium"
}
By("creating stateful set")
err := createStatefulSet(ctx, clients.Kubernetes)
Expect(err).NotTo(HaveOccurred())
ssName, err := createStatefulSet(ctx, clients.Kubernetes, p.Name, storageClass)
Expect(err).NotTo(HaveOccurred())
By("verifying the stateful set is ready")
Eventually(func(g Gomega, ctx context.Context) {
s, err := clients.Kubernetes.AppsV1().StatefulSets(testNamespace).Get(ctx, "busybox", metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
By("verifying the stateful set is ready")
Eventually(func(g Gomega, ctx context.Context) {
s, err := clients.Kubernetes.AppsV1().StatefulSets(p.Name).Get(ctx, ssName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be ready")
GinkgoWriter.Println(s)
}).WithContext(ctx).WithTimeout(5 * time.Minute).Should(Succeed())
})
// TODO: this test is marked Pending because it isn't working as expected
It("which is using the default Azure File storage class backed by the cluster storage account", Pending, func(ctx context.Context) {
By("adding the Microsoft.Storage service endpoint to each cluster subnet")
oc, err := clients.OpenshiftClusters.Get(ctx, vnetResourceGroup, clusterName)
Expect(err).NotTo(HaveOccurred())
ocpSubnets := clusterSubnets(oc)
for _, s := range ocpSubnets {
vnetID, subnetName, err := apisubnet.Split(s)
Expect(err).NotTo(HaveOccurred())
vnetR, err := azure.ParseResourceID(vnetID)
Expect(err).NotTo(HaveOccurred())
mgmtSubnet, err := clients.Subnet.Get(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, "")
Expect(err).NotTo(HaveOccurred())
if mgmtSubnet.SubnetPropertiesFormat == nil {
mgmtSubnet.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
}
if mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints == nil {
mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints = &[]mgmtnetwork.ServiceEndpointPropertiesFormat{}
}
// Check whether service endpoint is already there before trying to add
// it; trying to add a duplicate results in an error
subnetHasStorageEndpoint := false
for _, se := range *mgmtSubnet.ServiceEndpoints {
if se.Service != nil && *se.Service == "Microsoft.Storage" {
subnetHasStorageEndpoint = true
break
}
}
if !subnetHasStorageEndpoint {
storageEndpoint := mgmtnetwork.ServiceEndpointPropertiesFormat{
Service: to.StringPtr("Microsoft.Storage"),
Locations: &[]string{"*"},
}
*mgmtSubnet.ServiceEndpoints = append(*mgmtSubnet.ServiceEndpoints, storageEndpoint)
err = clients.Subnet.CreateOrUpdateAndWait(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, mgmtSubnet)
Expect(err).NotTo(HaveOccurred())
}
}
// PUCM would be more reliable to check against, but we cannot run PUCM in prod,
// and dev clusters have ACLs set to allow
By("checking the storage account vnet rules to verify that they include the cluster subnets")
cluster, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Poke the ARO storageaccount controller to reconcile
cluster.Spec.OperatorFlags["aro.storageaccounts.enabled"] = "false"
cluster, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, cluster, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
cluster.Spec.OperatorFlags["aro.storageaccounts.enabled"] = "true"
cluster, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, cluster, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
rgName := stringutils.LastTokenByte(cluster.Spec.ClusterResourceGroupID, '/')
// only checking the cluster storage account
Eventually(func(g Gomega, ctx context.Context) {
account, err := clients.Storage.GetProperties(ctx, rgName, "cluster"+cluster.Spec.StorageSuffix, "")
g.Expect(err).NotTo(HaveOccurred())
nAclSubnets := []string{}
g.Expect(account.AccountProperties).NotTo(BeNil())
g.Expect(account.NetworkRuleSet).NotTo(BeNil())
g.Expect(account.NetworkRuleSet.VirtualNetworkRules).NotTo(BeNil())
for _, rule := range *account.NetworkRuleSet.VirtualNetworkRules {
if rule.Action == storage.Allow && rule.VirtualNetworkResourceID != nil {
nAclSubnets = append(nAclSubnets, strings.ToLower(*rule.VirtualNetworkResourceID))
}
}
for _, subnet := range ocpSubnets {
g.Expect(nAclSubnets).To(ContainElement(strings.ToLower(subnet)))
}
}).WithContext(ctx).WithTimeout(5 * time.Minute).Should(Succeed())
By("creating stateful set")
storageClass := "azurefile-csi"
ssName, err := createStatefulSet(ctx, clients.Kubernetes, p.Name, storageClass)
Expect(err).NotTo(HaveOccurred())
By("verifying the stateful set is ready")
Eventually(func(g Gomega, ctx context.Context) {
s, err := clients.Kubernetes.AppsV1().StatefulSets(p.Name).Get(ctx, ssName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be ready")
GinkgoWriter.Println(s)
pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(p.Name).Get(ctx, ssName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Println(pvc)
}).WithContext(ctx).WithTimeout(5 * time.Minute).Should(Succeed())
By("cleaning up the cluster subnets (removing service endpoints)")
for _, s := range ocpSubnets {
vnetID, subnetName, err := apisubnet.Split(s)
Expect(err).NotTo(HaveOccurred())
vnetR, err := azure.ParseResourceID(vnetID)
Expect(err).NotTo(HaveOccurred())
mgmtSubnet, err := clients.Subnet.Get(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, "")
Expect(err).NotTo(HaveOccurred())
if mgmtSubnet.SubnetPropertiesFormat == nil {
mgmtSubnet.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
}
mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints = &[]mgmtnetwork.ServiceEndpointPropertiesFormat{}
err = clients.Subnet.CreateOrUpdateAndWait(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, mgmtSubnet)
Expect(err).NotTo(HaveOccurred())
}
})
-g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be ready")
-}).WithContext(ctx).Should(Succeed())
})
It("can create load balancer services", func(ctx context.Context) {
By("creating an external load balancer service")
-err := createLoadBalancerService(ctx, clients.Kubernetes, "elb", map[string]string{})
+err := createLoadBalancerService(ctx, clients.Kubernetes, "elb", p.Name, map[string]string{})
Expect(err).NotTo(HaveOccurred())
By("creating an internal load balancer service")
-err = createLoadBalancerService(ctx, clients.Kubernetes, "ilb", map[string]string{
+err = createLoadBalancerService(ctx, clients.Kubernetes, "ilb", p.Name, map[string]string{
"service.beta.kubernetes.io/azure-load-balancer-internal": "true",
})
Expect(err).NotTo(HaveOccurred())
By("verifying the external load balancer service is ready")
Eventually(func(ctx context.Context) bool {
svc, err := clients.Kubernetes.CoreV1().Services(testNamespace).Get(ctx, "elb", metav1.GetOptions{})
svc, err := clients.Kubernetes.CoreV1().Services(p.Name).Get(ctx, "elb", metav1.GetOptions{})
if err != nil {
return false
}
@ -88,7 +234,7 @@ var _ = Describe("Cluster", func() {
By("verifying the internal load balancer service is ready")
Eventually(func(ctx context.Context) bool {
svc, err := clients.Kubernetes.CoreV1().Services(testNamespace).Get(ctx, "ilb", metav1.GetOptions{})
svc, err := clients.Kubernetes.CoreV1().Services(p.Name).Get(ctx, "ilb", metav1.GetOptions{})
if err != nil {
return false
}
@ -102,12 +248,12 @@ var _ = Describe("Cluster", func() {
deployName := "internal-registry-deploy"
By("creating a test deployment from an internal container registry")
-err := createContainerFromInternalContainerRegistryImage(ctx, clients.Kubernetes, deployName)
+err := createContainerFromInternalContainerRegistryImage(ctx, clients.Kubernetes, deployName, p.Name)
Expect(err).NotTo(HaveOccurred())
By("verifying the deployment is ready")
Eventually(func(g Gomega, ctx context.Context) {
-s, err := clients.Kubernetes.AppsV1().Deployments(testNamespace).Get(ctx, deployName, metav1.GetOptions{})
+s, err := clients.Kubernetes.AppsV1().Deployments(p.Name).Get(ctx, deployName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(ready.DeploymentIsReady(s)).To(BeTrue(), "expect stateful to be ready")
@ -115,30 +261,44 @@ var _ = Describe("Cluster", func() {
})
})
-func createStatefulSet(ctx context.Context, cli kubernetes.Interface) error {
-oc, _ := clients.OpenshiftClusters.Get(ctx, vnetResourceGroup, clusterName)
-installVersion, _ := version.ParseVersion(*oc.ClusterProfile.Version)
// clusterSubnets returns a slice containing all of the cluster subnets' resource IDs
func clusterSubnets(oc redhatopenshift.OpenShiftCluster) []string {
subnetMap := map[string]struct{}{}
subnetMap[*oc.OpenShiftClusterProperties.MasterProfile.SubnetID] = struct{}{}
defaultStorageClass := "managed-csi"
if installVersion.Lt(version.NewVersion(4, 11)) {
defaultStorageClass = "managed-premium"
// TODO: change to workerProfileStatuses when we bump the API to 20230904 stable
for _, p := range *oc.OpenShiftClusterProperties.WorkerProfiles {
s := strings.ToLower(*p.SubnetID)
subnetMap[s] = struct{}{}
}
subnets := []string{}
for subnet := range subnetMap {
subnets = append(subnets, subnet)
}
return subnets
}
func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace, storageClass string) (string, error) {
pvcStorage, err := resource.ParseQuantity("2Gi")
if err != nil {
-return err
+return "", err
}
ssName := fmt.Sprintf("busybox-%s-%d", storageClass, GinkgoParallelProcess())
-_, err = cli.AppsV1().StatefulSets(testNamespace).Create(ctx, &appsv1.StatefulSet{
+_, err = cli.AppsV1().StatefulSets(namespace).Create(ctx, &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
-Name: "busybox",
+Name: ssName,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
-MatchLabels: map[string]string{"app": "busybox"},
+MatchLabels: map[string]string{"app": ssName},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
-Labels: map[string]string{"app": "busybox"},
+Labels: map[string]string{"app": ssName},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@ -152,7 +312,7 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface) error {
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "busybox",
Name: ssName,
MountPath: "/data",
ReadOnly: false,
},
@ -164,13 +324,13 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface) error {
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "busybox",
Name: ssName,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
-StorageClassName: to.StringPtr(defaultStorageClass),
+StorageClassName: to.StringPtr(storageClass),
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: pvcStorage,
@ -181,14 +341,14 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface) error {
},
},
}, metav1.CreateOptions{})
-return err
+return ssName, err
}
-func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, name string, annotations map[string]string) error {
+func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, name, namespace string, annotations map[string]string) error {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
-Namespace: testNamespace,
+Namespace: namespace,
Annotations: annotations,
},
Spec: corev1.ServiceSpec{
@ -201,15 +361,15 @@ func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, na
Type: corev1.ServiceTypeLoadBalancer,
},
}
-_, err := cli.CoreV1().Services(testNamespace).Create(ctx, svc, metav1.CreateOptions{})
+_, err := cli.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{})
return err
}
-func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli kubernetes.Interface, name string) error {
+func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli kubernetes.Interface, name, namespace string) error {
deploy := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
-Namespace: testNamespace,
+Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: to.Int32Ptr(1),
@ -238,6 +398,6 @@ func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli
},
},
}
-_, err := cli.AppsV1().Deployments(testNamespace).Create(ctx, deploy, metav1.CreateOptions{})
+_, err := cli.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{})
return err
}

View File

@ -41,6 +41,7 @@ import (
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/features"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/network"
redhatopenshift20220904 "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/redhatopenshift/2022-09-04/redhatopenshift"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/storage"
"github.com/Azure/ARO-RP/pkg/util/cluster"
utillog "github.com/Azure/ARO-RP/pkg/util/log"
"github.com/Azure/ARO-RP/pkg/util/uuid"
@ -60,6 +61,7 @@ type clientSet struct {
Disks compute.DisksClient
NetworkSecurityGroups network.SecurityGroupsClient
Subnet network.SubnetsClient
Storage storage.AccountsClient
RestConfig *rest.Config
HiveRestConfig *rest.Config
@ -341,6 +343,7 @@ func newClientSet(ctx context.Context) (*clientSet, error) {
DiskEncryptionSets: compute.NewDiskEncryptionSetsClient(_env.Environment(), _env.SubscriptionID(), authorizer),
Subnet: network.NewSubnetsClient(_env.Environment(), _env.SubscriptionID(), authorizer),
NetworkSecurityGroups: network.NewSecurityGroupsClient(_env.Environment(), _env.SubscriptionID(), authorizer),
Storage: storage.NewAccountsClient(_env.Environment(), _env.SubscriptionID(), authorizer),
RestConfig: restconfig,
HiveRestConfig: hiveRestConfig,

View File

@ -18,28 +18,28 @@ import (
type Project struct {
projectClient projectclient.Interface
cli kubernetes.Interface
-name string
+Name string
}
func NewProject(cli kubernetes.Interface, projectClient projectclient.Interface, name string) Project {
return Project{
projectClient: projectClient,
cli: cli,
-name: name,
+Name: name,
}
}
func (p Project) Create(ctx context.Context) error {
_, err := p.projectClient.ProjectV1().Projects().Create(ctx, &projectv1.Project{
ObjectMeta: metav1.ObjectMeta{
-Name: p.name,
+Name: p.Name,
},
}, metav1.CreateOptions{})
return err
}
func (p Project) Delete(ctx context.Context) error {
-return p.projectClient.ProjectV1().Projects().Delete(ctx, p.name, metav1.DeleteOptions{})
+return p.projectClient.ProjectV1().Projects().Delete(ctx, p.Name, metav1.DeleteOptions{})
}
// VerifyProjectIsReady verifies that the project and relevant resources have been created correctly and returns error otherwise
@ -48,7 +48,7 @@ func (p Project) Verify(ctx context.Context) error {
&authorizationv1.SelfSubjectAccessReview{
Spec: authorizationv1.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationv1.ResourceAttributes{
-Namespace: p.name,
+Namespace: p.Name,
Verb: "create",
Resource: "pods",
},
@ -58,7 +58,7 @@ func (p Project) Verify(ctx context.Context) error {
return err
}
-sa, err := p.cli.CoreV1().ServiceAccounts(p.name).Get(ctx, "default", metav1.GetOptions{})
+sa, err := p.cli.CoreV1().ServiceAccounts(p.Name).Get(ctx, "default", metav1.GetOptions{})
if err != nil || kerrors.IsNotFound(err) {
return fmt.Errorf("error retrieving default ServiceAccount")
}
@ -67,7 +67,7 @@ func (p Project) Verify(ctx context.Context) error {
return fmt.Errorf("default ServiceAccount does not have secrets")
}
-proj, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.name, metav1.GetOptions{})
+proj, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -81,7 +81,7 @@ func (p Project) Verify(ctx context.Context) error {
// VerifyProjectIsDeleted verifies that the project does not exist and returns error if a project exists
// or if it encounters an error other than NotFound
func (p Project) VerifyProjectIsDeleted(ctx context.Context) error {
-_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.name, metav1.GetOptions{})
+_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}