package cluster

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-08-01/network"
	mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/to"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/Azure/ARO-RP/pkg/api"
	apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet"
	"github.com/Azure/ARO-RP/pkg/env"
	"github.com/Azure/ARO-RP/pkg/util/arm"
	"github.com/Azure/ARO-RP/pkg/util/oidcbuilder"
	"github.com/Azure/ARO-RP/pkg/util/pointerutils"
	"github.com/Azure/ARO-RP/pkg/util/stringutils"
)

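// nsgNotReadyErrorRegex matches the error returned when a subnet update
// references a network security group that was just created and cannot be
// resolved yet; _attachNSGs retries on this error rather than failing.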
var nsgNotReadyErrorRegex = regexp.MustCompile("Resource.*networkSecurityGroups.*referenced by resource.*not found")

const storageServiceEndpoint = "Microsoft.Storage"

func (m *manager) createDNS(ctx context.Context) error {
	return m.dns.Create(ctx, m.doc.OpenShiftCluster)
}

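// createOIDC creates the OIDC issuer documents in blob storage for clusters
// that use the platform workload identity profile, and records the resulting
// issuer URL and bound service account signing key on the cluster document.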
func (m *manager) createOIDC(ctx context.Context) error {
	if m.doc.OpenShiftCluster.Properties.ServicePrincipalProfile != nil || m.doc.OpenShiftCluster.Properties.PlatformWorkloadIdentityProfile == nil {
		return nil
	}

	// The OIDC storage web endpoint needs to be determined for development environments
	var oidcEndpoint string
	if m.env.FeatureIsSet(env.FeatureRequireOIDCStorageWebEndpoint) {
		properties, err := m.rpBlob.GetContainerProperties(ctx, m.env.ResourceGroup(), m.env.OIDCStorageAccountName(), oidcbuilder.WebContainer)
		if err != nil {
			return err
		}
		oidcEndpoint = *properties.Properties.PrimaryEndpoints.Web
	} else {
		// For production, the Azure Front Door endpoint is the OIDC endpoint
		oidcEndpoint = m.env.OIDCEndpoint()
	}

	oidcBuilder, err := oidcbuilder.NewOIDCBuilder(m.env, oidcEndpoint, env.OIDCBlobDirectoryPrefix+m.doc.ID)
	if err != nil {
		return err
	}

	azBlobClient, err := m.rpBlob.GetAZBlobClient(oidcBuilder.GetBlobContainerURL(), &azblob.ClientOptions{})
	if err != nil {
		return err
	}

	err = oidcBuilder.EnsureOIDCDocs(ctx, azBlobClient)
	if err != nil {
		return err
	}

	m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
		doc.OpenShiftCluster.Properties.ClusterProfile.OIDCIssuer = pointerutils.ToPtr(api.OIDCIssuer(oidcBuilder.GetEndpointUrl()))
		doc.OpenShiftCluster.Properties.ClusterProfile.BoundServiceAccountSigningKey = pointerutils.ToPtr(api.SecureString(oidcBuilder.GetPrivateKey()))
		return nil
	})
	return err
}

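// ensureInfraID generates an infra ID from the cluster name and persists it on
// the cluster document if one is not already set.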
func (m *manager) ensureInfraID(ctx context.Context) (err error) {
	if m.doc.OpenShiftCluster.Properties.InfraID != "" {
		return err
	}

	// generate an infra ID that is at most 27 characters long, the last 5 of which are random
	infraID := generateInfraID(strings.ToLower(m.doc.OpenShiftCluster.Name), 27, 5)

	m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
		doc.OpenShiftCluster.Properties.InfraID = infraID
		return nil
	})
	return err
}

func (m *manager) ensureResourceGroup(ctx context.Context) (err error) {
	resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
	group := mgmtfeatures.ResourceGroup{}

	// Retain the existing resource group configuration (such as tags) if it exists
	group, err = m.resourceGroups.Get(ctx, resourceGroup)
	if err != nil {
		if detailedErr, ok := err.(autorest.DetailedError); !ok || detailedErr.StatusCode != http.StatusNotFound {
			return err
		}

		// set field values if the RG doesn't exist
		group.Location = &m.doc.OpenShiftCluster.Location
		group.ManagedBy = &m.doc.OpenShiftCluster.ID
	}

	resourceGroupAlreadyExistsError := &api.CloudError{
		StatusCode: http.StatusBadRequest,
		CloudErrorBody: &api.CloudErrorBody{
			Code: api.CloudErrorCodeClusterResourceGroupAlreadyExists,
			Message: "Resource group " + m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID +
				" must not already exist.",
		},
	}

	// If managedBy or location don't match, return an error that the RG must not already exist
	if group.Location == nil || !strings.EqualFold(*group.Location, m.doc.OpenShiftCluster.Location) {
		return resourceGroupAlreadyExistsError
	}

	if group.ManagedBy == nil || !strings.EqualFold(*group.ManagedBy, m.doc.OpenShiftCluster.ID) {
		return resourceGroupAlreadyExistsError
	}

	// HACK: set purge=true on dev clusters so our purger wipes them out, since there is no deny assignment in place
	if m.env.IsLocalDevelopmentMode() {
		if group.Tags == nil {
			group.Tags = map[string]*string{}
		}
		group.Tags["purge"] = to.StringPtr("true")
	}

	// According to https://stackoverflow.microsoft.com/a/245391/62320,
	// re-PUTting our RG should re-create RP RBAC after a customer subscription
	// migrates between tenants.
	_, err = m.resourceGroups.CreateOrUpdate(ctx, resourceGroup, group)

	var serviceError *azure.ServiceError
	// CreateOrUpdate returns an autorest.DetailedError wrapping a *azure.RequestError
	// (at least when the error is generated in the resource group CreateOrUpdateResponder)
	if detailedErr, ok := err.(autorest.DetailedError); ok {
		if requestErr, ok := detailedErr.Original.(*azure.RequestError); ok {
			serviceError = requestErr.ServiceError
		}
	}

	// TODO [gv]: kept for backwards compatibility, but this can probably be removed
	if requestErr, ok := err.(*azure.RequestError); ok {
		serviceError = requestErr.ServiceError
	}

	if serviceError != nil && serviceError.Code == "RequestDisallowedByPolicy" {
		// if the request was disallowed by policy, inform the user so they can take appropriate action
		b, _ := json.Marshal(serviceError)

		return &api.CloudError{
			StatusCode: http.StatusBadRequest,
			CloudErrorBody: &api.CloudErrorBody{
				Code:    api.CloudErrorCodeDeploymentFailed,
				Message: "Deployment failed.",
				Details: []api.CloudErrorBody{
					{
						Message: string(b),
					},
				},
			},
		}
	}

	if err != nil {
		return err
	}

	return m.env.EnsureARMResourceGroupRoleAssignment(ctx, resourceGroup)
}

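// deployBaseResourceTemplate deploys the "storage" ARM template into the
// cluster resource group: the cluster and image registry storage accounts and
// blob containers, the cluster NSG, load balancers, private link service and,
// where enabled, the private endpoint and deny assignment.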
func (m *manager) deployBaseResourceTemplate(ctx context.Context) error {
	resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
	infraID := m.doc.OpenShiftCluster.Properties.InfraID

	clusterStorageAccountName := "cluster" + m.doc.OpenShiftCluster.Properties.StorageSuffix
	azureRegion := strings.ToLower(m.doc.OpenShiftCluster.Location) // Used in k8s object names, so must pass DNS-1123 validation

	ocpSubnets, err := m.subnetsWithServiceEndpoint(ctx, storageServiceEndpoint)
	if err != nil {
		return err
	}

	resources := []*arm.Resource{
		m.storageAccount(clusterStorageAccountName, azureRegion, ocpSubnets, true),
		m.storageAccountBlobContainer(clusterStorageAccountName, "ignition"),
		m.storageAccountBlobContainer(clusterStorageAccountName, "aro"),
		m.storageAccount(m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName, azureRegion, ocpSubnets, true),
		m.storageAccountBlobContainer(m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName, "image-registry"),
		m.clusterNSG(infraID, azureRegion),
		m.clusterServicePrincipalRBAC(),
		m.networkPrivateLinkService(azureRegion),
		m.networkInternalLoadBalancer(azureRegion),
	}

	// Create public load balancer routing if needed
	if m.doc.OpenShiftCluster.Properties.NetworkProfile.OutboundType == api.OutboundTypeLoadbalancer {
		m.newPublicLoadBalancer(ctx, &resources)

		// If the cluster is public we still want the default public IP address
		if m.doc.OpenShiftCluster.Properties.IngressProfiles[0].Visibility == api.VisibilityPublic {
			resources = append(resources,
				m.networkPublicIPAddress(azureRegion, infraID+"-default-v4"),
			)
		}
	}

	if m.doc.OpenShiftCluster.Properties.FeatureProfile.GatewayEnabled {
		resources = append(resources,
			m.networkPrivateEndpoint(),
		)
	}

	t := &arm.Template{
		Schema:         "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
		ContentVersion: "1.0.0.0",
		Resources:      resources,
	}

	if !m.env.FeatureIsSet(env.FeatureDisableDenyAssignments) {
		t.Resources = append(t.Resources, m.denyAssignment())
	}

	return arm.DeployTemplate(ctx, m.log, m.deployments, resourceGroup, "storage", t, nil)
}

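// newPublicLoadBalancer appends the public IP address(es) and public load
// balancer required for the API server and managed outbound traffic to
// resources, and patches the effective outbound IPs onto the cluster document.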
func (m *manager) newPublicLoadBalancer(ctx context.Context, resources *[]*arm.Resource) {
	infraID := m.doc.OpenShiftCluster.Properties.InfraID
	azureRegion := strings.ToLower(m.doc.OpenShiftCluster.Location) // Used in k8s object names, so must pass DNS-1123 validation

	var outboundIPs []api.ResourceReference
	if m.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
		*resources = append(*resources,
			m.networkPublicIPAddress(azureRegion, infraID+"-pip-v4"),
		)
		if m.doc.OpenShiftCluster.Properties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs != nil {
			outboundIPs = append(outboundIPs, api.ResourceReference{ID: m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID + "/providers/Microsoft.Network/publicIPAddresses/" + infraID + "-pip-v4"})
		}
	}

	if m.doc.OpenShiftCluster.Properties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs != nil {
		for i := len(outboundIPs); i < m.doc.OpenShiftCluster.Properties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count; i++ {
			ipName := genManagedOutboundIPName()
			*resources = append(*resources, m.networkPublicIPAddress(azureRegion, ipName))
			outboundIPs = append(outboundIPs, api.ResourceReference{ID: m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID + "/providers/Microsoft.Network/publicIPAddresses/" + ipName})
		}
	}

	m.patchEffectiveOutboundIPs(ctx, outboundIPs)

	*resources = append(*resources,
		m.networkPublicLoadBalancer(azureRegion, outboundIPs),
	)
}

// subnetsWithServiceEndpoint returns a unique slice of subnet resource IDs that have the corresponding
// service endpoint
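// (for example, deployBaseResourceTemplate passes storageServiceEndpoint,
// "Microsoft.Storage", and feeds the result to m.storageAccount)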
func (m *manager) subnetsWithServiceEndpoint(ctx context.Context, serviceEndpoint string) ([]string, error) {
	subnetsMap := map[string]struct{}{}

	subnetsMap[m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID] = struct{}{}
	workerProfiles, _ := api.GetEnrichedWorkerProfiles(m.doc.OpenShiftCluster.Properties)

	for _, v := range workerProfiles {
		// skip worker profiles with empty subnet IDs rather than failing on them, as they're not valid
		if v.SubnetID == "" {
			continue
		}

		subnetsMap[strings.ToLower(v.SubnetID)] = struct{}{}
	}

	subnets := []string{}
	for subnetId := range subnetsMap {
		// We purposefully fail if we can't fetch the subnet as the FPSP most likely
		// lost read permission over the subnet.
		subnet, err := m.subnet.Get(ctx, subnetId)
		if err != nil {
			return nil, err
		}

		if subnet.SubnetPropertiesFormat == nil || subnet.ServiceEndpoints == nil {
			continue
		}

		for _, endpoint := range *subnet.ServiceEndpoints {
			if endpoint.Service != nil && strings.EqualFold(*endpoint.Service, serviceEndpoint) && endpoint.Locations != nil {
				for _, loc := range *endpoint.Locations {
					if loc == "*" || strings.EqualFold(loc, m.doc.OpenShiftCluster.Location) {
						subnets = append(subnets, subnetId)
					}
				}
			}
		}
	}

	return subnets, nil
}

// attachNSGs attaches NSGs to the cluster subnets, if preconfigured NSG is not
// enabled. This method is suitable for use with steps, and has default
// timeout/polls set.
func (m *manager) attachNSGs(ctx context.Context) error {
	// Since we need to guard against the case where NSGs are not ready
	// immediately after creation, we can have a relatively short retry period
	// of 30s and timeout of 3m. These numbers were chosen via a
	// highly-non-specific and data-adjacent process (picking them because they
	// seemed decent enough).
	//
	// If we get the NSG not-ready error after 3 minutes, it's unusual enough
	// that we should be raising it as an issue rather than tolerating it.
	return m._attachNSGs(ctx, 3*time.Minute, 30*time.Second)
}

// _attachNSGs attaches NSGs to the cluster subnets, if preconfigured NSG is not
// enabled. timeout and pollInterval are provided as arguments for testing
// reasons.
func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollInterval time.Duration) error {
	if m.doc.OpenShiftCluster.Properties.NetworkProfile.PreconfiguredNSG == api.PreconfiguredNSGEnabled {
		return nil
	}

	var innerErr error
	workerProfiles, _ := api.GetEnrichedWorkerProfiles(m.doc.OpenShiftCluster.Properties)
	workerSubnetId := workerProfiles[0].SubnetID

	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// This polling function protects the case below where the NSG might not be
	// ready to be referenced. We don't guard against trying to re-attach the
	// NSG since the inner loop is tolerant of that, and since we are attaching
	// the same NSG the only allowed failure case is when the NSG cannot be
	// attached to begin with, so it shouldn't happen in practice.
	_ = wait.PollImmediateUntil(pollInterval, func() (bool, error) {
		var c bool
		c, innerErr = func() (bool, error) {
			for _, subnetID := range []string{
				m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID,
				workerSubnetId,
			} {
				m.log.Printf("attaching network security group to subnet %s", subnetID)

				// TODO: there is probably an undesirable race condition here - check if etags can help.

				// We use the outer context, not the timeout context, as we do not want
				// to time out the condition function itself, only stop retrying once
				// timeoutCtx's timeout has fired.
				s, err := m.subnet.Get(ctx, subnetID)
				if err != nil {
					return false, err
				}

				if s.SubnetPropertiesFormat == nil {
					s.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
				}

				nsgID, err := apisubnet.NetworkSecurityGroupID(m.doc.OpenShiftCluster, subnetID)
				if err != nil {
					return false, err
				}

				// Sometimes we get into a race condition between external services modifying
				// subnets and our validation code. We try to catch this early, but these
				// errors are propagated to make the user-facing error clearer in case the
				// modification happened after we ran the validation code and we lost the race.
				if s.SubnetPropertiesFormat.NetworkSecurityGroup != nil {
					if strings.EqualFold(*s.SubnetPropertiesFormat.NetworkSecurityGroup.ID, nsgID) {
						continue
					}

					return false, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedVNet, "", "The provided subnet '%s' is invalid: must not have a network security group attached.", subnetID)
				}

				s.SubnetPropertiesFormat.NetworkSecurityGroup = &mgmtnetwork.SecurityGroup{
					ID: to.StringPtr(nsgID),
				}

				// Because we attempt to attach the NSG immediately after the base resource deployment
				// finishes, the NSG is sometimes not yet ready to be referenced and used, causing
				// an error to occur here. So if this particular error occurs, return nil to retry.
				// But if some other type of error occurs, just return that error.
				err = m.subnet.CreateOrUpdate(ctx, subnetID, s)
				if err != nil {
					if nsgNotReadyErrorRegex.MatchString(err.Error()) {
						return false, nil
					}
					return false, err
				}
			}
			return true, nil
		}()
		return c, innerErr
	}, timeoutCtx.Done())

	return innerErr
}

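// setMasterSubnetPolicies disables private link service network policies (and,
// when the gateway feature is enabled, private endpoint network policies) on
// the master subnet.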
func (m *manager) setMasterSubnetPolicies(ctx context.Context) error {
	// TODO: there is probably an undesirable race condition here - check if etags can help.
	subnetId := m.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID
	s, err := m.subnet.Get(ctx, subnetId)
	if err != nil {
		return err
	}

	if s.SubnetPropertiesFormat == nil {
		s.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
	}

	if m.doc.OpenShiftCluster.Properties.FeatureProfile.GatewayEnabled {
		s.SubnetPropertiesFormat.PrivateEndpointNetworkPolicies = to.StringPtr("Disabled")
	}
	s.SubnetPropertiesFormat.PrivateLinkServiceNetworkPolicies = to.StringPtr("Disabled")

	err = m.subnet.CreateOrUpdate(ctx, subnetId, s)
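	// If the subnet update is rejected by Azure Policy, surface the offending
	// policy definition and assignment names (parsed out of the raw error text
	// below) instead of returning the opaque ARM error as-is.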
	if detailedErr, ok := err.(autorest.DetailedError); ok {
		if strings.Contains(detailedErr.Original.Error(), "RequestDisallowedByPolicy") {
			return &api.CloudError{
				StatusCode: http.StatusBadRequest,
				CloudErrorBody: &api.CloudErrorBody{
					Code: api.CloudErrorCodeRequestDisallowedByPolicy,
					Message: fmt.Sprintf("Resource %s was disallowed by policy.",
						subnetId[strings.LastIndex(subnetId, "/")+1:],
					),
					Details: []api.CloudErrorBody{
						{
							Code: api.CloudErrorCodeRequestDisallowedByPolicy,
							Message: fmt.Sprintf("Policy definition : %s\nPolicy Assignment : %s",
								regexp.MustCompile(`policyDefinitionName":"([^"]+)"`).FindStringSubmatch(detailedErr.Original.Error())[1],
								regexp.MustCompile(`policyAssignmentName":"([^"]+)"`).FindStringSubmatch(detailedErr.Original.Error())[1],
							),
						},
					},
				},
			}
		}
	}
	return err
}

// generateInfraID takes base and returns an ID that
// - is at most maxLen characters long
// - ends in randomLen random characters
// - only contains `alphanum` or `-`
// see openshift/installer/pkg/asset/installconfig/clusterid.go for original implementation
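//
// For illustration only (the random suffix shown is arbitrary):
//
//	generateInfraID("my-aro-cluster", 27, 5) == "my-aro-cluster-4z9kq"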
func generateInfraID(base string, maxLen int, randomLen int) string {
	maxBaseLen := maxLen - (randomLen + 1)

	// replace all characters that are not `alphanum` or `-` with `-`
	re := regexp.MustCompile("[^A-Za-z0-9-]")
	base = re.ReplaceAllString(base, "-")

	// replace all multiple dashes in a sequence with a single one
	re = regexp.MustCompile(`-{2,}`)
	base = re.ReplaceAllString(base, "-")

	// truncate to maxBaseLen
	if len(base) > maxBaseLen {
		base = base[:maxBaseLen]
	}
	base = strings.TrimRight(base, "-")

	// add random chars to the end to randomize
	return fmt.Sprintf("%s-%s", base, utilrand.String(randomLen))
}