create clustermanager api definitions, wire up cosmosdb

This commit is contained in:
Ross Bryan 2022-07-22 15:34:34 -04:00
Родитель 9075dc0f15
Коммит 2d076a8242
121 изменённых файлов: 4060 добавлений и 1098 удалений

Просмотреть файл

@ -1,4 +1,4 @@
468fa0da0a50d50640ec57843ad288af343128b39f5bf23e76e4e336580883d4 swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2020-04-30/redhatopenshift.json
c323c84befa5ea11da50a2407050abed6540ea01e796720bc2241604ce80567c swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/preview/2021-09-01-preview/redhatopenshift.json
e4e522e41855de71c0318db31cbd96c8713e7a74e7c81911bb494f0b194b3f43 swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2022-04-01/redhatopenshift.json
1904724f413437e8f7bec1493e1c332e48b2dffd0f7d1f2f6060c281c07880ce swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2022-09-04/redhatopenshift.json
10f0a281da1e0d3fcd30788b87d5afdd2b4fb501af5d179c114c45ab870b6f2a swagger/redhatopenshift/resource-manager/Microsoft.RedHatOpenShift/stable/2022-09-04/redhatopenshift.json

Просмотреть файл

@ -110,6 +110,11 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
return err
}
dbClusterManagerConfiguration, err := database.NewClusterManagerConfigurations(ctx, _env.IsLocalDevelopmentMode(), dbc)
if err != nil {
return err
}
dbBilling, err := database.NewBilling(ctx, _env.IsLocalDevelopmentMode(), dbc)
if err != nil {
return err
@ -142,7 +147,7 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
return err
}
f, err := frontend.NewFrontend(ctx, audit, log.WithField("component", "frontend"), _env, dbAsyncOperations, dbOpenShiftClusters, dbSubscriptions, dbOpenShiftVersions, api.APIs, m, feAead, adminactions.NewKubeActions, adminactions.NewAzureActions, clusterdata.NewBestEffortEnricher)
f, err := frontend.NewFrontend(ctx, audit, log.WithField("component", "frontend"), _env, dbAsyncOperations, dbClusterManagerConfiguration, dbOpenShiftClusters, dbSubscriptions, dbOpenShiftVersions, api.APIs, m, feAead, adminactions.NewKubeActions, adminactions.NewAzureActions, clusterdata.NewBestEffortEnricher)
if err != nil {
return err
}

Просмотреть файл

@ -216,6 +216,16 @@
curl -X GET -k "https://localhost:8443/admin/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/kubernetespodlogs?podname=$POD&namespace=$NAMESPACE&container=$CONTAINER"
```
## OpenShift Cluster Manager (OCM) Configuration API Actions
* Create a new OCM configuration
* You can find example payloads in the project's `./hack/ocm` folder.
```bash
curl -X PUT -k "https://localhost:8443/subscriptions/fe16a035-e540-4ab7-80d9-373fa9a3d6ae/resourceGroups/$RESOURCEGROUP/providers/Microsoft.RedHatOpenShift/openShiftClusters/$CLUSTER/syncsets/mySyncSet?api-version=2022-09-04" --header "Content-Type: application/json" -d @./hack/ocm/syncset.json
```
## Debugging OpenShift Cluster
* SSH to the bootstrap node:

39
hack/ocm/machinepool.json Normal file
Просмотреть файл

@ -0,0 +1,39 @@
{
"apiVersion": "hive.openshift.io/v1",
"kind": "MachinePool",
"metadata": {
"creationTimestamp": "2022-08-16T14:17:10Z",
"generation": 1,
"labels": {
"api.openshift.com/id": "1u4lhakk4ar41bi3vgn0b7v9hk93dg4m"
},
"name": "oadler-full-worker",
"namespace": "uhc-staging-1u4lhakk4ar41bi3vgn0b7v9hk93dg4m",
"resourceVersion": "1205855122",
"uid": "28a4de99-dc5f-4a9a-9f50-94a7dd47c712"
},
"spec": {
"clusterDeploymentRef": {
"name": "oadler-full"
},
"name": "worker",
"platform": {
"aws": {
"rootVolume": {
"iops": 0,
"size": 300,
"type": "gp3"
},
"type": "m5.xlarge",
"zones": [
"us-east-1a"
]
}
},
"replicas": 2
},
"status": {
"conditions": [
]
}
}

Просмотреть файл

@ -0,0 +1,36 @@
{
"apiVersion": "hive.openshift.io/v1",
"kind": "SyncIdentityProvider",
"metadata": {
"creationTimestamp": "2022-08-25T19:15:28Z",
"generation": 4,
"labels": {
"api.openshift.com/id": "1ua35jp6or0hbv455nm2h3d0hhdkdbj2",
"api.openshift.com/name": "abyrne-hcp2"
},
"name": "abyrne-hcp2",
"namespace": "uhc-staging-1ua35jp6or0hbv455nm2h3d0hhdkdbj2",
"resourceVersion": "1221948462",
"uid": "ae015d32-8ff3-441c-9770-123bbc69853d"
},
"spec": {
"clusterDeploymentRefs": [
{
"name": "abyrne-hcp2"
}
],
"identityProviders": [
{
"htpasswd": {
"fileData": {
"name": "htpasswd-secret"
}
},
"mappingMethod": "claim",
"name": "HTPasswd",
"type": "HTPasswd"
}
]
},
"status": {}
}

20
hack/ocm/syncset.json Normal file
Просмотреть файл

@ -0,0 +1,20 @@
{
"apiVersion": "hive.openshift.io/v1",
"kind": "SyncSet",
"metadata": {
"name": "sample",
"namespace": "aro-f60ae8a2-bca1-4987-9056-f2f6a1837caa"
},
"spec": {
"clusterDeploymentRefs": [],
"resources": [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "myconfigmap"
}
}
]
}
}

133
pkg/api/clustermanager.go Normal file
Просмотреть файл

@ -0,0 +1,133 @@
package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// SyncSetList represents a list of SyncSets for a given cluster.
type SyncSetList struct {
	// Syncsets is the current page of results.
	Syncsets []*SyncSet `json:"value"`

	// The link used to get the next page of operations.
	NextLink string `json:"nextLink,omitempty"`
}

// ClusterManagerConfigurationList represents a paged list of
// ClusterManagerConfiguration resources.
type ClusterManagerConfigurationList struct {
	ClusterManagerConfigurations []*ClusterManagerConfiguration `json:"value"`

	// The link used to get the next page of results.
	NextLink string `json:"nextLink,omitempty"`
}
// ClusterManagerConfiguration represents the configuration from OpenShift Cluster Manager (OCM)
type ClusterManagerConfiguration struct {
	MissingFields

	// ID is the unique identifier for the cluster manager configuration
	ID   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`

	// ClusterResourceId is the Azure resource ID of the parent OpenShift cluster.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// Deleting soft-deletes the resource so the CosmosDB change feed can
	// observe deletions:
	// https://docs.microsoft.com/en-us/azure/cosmos-db/change-feed-design-patterns#deletes
	Deleting bool `json:"deleting,omitempty"`

	Properties ClusterManagerConfigurationProperties `json:"properties,omitempty"`

	// SystemData metadata from ARM, more info in pkg/api/openshiftcluster.go
	SystemData *SystemData `json:"systemData,omitempty"`
}

// ClusterManagerConfigurationProperties represents the properties of a
// ClusterManagerConfiguration. Resources holds the raw (undecoded) OCM
// resource payload.
type ClusterManagerConfigurationProperties struct {
	Resources []byte `json:"resources,omitempty"`
}
// SyncSet represents a SyncSet for an Azure Red Hat OpenShift Cluster.
type SyncSet struct {
	MissingFields

	// ID, Name and Type are cased as the user provided them at create time.
	// ID, Name, Type and Location are immutable.
	ID   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
	Type string `json:"type,omitempty"`

	// The Syncsets properties
	Properties SyncSetProperties `json:"properties,omitempty"`
}

// SyncSetProperties represents the properties of a SyncSet
type SyncSetProperties struct {
	// The parent Azure Red Hat OpenShift resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// APIVersion for the SyncSet.
	APIVersion string `json:"apiVersion,omitempty"`

	// SyncSet kind.
	Kind string `json:"kind,omitempty"`

	// Metadata for the SyncSet.
	Metadata map[string]string `json:"metadata,omitempty"`

	// The SyncSet Specification.
	Spec SyncSetSpec `json:"spec,omitempty"`

	// ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
	ClusterDeploymentRefs []string `json:"clusterDeploymentRefs,omitempty"`

	// Resources represents the SyncSets configuration.
	Resources map[string]string `json:"resources,omitempty"`

	// The status of the object.
	Status string `json:"status,omitempty"`

	// Resources []byte `json:"resources,omitempty"`
}

// SyncSetSpec is an empty placeholder in the internal representation.
// NOTE(review): the v20220904 external SyncSetSpec carries populated fields;
// confirm whether this internal type is intentionally empty.
type SyncSetSpec struct {
}
// MachinePool represents a MachinePool
type MachinePool struct {
	// The Resource ID.
	ID string `json:"id,omitempty"`

	// The resource name.
	Name string `json:"name,omitempty"`

	// The parent cluster resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// The MachinePool properties.
	Properties MachinePoolProperties `json:"properties,omitempty"`
}

// MachinePoolProperties represents the properties of a MachinePool.
// Resources holds the raw (undecoded) Hive MachinePool payload.
type MachinePoolProperties struct {
	Resources []byte `json:"resources,omitempty"`
}
// SyncIdentityProvider represents a SyncIdentityProvider
type SyncIdentityProvider struct {
	// The Resource ID.
	ID string `json:"id,omitempty"`

	// The resource name.
	Name string `json:"name,omitempty"`

	// The parent cluster resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// The SyncIdentityProvider properties.
	Properties SyncIdentityProviderProperties `json:"properties,omitempty"`
}

// SyncIdentityProviderProperties represents the properties of a
// SyncIdentityProvider. Resources holds the raw (undecoded) payload.
type SyncIdentityProviderProperties struct {
	MissingFields

	Resources []byte `json:"resources,omitempty"`
}
// // HiveSecret represents a hive secret.
// type HiveSecret struct {
// }
// // SyncSetProperties represents the properties of a SyncSet
// type HiveSecretProperties struct {
// MissingFields
// Resources []byte `json:"resources,omitempty"`
// }

Просмотреть файл

@ -3,22 +3,22 @@ package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// OpenShiftClusterManagerConfigurationDocument represents OpenShift cluster manager configuration documents.
// ClusterManagerConfigurationDocument represents OpenShift cluster manager configuration documents.
// pkg/database/cosmosdb requires its definition.
type OpenShiftClusterManagerConfigurationDocuments struct {
Count int `json:"_count,omitempty"`
ResourceID string `json:"_rid,omitempty"`
OpenShiftClusterManagerConfigurationDocuments []*OpenShiftClusterManagerConfigurationDocument `json:"Documents,omitempty"`
type ClusterManagerConfigurationDocuments struct {
Count int `json:"_count,omitempty"`
ResourceID string `json:"_rid,omitempty"`
ClusterManagerConfigurationDocuments []*ClusterManagerConfigurationDocument `json:"Documents,omitempty"`
}
// String returns a JSON representation of the OpenShiftClusterManagerConfigurationDocuments struct.
func (c *OpenShiftClusterManagerConfigurationDocuments) String() string {
func (c *ClusterManagerConfigurationDocuments) String() string {
return encodeJSON(c)
}
// OpenShiftClusterManagerConfigurationDocument represents an OpenShift cluster manager configuration document.
// pkg/database/cosmosdb requires its definition.
type OpenShiftClusterManagerConfigurationDocument struct {
type ClusterManagerConfigurationDocument struct {
MissingFields
ID string `json:"id,omitempty"`
@ -31,5 +31,20 @@ type OpenShiftClusterManagerConfigurationDocument struct {
LSN int `json:"_lsn,omitempty"`
Metadata map[string]interface{} `json:"_metadata,omitempty"`
//OpenShiftClusterManagerConfiguration *OpenShiftClusterManagerConfiguration `json:"openShiftClusterManagerConfiguration,omitempty"`
Key string `json:"key,omitempty"`
PartitionKey string `json:"partitionKey,omitempty" deep:"-"`
Deleting bool `json:"deleting,omitempty"` // https://docs.microsoft.com/en-us/azure/cosmos-db/change-feed-design-patterns#deletes
ClusterManagerConfiguration *ClusterManagerConfiguration `json:"clusterManagerConfiguration,omitempty"`
SyncIdentityProvider *SyncIdentityProvider `json:"syncIdentityProvider,omitempty"`
SyncSet *SyncSet `json:"syncSet,omitempty"`
MachinePool *MachinePool `json:"machinePool,omitempty"`
CorrelationData *CorrelationData `json:"correlationData,omitempty" deep:"-"`
}
// String returns a JSON representation of the OpenShiftClusterManagerConfigurationDocument struct.
func (c *ClusterManagerConfigurationDocument) String() string {
return encodeJSON(c)
}

Просмотреть файл

@ -0,0 +1,85 @@
package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// mpPayload is an example Hive MachinePool manifest used to populate the
// example ClusterManagerConfigurationDocument below. It mirrors
// hack/ocm/machinepool.json with the platform details elided.
//
// The original payload was malformed JSON: "replicas" had fallen outside the
// "spec" object and an extra closing brace unbalanced the document.
const mpPayload string = `
{
	"apiVersion": "hive.openshift.io/v1",
	"kind": "MachinePool",
	"metadata": {
		"creationTimestamp": "2022-08-16T14:17:10Z",
		"generation": 1,
		"labels": {
			"api.openshift.com/id": "1u4lhakk4ar41bi3vgn0b7v9hk93dg4m"
		},
		"name": "oadler-full-worker",
		"namespace": "uhc-staging-1u4lhakk4ar41bi3vgn0b7v9hk93dg4m",
		"resourceVersion": "1205855122",
		"uid": "28a4de99-dc5f-4a9a-9f50-94a7dd47c712"
	},
	"spec": {
		"clusterDeploymentRef": {
			"name": "oadler-full"
		},
		"name": "worker",
		"platform": {},
		"replicas": 2
	},
	"status": {
		"conditions": []
	}
}
`
// ExampleClusterManagerConfigurationDocumentSyncSet returns a populated
// ClusterManagerConfigurationDocument wrapping a SyncSet, for use in tests
// and example generation.
func ExampleClusterManagerConfigurationDocumentSyncSet() *ClusterManagerConfigurationDocument {
	return &ClusterManagerConfigurationDocument{
		ID: "00000000-0000-0000-0000-000000000000",
		// NOTE(review): Key appears to be the lowercased resource ID used for
		// document lookup — confirm against the database layer.
		Key:          "/subscriptions/subscriptionid/resourcegroups/resourcegroup/providers/microsoft.redhatopenshift/openshiftclusters/resourcename/syncSets/mySyncSet",
		ResourceID:   "",
		PartitionKey: "",
		SyncSet: &SyncSet{
			Name: "mySyncSet",
			// ID preserves caller-supplied casing, unlike Key above.
			ID: "/subscriptions/subscriptionId/resourceGroups/resourceGroup/providers/Microsoft.RedHatOpenShift/OpenShiftClusters/resourceName/syncSets/mySyncSet",
			Properties: SyncSetProperties{
				ClusterResourceId: "/subscriptions/subscriptionid/resourcegroups/resourcegroup/providers/microsoft.redhatopenshift/openshiftclusters/resourcename",
				APIVersion:        "hive.openshift.io/v1",
				Kind:              "SyncSet",
				Metadata: map[string]string{
					"name":      "sample",
					"namespace": "aro-f60ae8a2-bca1-4987-9056-f2f6a1837caa",
				},
				ClusterDeploymentRefs: []string{
					"uhc-staging-1u4lhakk4ar41bi3vgn0b7v9hk93dg4m",
					"aro-f60ae8a2-bca1-4987-9056-f2f6a1837caa",
				},
				Resources: map[string]string{
					"name":     "worker",
					"platform": "azure",
					"replicas": "2",
				},
			},
		},
		CorrelationData: &CorrelationData{},
	}
}
// ExampleClusterManagerConfigurationDocumentMachinePool returns a populated
// ClusterManagerConfigurationDocument wrapping a MachinePool, for use in
// tests and example generation. The MachinePool resources are the raw
// mpPayload manifest bytes.
func ExampleClusterManagerConfigurationDocumentMachinePool() *ClusterManagerConfigurationDocument {
	return &ClusterManagerConfigurationDocument{
		ID:           "00000000-0000-0000-0000-000000000000",
		Key:          "/subscriptions/subscriptionid/resourcegroups/resourcegroup/providers/microsoft.redhatopenshift/openshiftclusters/resourcename/machinepools/myMachinePool",
		ResourceID:   "",
		PartitionKey: "",
		MachinePool: &MachinePool{
			Name: "myMachinePool",
			// ID preserves caller-supplied casing, unlike Key above.
			ID:                "/subscriptions/subscriptionId/resourceGroups/resourceGroup/providers/Microsoft.RedHatOpenShift/OpenShiftClusters/resourceName/machinePools/myMachinePool",
			ClusterResourceId: "/subscriptions/subscriptionid/resourcegroups/resourcegroup/providers/microsoft.redhatopenshift/openshiftclusters/resourcename",
			Properties: MachinePoolProperties{
				Resources: []byte(mpPayload),
			},
		},
		CorrelationData: &CorrelationData{},
	}
}

Просмотреть файл

@ -39,7 +39,7 @@ func ExampleOpenShiftClusterDocument() *OpenShiftClusterDocument {
ClusterProfile: ClusterProfile{
PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`,
Domain: "cluster.location.aroapp.io",
Version: "4.3.0",
Version: "4.11.0",
ResourceGroupID: "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup",
},
ConsoleProfile: ConsoleProfile{

Просмотреть файл

@ -129,3 +129,31 @@ var OperationListInstallVersions = Operation{
},
Origin: "user,system",
}
// OperationSyncSetsRead is the RP operation definition surfaced to ARM for
// reading a cluster sync set.
var OperationSyncSetsRead = Operation{
	Name: "Microsoft.RedHatOpenShift/openShiftClusters/syncSets/read",
	Display: Display{
		Provider:  "Azure Red Hat OpenShift",
		Resource:  "syncSets",
		Operation: "Read OpenShift cluster sync set",
	},
	Origin: "user,system",
}
// OperationSyncSetsWrite is the RP operation definition surfaced to ARM for
// creating or updating a cluster sync set.
var OperationSyncSetsWrite = Operation{
	Name: "Microsoft.RedHatOpenShift/openShiftClusters/syncSets/write",
	Display: Display{
		Provider:  "Azure Red Hat OpenShift",
		Resource:  "syncSets",
		Operation: "Write OpenShift cluster sync set",
	},
	Origin: "user,system",
}
// OperationSyncSetsDelete is the RP operation definition surfaced to ARM for
// deleting a cluster sync set.
var OperationSyncSetsDelete = Operation{
	Name: "Microsoft.RedHatOpenShift/openShiftClusters/syncSets/delete",
	Display: Display{
		Provider: "Azure Red Hat OpenShift",
		Resource: "syncSets",
		// Consistent with the read/write operations above; the original text
		// was "Delete OpenShift cluster syncset delete" (duplicated word).
		Operation: "Delete OpenShift cluster sync set",
	},
	Origin: "user,system",
}

Просмотреть файл

@ -3,6 +3,14 @@ package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// ClusterManagerConfigurationConverter converts cluster manager
// configurations between the internal representation and an
// API-version-specific external representation.
type ClusterManagerConfigurationConverter interface {
	ToExternal(*ClusterManagerConfiguration) (interface{}, error)
	ToExternalList([]*ClusterManagerConfiguration, string) (interface{}, error)
	ToInternal(interface{}, *ClusterManagerConfiguration) error
}

// ClusterManagerConfigurationStaticValidator statically validates an external
// cluster manager configuration document.
type ClusterManagerConfigurationStaticValidator interface {
	Static(interface{}, *ClusterManagerConfiguration) error
}
type OpenShiftClusterConverter interface {
ToExternal(*OpenShiftCluster) interface{}
ToExternalList([]*OpenShiftCluster, string) interface{}

Просмотреть файл

@ -0,0 +1,147 @@
package v20220904
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// OCM Kinds supported
const (
	MachinePoolKind          = "MachinePool"
	SyncIdentityProviderKind = "SyncIdentityProvider"
	SyncSetKind              = "SyncSet"
	SecretKind               = "Secret"
)

// SyncSetList represents a list of SyncSets
type SyncSetList struct {
	SyncSets []*SyncSet `json:"value"`

	// The link used to get the next page of operations.
	NextLink string `json:"nextLink,omitempty"`
}

// ClusterManagerConfigurationList represents a paged list of
// ClusterManagerConfiguration resources.
type ClusterManagerConfigurationList struct {
	ClusterManagerConfigurations []*ClusterManagerConfiguration `json:"value"`

	// The link used to get the next page of results.
	NextLink string `json:"nextLink,omitempty"`
}
// ClusterManagerConfiguration represents an OpenShift Cluster Manager (OCM)
// configuration in this API version.
type ClusterManagerConfiguration struct {
	// ID is the unique identifier for the cluster manager configuration
	ID   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`

	// ClusterResourceId is the Azure resource ID of the parent OpenShift cluster.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// Deleting soft-deletes the resource for the CosmosDB change feed:
	// https://docs.microsoft.com/en-us/azure/cosmos-db/change-feed-design-patterns#deletes
	Deleting bool `json:"deleting,omitempty"`

	Properties ClusterManagerConfigurationProperties `json:"properties,omitempty"`

	// SystemData metadata from ARM, more info in pkg/api/openshiftcluster.go
	SystemData *SystemData `json:"systemData,omitempty"`
}

// ClusterManagerConfigurationProperties represents the properties of a
// ClusterManagerConfiguration. Resources is the decoded OCM resource payload
// (structured data in the external representation, unlike the internal
// []byte form).
type ClusterManagerConfigurationProperties struct {
	Resources interface{} `json:"resources,omitempty"`
}
// SyncSet represents a SyncSet for an Azure Red Hat OpenShift Cluster.
type SyncSet struct {
	// This is a flag used during the swagger generation typewalker to
	// signal that it should be marked as a proxy resource and
	// not a tracked ARM resource.
	proxyResource bool

	// The resource ID.
	ID string `json:"id,omitempty" mutable:"case"`

	// The resource name.
	Name string `json:"name,omitempty" mutable:"case"`

	// The resource type.
	Type string `json:"type,omitempty" mutable:"case"`

	// // SystemData metadata relating to this resource.
	// SystemData *SystemData `json:"systemData,omitempty"`

	// The Syncsets properties
	Properties SyncSetProperties `json:"properties,omitempty"`
}

// SyncSetProperties represents the properties of a SyncSet
type SyncSetProperties struct {
	// The parent Azure Red Hat OpenShift resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// APIVersion for the SyncSet.
	APIVersion string `json:"apiVersion,omitempty"`

	// SyncSet kind.
	Kind string `json:"kind,omitempty"`

	// Metadata for the SyncSet.
	Metadata map[string]string `json:"metadata,omitempty"`

	// The SyncSet Specification.
	Spec SyncSetSpec `json:"spec,omitempty"`

	// ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
	ClusterDeploymentRefs []string `json:"clusterDeploymentRefs,omitempty"`

	// Resources represents the SyncSets configuration.
	Resources map[string]string `json:"resources,omitempty"`

	// The status of the object.
	Status string `json:"status,omitempty"`

	// Resources []byte `json:"resources,omitempty"`
}

// SyncSetSpec represents the specification section of a SyncSet.
type SyncSetSpec struct {
	// ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
	ClusterDeploymentRefs []string `json:"clusterDeploymentRefs,omitempty"`

	// Resources represents the SyncSets configuration.
	Resources map[string]interface{} `json:"resources,omitempty"`

	// The status of the object.
	Status string `json:"status,omitempty"`
}
// MachinePool represents a MachinePool
type MachinePool struct {
	// The Resource ID.
	ID string `json:"id,omitempty"`

	// The resource name.
	Name string `json:"name,omitempty"`

	// The parent cluster resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// SystemData metadata relating to this resource.
	SystemData *SystemData `json:"systemData,omitempty"`

	// The MachinePool properties.
	Properties MachinePoolProperties `json:"properties,omitempty"`
}

// MachinePoolProperties represents the properties of a MachinePool.
// Resources is the decoded machine pool payload.
type MachinePoolProperties struct {
	Resources interface{} `json:"resources,omitempty"`
}
// SyncIdentityProvider represents a SyncIdentityProvider
type SyncIdentityProvider struct {
	// The Resource ID.
	ID string `json:"id,omitempty"`

	// The resource name.
	Name string `json:"name,omitempty"`

	// The parent cluster resourceID.
	ClusterResourceId string `json:"clusterResourceId,omitempty"`

	// SystemData metadata relating to this resource.
	SystemData *SystemData `json:"systemData,omitempty"`

	// The SyncIdentityProvider properties.
	Properties SyncIdentityProviderProperties `json:"properties,omitempty"`
}

// SyncIdentityProviderProperties represents the properties of a
// SyncIdentityProvider. Resources is the decoded identity provider payload.
type SyncIdentityProviderProperties struct {
	Resources interface{} `json:"resources,omitempty"`
}

Просмотреть файл

@ -0,0 +1,68 @@
package v20220904
import (
"encoding/json"
"github.com/Azure/ARO-RP/pkg/api"
)
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// clusterManagerConfigurationConverter converts between the internal
// api.ClusterManagerConfiguration representation and this API version's
// external representation.
type clusterManagerConfigurationConverter struct{}

// ToExternal converts an internal ClusterManagerConfiguration to its external
// representation, decoding the internal Properties.Resources JSON bytes into
// structured data.
// NOTE(review): Name and ClusterResourceId are not copied here — confirm
// whether that is intentional.
func (c *clusterManagerConfigurationConverter) ToExternal(ocm *api.ClusterManagerConfiguration) (interface{}, error) {
	out := new(ClusterManagerConfiguration)
	out.ID = ocm.ID
	var data interface{}
	err := json.Unmarshal(ocm.Properties.Resources, &data)
	if err != nil {
		return nil, err
	}
	out.Properties.Resources = data
	return out, nil
}
// SyncSetToExternal converts an internal SyncSet to this API version's
// external SyncSet. proxyResource is set so the swagger typewalker marks the
// type as a proxy (not tracked) ARM resource.
func (c *clusterManagerConfigurationConverter) SyncSetToExternal(ocm *api.SyncSet) (interface{}, error) {
	out := new(SyncSet)
	out.ID = ocm.ID
	out.Name = ocm.Name
	out.Type = ocm.Type
	out.proxyResource = true
	out.Properties = SyncSetProperties{
		ClusterResourceId:     ocm.Properties.ClusterResourceId,
		APIVersion:            ocm.Properties.APIVersion,
		Kind:                  ocm.Properties.Kind,
		Metadata:              ocm.Properties.Metadata,
		ClusterDeploymentRefs: ocm.Properties.ClusterDeploymentRefs,
		Resources:             ocm.Properties.Resources,
		Status:                ocm.Properties.Status,
	}
	return out, nil
}
// ToInternal copies the given configuration into the supplied internal
// object. Only ID is copied today.
// NOTE(review): the unchecked type assertion panics unless _ocm is a
// *api.ClusterManagerConfiguration (the internal type, not this package's
// external type) — verify what callers actually pass here.
func (c *clusterManagerConfigurationConverter) ToInternal(_ocm interface{}, out *api.ClusterManagerConfiguration) error {
	ocm := _ocm.(*api.ClusterManagerConfiguration)
	out.ID = ocm.ID
	return nil
}
// ToExternalList returns the external representations of the given internal
// ClusterManagerConfigurations, wrapped in a list structure with nextLink set
// for paging. It fails on the first configuration that cannot be converted.
func (c *clusterManagerConfigurationConverter) ToExternalList(ocms []*api.ClusterManagerConfiguration, nextLink string) (interface{}, error) {
	l := &ClusterManagerConfigurationList{
		ClusterManagerConfigurations: make([]*ClusterManagerConfiguration, 0, len(ocms)),
		NextLink:                     nextLink,
	}

	for _, ocm := range ocms {
		// Use a distinct name for the converted item; the original code
		// shadowed the receiver c with the conversion result, which is
		// confusing to read.
		ext, err := c.ToExternal(ocm)
		if err != nil {
			return nil, err
		}
		l.ClusterManagerConfigurations = append(l.ClusterManagerConfigurations, ext.(*ClusterManagerConfiguration))
	}

	return l, nil
}

Просмотреть файл

@ -0,0 +1,38 @@
package v20220904
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
// exampleSyncSet builds the canonical example SyncSet in this API version's
// external representation, derived from the shared internal example document.
func exampleSyncSet() *SyncSet {
	doc := api.ExampleClusterManagerConfigurationDocumentSyncSet()
	// ext, err := (&clusterManagerConfigurationConverter{}).ToExternal(doc.ClusterManagerConfiguration)
	ext, err := (&clusterManagerConfigurationConverter{}).SyncSetToExternal(doc.SyncSet)
	if err != nil {
		// The example input is static, so a conversion failure is a
		// programming error rather than a runtime condition.
		panic(err)
	}
	return ext.(*SyncSet)
}
// ExampleSyncSetPutParameter returns the example request body for a SyncSet PUT.
func ExampleSyncSetPutParameter() interface{} {
	return exampleSyncSet()
}

// ExampleSyncSetPatchParameter returns the example request body for a SyncSet
// PATCH; it is identical to the PUT parameter.
func ExampleSyncSetPatchParameter() interface{} {
	return ExampleSyncSetPutParameter()
}

// ExampleSyncSetResponse returns the example SyncSet response body.
func ExampleSyncSetResponse() interface{} {
	return exampleSyncSet()
}

// ExampleSyncSetListResponse returns the example response body for listing
// SyncSets.
func ExampleSyncSetListResponse() interface{} {
	return &SyncSetList{
		SyncSets: []*SyncSet{
			ExampleSyncSetResponse().(*SyncSet),
		},
	}
}

Просмотреть файл

@ -0,0 +1,8 @@
package v20220904
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// TODO Implement the static validator
// type SyncSetStaticValidator struct {
// }

Просмотреть файл

@ -33,6 +33,9 @@ func init() {
api.OperationOpenShiftClusterListCredentials,
api.OperationOpenShiftClusterListAdminCredentials,
api.OperationListInstallVersions,
api.OperationSyncSetsRead,
api.OperationSyncSetsWrite,
api.OperationSyncSetsDelete,
},
},
}

Просмотреть файл

@ -27,28 +27,28 @@ import (
"github.com/Azure/go-autorest/tracing"
)
// ListClient is the rest API for Azure Red Hat OpenShift 4
type ListClient struct {
// InstallVersionsClient is the rest API for Azure Red Hat OpenShift 4
type InstallVersionsClient struct {
BaseClient
}
// NewListClient creates an instance of the ListClient client.
func NewListClient(subscriptionID string) ListClient {
return NewListClientWithBaseURI(DefaultBaseURI, subscriptionID)
// NewInstallVersionsClient creates an instance of the InstallVersionsClient client.
func NewInstallVersionsClient(subscriptionID string) InstallVersionsClient {
return NewInstallVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewListClientWithBaseURI creates an instance of the ListClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewListClientWithBaseURI(baseURI string, subscriptionID string) ListClient {
return ListClient{NewWithBaseURI(baseURI, subscriptionID)}
// NewInstallVersionsClientWithBaseURI creates an instance of the InstallVersionsClient client using a custom endpoint.
// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewInstallVersionsClientWithBaseURI(baseURI string, subscriptionID string) InstallVersionsClient {
return InstallVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Versions the operation returns the installable OpenShift versions as strings.
// List the operation returns the installable OpenShift versions as strings.
// Parameters:
// location - the name of Azure region.
func (client ListClient) Versions(ctx context.Context, location string) (result ListString, err error) {
func (client InstallVersionsClient) List(ctx context.Context, location string) (result ListString, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ListClient.Versions")
ctx = tracing.StartSpan(ctx, fqdn+"/InstallVersionsClient.List")
defer func() {
sc := -1
if result.Response.Response != nil {
@ -62,33 +62,33 @@ func (client ListClient) Versions(ctx context.Context, location string) (result
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: location,
Constraints: []validation.Constraint{{Target: "location", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
return result, validation.NewError("redhatopenshift.ListClient", "Versions", err.Error())
return result, validation.NewError("redhatopenshift.InstallVersionsClient", "List", err.Error())
}
req, err := client.VersionsPreparer(ctx, location)
req, err := client.ListPreparer(ctx, location)
if err != nil {
err = autorest.NewErrorWithError(err, "redhatopenshift.ListClient", "Versions", nil, "Failure preparing request")
err = autorest.NewErrorWithError(err, "redhatopenshift.InstallVersionsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.VersionsSender(req)
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "redhatopenshift.ListClient", "Versions", resp, "Failure sending request")
err = autorest.NewErrorWithError(err, "redhatopenshift.InstallVersionsClient", "List", resp, "Failure sending request")
return
}
result, err = client.VersionsResponder(resp)
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "redhatopenshift.ListClient", "Versions", resp, "Failure responding to request")
err = autorest.NewErrorWithError(err, "redhatopenshift.InstallVersionsClient", "List", resp, "Failure responding to request")
return
}
return
}
// VersionsPreparer prepares the Versions request.
func (client ListClient) VersionsPreparer(ctx context.Context, location string) (*http.Request, error) {
// ListPreparer prepares the List request.
func (client InstallVersionsClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"location": autorest.Encode("path", location),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
@ -107,15 +107,15 @@ func (client ListClient) VersionsPreparer(ctx context.Context, location string)
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// VersionsSender sends the Versions request. The method will close the
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ListClient) VersionsSender(req *http.Request) (*http.Response, error) {
func (client InstallVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// VersionsResponder handles the response to the Versions request. The method always
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ListClient) VersionsResponder(resp *http.Response) (result ListString, err error) {
func (client InstallVersionsClient) ListResponder(resp *http.Response) (result ListString, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),

Просмотреть файл

@ -861,6 +861,323 @@ type ServicePrincipalProfile struct {
ClientSecret *string `json:"clientSecret,omitempty"`
}
// SyncSet syncSet represents a SyncSet for an Azure Red Hat OpenShift Cluster.
type SyncSet struct {
autorest.Response `json:"-"`
// SyncSetProperties - The Syncsets properties
*SyncSetProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; The name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type *string `json:"type,omitempty"`
// SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information.
SystemData *SystemData `json:"systemData,omitempty"`
}
// MarshalJSON is the custom marshaler for SyncSet.
func (ss SyncSet) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if ss.SyncSetProperties != nil {
objectMap["properties"] = ss.SyncSetProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SyncSet struct.
func (ss *SyncSet) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var syncSetProperties SyncSetProperties
err = json.Unmarshal(*v, &syncSetProperties)
if err != nil {
return err
}
ss.SyncSetProperties = &syncSetProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
ss.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
ss.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
ss.Type = &typeVar
}
case "systemData":
if v != nil {
var systemData SystemData
err = json.Unmarshal(*v, &systemData)
if err != nil {
return err
}
ss.SystemData = &systemData
}
}
}
return nil
}
// SyncSetList syncSetList represents a list of SyncSets
type SyncSetList struct {
autorest.Response `json:"-"`
Value *[]SyncSet `json:"value,omitempty"`
// NextLink - The link used to get the next page of operations.
NextLink *string `json:"nextLink,omitempty"`
}
// SyncSetListIterator provides access to a complete listing of SyncSet values.
type SyncSetListIterator struct {
i int
page SyncSetListPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SyncSetListIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SyncSetListIterator.NextWithContext")
		defer func() {
			sc := -1
			if r := iter.Response().Response.Response; r != nil {
				sc = r.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Advance within the current page first; only fetch a new page
	// once the current one is exhausted.
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	if err = iter.page.NextWithContext(ctx); err != nil {
		// Roll back so the iterator does not advance on failure.
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// It uses a background context; prefer NextWithContext for cancellation.
// Deprecated: Use NextWithContext() instead.
func (iter *SyncSetListIterator) Next() error {
	return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
// It requires both an unexhausted page and an in-range index.
func (iter SyncSetListIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SyncSetListIterator) Response() SyncSetList {
	return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SyncSetListIterator) Value() SyncSet {
	if iter.page.NotDone() {
		return iter.page.Values()[iter.i]
	}
	return SyncSet{}
}
// NewSyncSetListIterator creates a new instance of the SyncSetListIterator
// type, positioned at the start of the supplied page.
func NewSyncSetListIterator(page SyncSetListPage) SyncSetListIterator {
	return SyncSetListIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (ssl SyncSetList) IsEmpty() bool {
	if ssl.Value == nil {
		return true
	}
	return len(*ssl.Value) == 0
}
// hasNextLink reports whether the server supplied a non-empty
// continuation link for a further page of results.
func (ssl SyncSetList) hasNextLink() bool {
	if ssl.NextLink == nil {
		return false
	}
	return *ssl.NextLink != ""
}
// syncSetListPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
// The returned GET request targets the server-provided NextLink verbatim.
func (ssl SyncSetList) syncSetListPreparer(ctx context.Context) (*http.Request, error) {
	if !ssl.hasNextLink() {
		return nil, nil
	}
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(ssl.NextLink)))
}
// SyncSetListPage contains a page of SyncSet values.
type SyncSetListPage struct {
	// fn fetches the next page given the current one.
	fn func(context.Context, SyncSetList) (SyncSetList, error)
	// ssl is the current page of results.
	ssl SyncSetList
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Empty intermediate pages that still carry a nextLink are skipped, so the
// loop only stops on a non-empty page or when no further link exists.
func (page *SyncSetListPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SyncSetListPage.NextWithContext")
		defer func() {
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	for {
		next, err := page.fn(ctx, page.ssl)
		if err != nil {
			return err
		}
		page.ssl = next
		if !next.hasNextLink() || !next.IsEmpty() {
			break
		}
	}
	return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// It uses a background context; prefer NextWithContext for cancellation.
// Deprecated: Use NextWithContext() instead.
func (page *SyncSetListPage) Next() error {
	return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SyncSetListPage) NotDone() bool {
	return !page.ssl.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SyncSetListPage) Response() SyncSetList {
	return page.ssl
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SyncSetListPage) Values() []SyncSet {
	if !page.ssl.IsEmpty() {
		return *page.ssl.Value
	}
	return nil
}
// NewSyncSetListPage creates a new instance of the SyncSetListPage type from
// an initial page and a function that fetches the following page.
func NewSyncSetListPage(cur SyncSetList, getNextPage func(context.Context, SyncSetList) (SyncSetList, error)) SyncSetListPage {
	return SyncSetListPage{
		fn:  getNextPage,
		ssl: cur,
	}
}
// SyncSetProperties syncSetProperties represents the properties of a SyncSet
type SyncSetProperties struct {
	// ClusterResourceID - The parent Azure Red Hat OpenShift resourceID.
	ClusterResourceID *string `json:"clusterResourceId,omitempty"`
	// APIVersion - APIVersion for the SyncSet.
	APIVersion *string `json:"apiVersion,omitempty"`
	// Kind - SyncSet kind.
	Kind *string `json:"kind,omitempty"`
	// Metadata - Metadata for the SyncSet.
	// No omitempty: emission is controlled by the custom MarshalJSON below.
	Metadata map[string]*string `json:"metadata"`
	// Spec - The SyncSet Specification.
	Spec *SyncSetSpec `json:"spec,omitempty"`
	// ClusterDeploymentRefs - ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
	ClusterDeploymentRefs interface{} `json:"clusterDeploymentRefs,omitempty"`
	// Resources - Resources represents the SyncSets configuration.
	// No omitempty: emission is controlled by the custom MarshalJSON below.
	Resources map[string]*string `json:"resources"`
	// Status - The status of the object.
	Status *string `json:"status,omitempty"`
}
// MarshalJSON is the custom marshaler for SyncSetProperties.
// Each field is nil-checked on its concrete type before insertion, so
// unset fields are omitted entirely (a table of interface{} values would
// defeat this: a typed-nil pointer boxed in an interface is non-nil).
func (ssp SyncSetProperties) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if ssp.ClusterResourceID != nil {
		objectMap["clusterResourceId"] = ssp.ClusterResourceID
	}
	if ssp.APIVersion != nil {
		objectMap["apiVersion"] = ssp.APIVersion
	}
	if ssp.Kind != nil {
		objectMap["kind"] = ssp.Kind
	}
	if ssp.Metadata != nil {
		objectMap["metadata"] = ssp.Metadata
	}
	if ssp.Spec != nil {
		objectMap["spec"] = ssp.Spec
	}
	if ssp.ClusterDeploymentRefs != nil {
		objectMap["clusterDeploymentRefs"] = ssp.ClusterDeploymentRefs
	}
	if ssp.Resources != nil {
		objectMap["resources"] = ssp.Resources
	}
	if ssp.Status != nil {
		objectMap["status"] = ssp.Status
	}
	return json.Marshal(objectMap)
}
// SyncSetSpec represents the specification portion of a SyncSet: the Hive
// cluster-deployment references, the resource payload, and a status string.
type SyncSetSpec struct {
	// ClusterDeploymentRefs - ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
	ClusterDeploymentRefs interface{} `json:"clusterDeploymentRefs,omitempty"`
	// Resources - Resources represents the SyncSets configuration.
	// No omitempty: emission is controlled by the custom MarshalJSON below.
	Resources map[string]interface{} `json:"resources"`
	// Status - The status of the object.
	Status *string `json:"status,omitempty"`
}
// MarshalJSON is the custom marshaler for SyncSetSpec.
// Fields are nil-checked on their concrete types so unset fields are
// omitted from the emitted JSON.
func (sss SyncSetSpec) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if sss.ClusterDeploymentRefs != nil {
		objectMap["clusterDeploymentRefs"] = sss.ClusterDeploymentRefs
	}
	if sss.Resources != nil {
		objectMap["resources"] = sss.Resources
	}
	if sss.Status != nil {
		objectMap["status"] = sss.Status
	}
	return json.Marshal(objectMap)
}
// SystemData metadata pertaining to creation and last modification of the resource.
type SystemData struct {
// CreatedBy - The identity that created the resource.

Просмотреть файл

@ -307,6 +307,125 @@ func (client OpenShiftClustersClient) GetResponder(resp *http.Response) (result
return
}
// List the operation returns properties of each OpenShift cluster.
// It returns the first page of results; use NextWithContext/ListComplete
// to walk subsequent pages.
func (client OpenShiftClustersClient) List(ctx context.Context) (result OpenShiftClusterListPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftClustersClient.List")
		defer func() {
			sc := -1
			if result.oscl.Response.Response != nil {
				sc = result.oscl.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("redhatopenshift.OpenShiftClustersClient", "List", err.Error())
	}
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "List", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.oscl.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "List", resp, "Failure sending request")
		return
	}
	result.oscl, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "List", resp, "Failure responding to request")
		return
	}
	// Eagerly skip past an empty first page that still has a nextLink.
	if result.oscl.hasNextLink() && result.oscl.IsEmpty() {
		err = result.NextWithContext(ctx)
		return
	}
	return
}
// ListPreparer prepares the List request: a subscription-scoped GET
// against the Microsoft.RedHatOpenShift/openShiftClusters collection.
func (client OpenShiftClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2022-09-04"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.RedHatOpenShift/openShiftClusters", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OpenShiftClustersClient) ListSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client OpenShiftClustersClient) ListResponder(resp *http.Response) (result OpenShiftClusterList, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// listNextResults retrieves the next set of results, if any.
// A nil prepared request means the previous page carried no nextLink and
// enumeration is complete.
func (client OpenShiftClustersClient) listNextResults(ctx context.Context, lastResults OpenShiftClusterList) (result OpenShiftClusterList, err error) {
	req, err := lastResults.openShiftClusterListPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listNextResults", nil, "Failure preparing next results request")
	}
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
// The returned iterator is positioned at the first value of the first page.
func (client OpenShiftClustersClient) ListComplete(ctx context.Context) (result OpenShiftClusterListIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftClustersClient.List")
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx)
	return
}
// ListAdminCredentials the operation returns the admin kubeconfig.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
@ -602,125 +721,6 @@ func (client OpenShiftClustersClient) ListCredentialsResponder(resp *http.Respon
return
}
// ListMethod the operation returns properties of each OpenShift cluster.
//
// NOTE(review): this is an exact duplicate of List above — same endpoint,
// same API version, same behavior. It appears to be generator residue;
// prefer List and confirm ListMethod can be removed.
// Deprecated: use List instead.
func (client OpenShiftClustersClient) ListMethod(ctx context.Context) (result OpenShiftClusterListPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftClustersClient.ListMethod")
		defer func() {
			sc := -1
			if result.oscl.Response.Response != nil {
				sc = result.oscl.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("redhatopenshift.OpenShiftClustersClient", "ListMethod", err.Error())
	}
	result.fn = client.listMethodNextResults
	req, err := client.ListMethodPreparer(ctx)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "ListMethod", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListMethodSender(req)
	if err != nil {
		result.oscl.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "ListMethod", resp, "Failure sending request")
		return
	}
	result.oscl, err = client.ListMethodResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "ListMethod", resp, "Failure responding to request")
		return
	}
	// Eagerly skip past an empty first page that still has a nextLink.
	if result.oscl.hasNextLink() && result.oscl.IsEmpty() {
		err = result.NextWithContext(ctx)
		return
	}
	return
}
// ListMethodPreparer prepares the ListMethod request.
// Deprecated: use ListPreparer instead (identical request).
func (client OpenShiftClustersClient) ListMethodPreparer(ctx context.Context) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2022-09-04"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.RedHatOpenShift/openShiftClusters", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListMethodSender sends the ListMethod request. The method will close the
// http.Response Body if it receives an error.
// Deprecated: use ListSender instead (identical behavior).
func (client OpenShiftClustersClient) ListMethodSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListMethodResponder handles the response to the ListMethod request. The method always
// closes the http.Response Body.
// Deprecated: use ListResponder instead (identical behavior).
func (client OpenShiftClustersClient) ListMethodResponder(resp *http.Response) (result OpenShiftClusterList, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// listMethodNextResults retrieves the next set of results, if any.
// A nil prepared request means no nextLink was present and enumeration ends.
func (client OpenShiftClustersClient) listMethodNextResults(ctx context.Context, lastResults OpenShiftClusterList) (result OpenShiftClusterList, err error) {
	req, err := lastResults.openShiftClusterListPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listMethodNextResults", nil, "Failure preparing next results request")
	}
	if req == nil {
		return
	}
	resp, err := client.ListMethodSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listMethodNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListMethodResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.OpenShiftClustersClient", "listMethodNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListMethodComplete enumerates all values, automatically crossing page boundaries as required.
// Deprecated: use ListComplete instead (identical behavior).
func (client OpenShiftClustersClient) ListMethodComplete(ctx context.Context) (result OpenShiftClusterListIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftClustersClient.ListMethod")
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.ListMethod(ctx)
	return
}
// Update the operation returns properties of a OpenShift cluster.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.

Просмотреть файл

@ -31,25 +31,34 @@ type OperationsClientAPI interface {
var _ OperationsClientAPI = (*redhatopenshift.OperationsClient)(nil)
// ListClientAPI contains the set of methods on the ListClient type.
type ListClientAPI interface {
Versions(ctx context.Context, location string) (result redhatopenshift.ListString, err error)
// InstallVersionsClientAPI contains the set of methods on the InstallVersionsClient type.
type InstallVersionsClientAPI interface {
List(ctx context.Context, location string) (result redhatopenshift.ListString, err error)
}
var _ ListClientAPI = (*redhatopenshift.ListClient)(nil)
var _ InstallVersionsClientAPI = (*redhatopenshift.InstallVersionsClient)(nil)
// OpenShiftClustersClientAPI contains the set of methods on the OpenShiftClustersClient type.
// NOTE(review): List/ListComplete and ListMethod/ListMethodComplete are
// duplicate surfaces for the same operation — confirm which survives.
type OpenShiftClustersClientAPI interface {
	CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftCluster) (result redhatopenshift.OpenShiftClustersCreateOrUpdateFuture, err error)
	Delete(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftClustersDeleteFuture, err error)
	Get(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftCluster, err error)
	List(ctx context.Context) (result redhatopenshift.OpenShiftClusterListPage, err error)
	ListComplete(ctx context.Context) (result redhatopenshift.OpenShiftClusterListIterator, err error)
	ListAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftClusterAdminKubeconfig, err error)
	ListByResourceGroup(ctx context.Context, resourceGroupName string) (result redhatopenshift.OpenShiftClusterListPage, err error)
	ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result redhatopenshift.OpenShiftClusterListIterator, err error)
	ListCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.OpenShiftClusterCredentials, err error)
	ListMethod(ctx context.Context) (result redhatopenshift.OpenShiftClusterListPage, err error)
	ListMethodComplete(ctx context.Context) (result redhatopenshift.OpenShiftClusterListIterator, err error)
	Update(ctx context.Context, resourceGroupName string, resourceName string, parameters redhatopenshift.OpenShiftClusterUpdate) (result redhatopenshift.OpenShiftClustersUpdateFuture, err error)
}
// Compile-time check that the concrete client satisfies the interface.
var _ OpenShiftClustersClientAPI = (*redhatopenshift.OpenShiftClustersClient)(nil)
// SyncSetsClientAPI contains the set of methods on the SyncSetsClient type.
type SyncSetsClientAPI interface {
	Get(ctx context.Context, resourceGroupName string, resourceName string, syncSetResourceName string) (result redhatopenshift.SyncSet, err error)
	List(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.SyncSetListPage, err error)
	ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result redhatopenshift.SyncSetListIterator, err error)
}
// Compile-time check that the concrete client satisfies the interface.
var _ SyncSetsClientAPI = (*redhatopenshift.SyncSetsClient)(nil)

Просмотреть файл

@ -0,0 +1,258 @@
package redhatopenshift
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"net/http"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
)
// SyncSetsClient is the rest API for Azure Red Hat OpenShift 4
// SyncSet resources. It embeds BaseClient for transport and auth.
type SyncSetsClient struct {
	BaseClient
}
// NewSyncSetsClient creates an instance of the SyncSetsClient client
// targeting the default public-cloud base URI.
func NewSyncSetsClient(subscriptionID string) SyncSetsClient {
	return NewSyncSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewSyncSetsClientWithBaseURI creates an instance of the SyncSetsClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewSyncSetsClientWithBaseURI(baseURI string, subscriptionID string) SyncSetsClient {
	return SyncSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Get the operation returns properties of a SyncSet.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
// resourceName - the name of the OpenShift cluster resource the SyncSet belongs to.
// syncSetResourceName - the name of the SyncSet resource.
func (client SyncSetsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, syncSetResourceName string) (result SyncSet, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SyncSetsClient.Get")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation mirrors ARM constraints (90-char RG name max).
	if err := validation.Validate([]validation.Validation{
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("redhatopenshift.SyncSetsClient", "Get", err.Error())
	}
	req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, syncSetResourceName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "Get", nil, "Failure preparing request")
		return
	}
	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "Get", resp, "Failure sending request")
		return
	}
	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "Get", resp, "Failure responding to request")
		return
	}
	return
}
// GetPreparer prepares the Get request.
// NOTE(review): the path uses "openshiftclusters/.../syncSet/..." while the
// List path below uses "openShiftCluster/.../syncSets" — inconsistent casing
// and pluralization; verify both against the 2022-09-04 swagger.
func (client SyncSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, syncSetResourceName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
		"resourceName":        autorest.Encode("path", resourceName),
		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
		"syncSetResourceName": autorest.Encode("path", syncSetResourceName),
	}
	const APIVersion = "2022-09-04"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openshiftclusters/{resourceName}/syncSet/{syncSetResourceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SyncSetsClient) GetSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client SyncSetsClient) GetResponder(resp *http.Response) (result SyncSet, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// List the operation returns properties of each SyncSet.
// It returns the first page; use ListComplete to enumerate all pages.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
// resourceName - the name of the OpenShift cluster resource the SyncSets belong to.
func (client SyncSetsClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result SyncSetListPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SyncSetsClient.List")
		defer func() {
			sc := -1
			if result.ssl.Response.Response != nil {
				sc = result.ssl.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	if err := validation.Validate([]validation.Validation{
		{TargetValue: client.SubscriptionID,
			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
		{TargetValue: resourceGroupName,
			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
		return result, validation.NewError("redhatopenshift.SyncSetsClient", "List", err.Error())
	}
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx, resourceGroupName, resourceName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "List", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.ssl.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "List", resp, "Failure sending request")
		return
	}
	result.ssl, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "List", resp, "Failure responding to request")
		return
	}
	// Eagerly skip past an empty first page that still has a nextLink.
	if result.ssl.hasNextLink() && result.ssl.IsEmpty() {
		err = result.NextWithContext(ctx)
		return
	}
	return
}
// ListPreparer prepares the List request.
// NOTE(review): the path segment "openShiftCluster" (singular, mixed case)
// differs from GetPreparer's "openshiftclusters" — likely a swagger typo;
// confirm against the 2022-09-04 spec before relying on this route.
func (client SyncSetsClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"resourceName":      autorest.Encode("path", resourceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2022-09-04"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftCluster/{resourceName}/syncSets", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client SyncSetsClient) ListSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client SyncSetsClient) ListResponder(resp *http.Response) (result SyncSetList, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// listNextResults retrieves the next set of results, if any.
// A nil prepared request means no nextLink was present and enumeration ends.
func (client SyncSetsClient) listNextResults(ctx context.Context, lastResults SyncSetList) (result SyncSetList, err error) {
	req, err := lastResults.syncSetListPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "listNextResults", nil, "Failure preparing next results request")
	}
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "redhatopenshift.SyncSetsClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
// The returned iterator is positioned at the first value of the first page.
func (client SyncSetsClient) ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result SyncSetListIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SyncSetsClient.List")
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx, resourceGroupName, resourceName)
	return
}

Просмотреть файл

@ -0,0 +1,143 @@
package database
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/util/uuid"
)
const (
	// ClusterManagerConfigurationsGetQuery selects a cluster manager
	// configuration document by its (lower-cased) key within a partition.
	ClusterManagerConfigurationsGetQuery = `SELECT * FROM ClusterManagerConfigurations doc WHERE doc.key = @key`
)
// clusterManagerConfiguration is the concrete ClusterManagerConfigurations
// implementation backed by a Cosmos DB document client.
type clusterManagerConfiguration struct {
	// c is the document client for the ClusterManagerConfigurations collection.
	c cosmosdb.ClusterManagerConfigurationDocumentClient
	// collc is the collection-level client.
	collc cosmosdb.CollectionClient
	// uuid is the instance identity assigned at construction.
	uuid string
	// uuidGenerator produces fresh document IDs (see NewUUID).
	uuidGenerator uuid.Generator
}
// ClusterManagerConfigurations is the database API for cluster manager
// configuration documents: CRUD plus change-feed access and ID generation.
type ClusterManagerConfigurations interface {
	Create(context.Context, *api.ClusterManagerConfigurationDocument) (*api.ClusterManagerConfigurationDocument, error)
	Get(context.Context, string) (*api.ClusterManagerConfigurationDocument, error)
	Update(context.Context, *api.ClusterManagerConfigurationDocument) (*api.ClusterManagerConfigurationDocument, error)
	Delete(context.Context, *api.ClusterManagerConfigurationDocument) error
	ChangeFeed() cosmosdb.ClusterManagerConfigurationDocumentIterator
	NewUUID() string
}
// NewClusterManagerConfigurations resolves the database name for the current
// mode, wires up collection and document clients for the cluster manager
// collection, and returns a ready-to-use ClusterManagerConfigurations.
func NewClusterManagerConfigurations(ctx context.Context, isDevelopmentMode bool, dbc cosmosdb.DatabaseClient) (ClusterManagerConfigurations, error) {
	dbid, err := Name(isDevelopmentMode)
	if err != nil {
		return nil, err
	}

	collectionClient := cosmosdb.NewCollectionClient(dbc, dbid)
	docClient := cosmosdb.NewClusterManagerConfigurationDocumentClient(collectionClient, collClusterManager)

	return NewClusterManagerConfigurationsWithProvidedClient(docClient, collectionClient, uuid.DefaultGenerator.Generate(), uuid.DefaultGenerator), nil
}
// NewClusterManagerConfigurationsWithProvidedClient assembles a
// ClusterManagerConfigurations from pre-built clients; useful for tests
// that inject fakes.
func NewClusterManagerConfigurationsWithProvidedClient(client cosmosdb.ClusterManagerConfigurationDocumentClient, collectionClient cosmosdb.CollectionClient, uuid string, uuidGenerator uuid.Generator) ClusterManagerConfigurations {
	cmc := &clusterManagerConfiguration{
		c:             client,
		collc:         collectionClient,
		uuid:          uuid,
		uuidGenerator: uuidGenerator,
	}
	return cmc
}
// NewUUID returns a freshly generated UUID suitable for a new document ID.
func (c *clusterManagerConfiguration) NewUUID() string {
	return c.uuidGenerator.Generate()
}
// Create inserts a new cluster manager configuration document. The
// document ID must already be lower case; the partition key is derived
// from doc.Key. A Cosmos DB 409 Conflict is rewritten to 412
// Precondition Failed for callers.
// NOTE(review): Create validates doc.ID while update() validates doc.Key —
// confirm whether Key should be lower-cased/validated here too, since the
// partition key is computed from it.
func (c *clusterManagerConfiguration) Create(ctx context.Context, doc *api.ClusterManagerConfigurationDocument) (*api.ClusterManagerConfigurationDocument, error) {
	if doc.ID != strings.ToLower(doc.ID) {
		return nil, fmt.Errorf("id %q is not lower case", doc.ID)
	}
	var err error
	doc.PartitionKey, err = c.partitionKey(doc.Key)
	if err != nil {
		return nil, err
	}
	doc, err = c.c.Create(ctx, doc.PartitionKey, doc, nil)
	// Deliberate shadowing: the typed error is the same pointer that the
	// outer err holds, so mutating StatusCode here changes the returned error.
	if err, ok := err.(*cosmosdb.Error); ok && err.StatusCode == http.StatusConflict {
		err.StatusCode = http.StatusPreconditionFailed
	}
	return doc, err
}
// Get retrieves the single document whose key equals id, which must already
// be lower case.  It returns a 404 cosmosdb.Error when no document matches
// and an error when more than one does.
func (c *clusterManagerConfiguration) Get(ctx context.Context, id string) (*api.ClusterManagerConfigurationDocument, error) {
	if strings.ToLower(id) != id {
		return nil, fmt.Errorf("id %q is not lower case", id)
	}

	pk, err := c.partitionKey(id)
	if err != nil {
		return nil, err
	}

	query := &cosmosdb.Query{
		Query: ClusterManagerConfigurationsGetQuery,
		Parameters: []cosmosdb.Parameter{
			{Name: "@key", Value: id},
		},
	}

	result, err := c.c.QueryAll(ctx, pk, query, nil)
	if err != nil {
		return nil, err
	}

	// exactly one match is the only success case
	n := len(result.ClusterManagerConfigurationDocuments)
	switch {
	case n > 1:
		return nil, fmt.Errorf("read %d documents, expected <= 1", n)
	case n == 1:
		return result.ClusterManagerConfigurationDocuments[0], nil
	default:
		return nil, &cosmosdb.Error{StatusCode: http.StatusNotFound}
	}
}
// Update replaces the stored document with doc.  The underlying Replace call
// sends doc's ETag as If-Match, so a concurrent modification fails the write.
func (c *clusterManagerConfiguration) Update(ctx context.Context, doc *api.ClusterManagerConfigurationDocument) (*api.ClusterManagerConfigurationDocument, error) {
	return c.update(ctx, doc, nil)
}

// update is the options-aware implementation backing Update.
func (c *clusterManagerConfiguration) update(ctx context.Context, doc *api.ClusterManagerConfigurationDocument, options *cosmosdb.Options) (*api.ClusterManagerConfigurationDocument, error) {
	if doc.Key != strings.ToLower(doc.Key) {
		return nil, fmt.Errorf("key %q is not lower case", doc.Key)
	}

	return c.c.Replace(ctx, doc.PartitionKey, doc, options)
}
// Delete removes doc from the database.  NoETag is set, so the delete is
// unconditional: no optimistic-concurrency (If-Match) check is performed.
func (c *clusterManagerConfiguration) Delete(ctx context.Context, doc *api.ClusterManagerConfigurationDocument) error {
	if doc.ID != strings.ToLower(doc.ID) {
		return fmt.Errorf("id %q is not lower case", doc.ID)
	}

	return c.c.Delete(ctx, doc.PartitionKey, doc, &cosmosdb.Options{NoETag: true})
}
// ChangeFeed returns an iterator over the collection's change feed.
func (c *clusterManagerConfiguration) ChangeFeed() cosmosdb.ClusterManagerConfigurationDocumentIterator {
	return c.c.ChangeFeed(nil)
}

// partitionKey derives the CosmosDB partition key — the Azure subscription ID
// — from an ARM resource ID.
func (c *clusterManagerConfiguration) partitionKey(key string) (string, error) {
	r, err := azure.ParseResourceID(key)
	return r.SubscriptionID, err
}

Просмотреть файл

@ -3,7 +3,7 @@ package cosmosdb
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
//go:generate go run ../../../vendor/github.com/jewzaam/go-cosmosdb/cmd/gencosmosdb github.com/Azure/ARO-RP/pkg/api,AsyncOperationDocument github.com/Azure/ARO-RP/pkg/api,BillingDocument github.com/Azure/ARO-RP/pkg/api,GatewayDocument github.com/Azure/ARO-RP/pkg/api,MonitorDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftClusterDocument github.com/Azure/ARO-RP/pkg/api,SubscriptionDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftVersionDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftClusterManagerConfigurationDocument
//go:generate go run ../../../vendor/github.com/jewzaam/go-cosmosdb/cmd/gencosmosdb github.com/Azure/ARO-RP/pkg/api,AsyncOperationDocument github.com/Azure/ARO-RP/pkg/api,BillingDocument github.com/Azure/ARO-RP/pkg/api,GatewayDocument github.com/Azure/ARO-RP/pkg/api,MonitorDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftClusterDocument github.com/Azure/ARO-RP/pkg/api,SubscriptionDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftVersionDocument github.com/Azure/ARO-RP/pkg/api,ClusterManagerConfigurationDocument
//go:generate go run ../../../vendor/golang.org/x/tools/cmd/goimports -local=github.com/Azure/ARO-RP -e -w ./
//go:generate go run ../../../vendor/github.com/golang/mock/mockgen -destination=../../util/mocks/$GOPACKAGE/$GOPACKAGE.go github.com/Azure/ARO-RP/pkg/database/$GOPACKAGE PermissionClient
//go:generate go run ../../../vendor/golang.org/x/tools/cmd/goimports -local=github.com/Azure/ARO-RP -e -w ../../util/mocks/$GOPACKAGE/$GOPACKAGE.go

Просмотреть файл

@ -0,0 +1,313 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"net/http"
"strconv"
"strings"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
// clusterManagerConfigurationDocumentClient is the HTTP-backed implementation
// of ClusterManagerConfigurationDocumentClient; path addresses the collection
// within the database.
type clusterManagerConfigurationDocumentClient struct {
	*databaseClient
	path string
}

// ClusterManagerConfigurationDocumentClient is a clusterManagerConfigurationDocument client
type ClusterManagerConfigurationDocumentClient interface {
	Create(context.Context, string, *pkg.ClusterManagerConfigurationDocument, *Options) (*pkg.ClusterManagerConfigurationDocument, error)
	List(*Options) ClusterManagerConfigurationDocumentIterator
	ListAll(context.Context, *Options) (*pkg.ClusterManagerConfigurationDocuments, error)
	Get(context.Context, string, string, *Options) (*pkg.ClusterManagerConfigurationDocument, error)
	Replace(context.Context, string, *pkg.ClusterManagerConfigurationDocument, *Options) (*pkg.ClusterManagerConfigurationDocument, error)
	Delete(context.Context, string, *pkg.ClusterManagerConfigurationDocument, *Options) error
	Query(string, *Query, *Options) ClusterManagerConfigurationDocumentRawIterator
	QueryAll(context.Context, string, *Query, *Options) (*pkg.ClusterManagerConfigurationDocuments, error)
	ChangeFeed(*Options) ClusterManagerConfigurationDocumentIterator
}

// clusterManagerConfigurationDocumentChangeFeedIterator pages through the
// collection's incremental change feed; continuation carries the last Etag
// seen.
type clusterManagerConfigurationDocumentChangeFeedIterator struct {
	*clusterManagerConfigurationDocumentClient
	continuation string
	options      *Options
}

// clusterManagerConfigurationDocumentListIterator pages through a plain
// listing of the collection.
type clusterManagerConfigurationDocumentListIterator struct {
	*clusterManagerConfigurationDocumentClient
	continuation string
	done         bool
	options      *Options
}

// clusterManagerConfigurationDocumentQueryIterator pages through the results
// of a SQL query; an empty partitionkey turns it into a cross-partition query.
type clusterManagerConfigurationDocumentQueryIterator struct {
	*clusterManagerConfigurationDocumentClient
	partitionkey string
	query        *Query
	continuation string
	done         bool
	options      *Options
}

// ClusterManagerConfigurationDocumentIterator is a clusterManagerConfigurationDocument iterator
type ClusterManagerConfigurationDocumentIterator interface {
	Next(context.Context, int) (*pkg.ClusterManagerConfigurationDocuments, error)
	Continuation() string
}

// ClusterManagerConfigurationDocumentRawIterator is a clusterManagerConfigurationDocument raw iterator
type ClusterManagerConfigurationDocumentRawIterator interface {
	ClusterManagerConfigurationDocumentIterator
	NextRaw(context.Context, int, interface{}) error
}

// NewClusterManagerConfigurationDocumentClient returns a new clusterManagerConfigurationDocument client.
// NOTE(review): collc is type-asserted to *collectionClient, so this panics if
// handed any other CollectionClient implementation — confirm all callers pass
// the real one.
func NewClusterManagerConfigurationDocumentClient(collc CollectionClient, collid string) ClusterManagerConfigurationDocumentClient {
	return &clusterManagerConfigurationDocumentClient{
		databaseClient: collc.(*collectionClient).databaseClient,
		path:           collc.(*collectionClient).path + "/colls/" + collid,
	}
}
// all drains iterator i and concatenates every page into a single
// ClusterManagerConfigurationDocuments result.
func (c *clusterManagerConfigurationDocumentClient) all(ctx context.Context, i ClusterManagerConfigurationDocumentIterator) (*pkg.ClusterManagerConfigurationDocuments, error) {
	allclusterManagerConfigurationDocuments := &pkg.ClusterManagerConfigurationDocuments{}

	for {
		clusterManagerConfigurationDocuments, err := i.Next(ctx, -1)
		if err != nil {
			return nil, err
		}
		if clusterManagerConfigurationDocuments == nil {
			// a nil page signals that the iterator is exhausted
			break
		}

		allclusterManagerConfigurationDocuments.Count += clusterManagerConfigurationDocuments.Count
		allclusterManagerConfigurationDocuments.ResourceID = clusterManagerConfigurationDocuments.ResourceID
		allclusterManagerConfigurationDocuments.ClusterManagerConfigurationDocuments = append(allclusterManagerConfigurationDocuments.ClusterManagerConfigurationDocuments, clusterManagerConfigurationDocuments.ClusterManagerConfigurationDocuments...)
	}

	return allclusterManagerConfigurationDocuments, nil
}

// Create POSTs a new document into the given partition.  NoETag is forced on
// because a brand-new document has no ETag to match against.
func (c *clusterManagerConfigurationDocumentClient) Create(ctx context.Context, partitionkey string, newclusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) (clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)

	if options == nil {
		options = &Options{}
	}
	options.NoETag = true

	err = c.setOptions(options, newclusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}

	err = c.do(ctx, http.MethodPost, c.path+"/docs", "docs", c.path, http.StatusCreated, &newclusterManagerConfigurationDocument, &clusterManagerConfigurationDocument, headers)
	return
}
// List returns an iterator over every document in the collection, optionally
// resuming from options.Continuation.
func (c *clusterManagerConfigurationDocumentClient) List(options *Options) ClusterManagerConfigurationDocumentIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}

	return &clusterManagerConfigurationDocumentListIterator{clusterManagerConfigurationDocumentClient: c, options: options, continuation: continuation}
}

// ListAll lists every document in the collection in a single call.
func (c *clusterManagerConfigurationDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.ClusterManagerConfigurationDocuments, error) {
	return c.all(ctx, c.List(options))
}

// Get GETs a single document by ID from the given partition.
func (c *clusterManagerConfigurationDocumentClient) Get(ctx context.Context, partitionkey, clusterManagerConfigurationDocumentid string, options *Options) (clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)

	err = c.setOptions(options, nil, headers)
	if err != nil {
		return
	}

	err = c.do(ctx, http.MethodGet, c.path+"/docs/"+clusterManagerConfigurationDocumentid, "docs", c.path+"/docs/"+clusterManagerConfigurationDocumentid, http.StatusOK, nil, &clusterManagerConfigurationDocument, headers)
	return
}

// Replace PUTs the document over the stored copy; setOptions adds an If-Match
// header from the document's ETag unless options.NoETag is set.
func (c *clusterManagerConfigurationDocumentClient) Replace(ctx context.Context, partitionkey string, newclusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) (clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)

	err = c.setOptions(options, newclusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}

	err = c.do(ctx, http.MethodPut, c.path+"/docs/"+newclusterManagerConfigurationDocument.ID, "docs", c.path+"/docs/"+newclusterManagerConfigurationDocument.ID, http.StatusOK, &newclusterManagerConfigurationDocument, &clusterManagerConfigurationDocument, headers)
	return
}
// Delete DELETEs the document by ID; HTTP 204 is the expected success status.
func (c *clusterManagerConfigurationDocumentClient) Delete(ctx context.Context, partitionkey string, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) (err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)

	err = c.setOptions(options, clusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}

	err = c.do(ctx, http.MethodDelete, c.path+"/docs/"+clusterManagerConfigurationDocument.ID, "docs", c.path+"/docs/"+clusterManagerConfigurationDocument.ID, http.StatusNoContent, nil, nil, headers)
	return
}

// Query returns a raw iterator over the results of a SQL query; an empty
// partitionkey makes the query cross-partition.
func (c *clusterManagerConfigurationDocumentClient) Query(partitionkey string, query *Query, options *Options) ClusterManagerConfigurationDocumentRawIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}

	return &clusterManagerConfigurationDocumentQueryIterator{clusterManagerConfigurationDocumentClient: c, partitionkey: partitionkey, query: query, options: options, continuation: continuation}
}

// QueryAll runs query and drains every result page in a single call.
func (c *clusterManagerConfigurationDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.ClusterManagerConfigurationDocuments, error) {
	return c.all(ctx, c.Query(partitionkey, query, options))
}

// ChangeFeed returns an iterator over the collection's incremental change
// feed, optionally resuming from options.Continuation.
func (c *clusterManagerConfigurationDocumentClient) ChangeFeed(options *Options) ClusterManagerConfigurationDocumentIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}

	return &clusterManagerConfigurationDocumentChangeFeedIterator{clusterManagerConfigurationDocumentClient: c, options: options, continuation: continuation}
}
// setOptions translates Options into the corresponding CosmosDB request
// headers.  When a document is supplied and NoETag is unset, the document's
// ETag is required and sent as If-Match for optimistic concurrency.
func (c *clusterManagerConfigurationDocumentClient) setOptions(options *Options, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, headers http.Header) error {
	if options == nil {
		return nil
	}

	if clusterManagerConfigurationDocument != nil && !options.NoETag {
		if clusterManagerConfigurationDocument.ETag == "" {
			return ErrETagRequired
		}
		headers.Set("If-Match", clusterManagerConfigurationDocument.ETag)
	}
	if len(options.PreTriggers) > 0 {
		headers.Set("X-Ms-Documentdb-Pre-Trigger-Include", strings.Join(options.PreTriggers, ","))
	}
	if len(options.PostTriggers) > 0 {
		headers.Set("X-Ms-Documentdb-Post-Trigger-Include", strings.Join(options.PostTriggers, ","))
	}
	if len(options.PartitionKeyRangeID) > 0 {
		headers.Set("X-Ms-Documentdb-PartitionKeyRangeID", options.PartitionKeyRangeID)
	}

	return nil
}
// Next fetches the next page of the change feed, up to maxItemCount items.
// HTTP 304 (nothing new since the continuation) is not treated as an error:
// it yields a nil page.  The response Etag becomes the continuation for the
// following call.
func (i *clusterManagerConfigurationDocumentChangeFeedIterator) Next(ctx context.Context, maxItemCount int) (clusterManagerConfigurationDocuments *pkg.ClusterManagerConfigurationDocuments, err error) {
	headers := http.Header{}
	headers.Set("A-IM", "Incremental feed")

	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	if i.continuation != "" {
		headers.Set("If-None-Match", i.continuation)
	}

	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}

	err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &clusterManagerConfigurationDocuments, headers)
	if IsErrorStatusCode(err, http.StatusNotModified) {
		err = nil
	}
	if err != nil {
		return
	}

	i.continuation = headers.Get("Etag")

	return
}

// Continuation returns the change-feed position to resume from.
func (i *clusterManagerConfigurationDocumentChangeFeedIterator) Continuation() string {
	return i.continuation
}
// Next fetches the next page of the listing, up to maxItemCount items
// (-1 for no limit).  It returns (nil, nil) once all pages are consumed.
func (i *clusterManagerConfigurationDocumentListIterator) Next(ctx context.Context, maxItemCount int) (clusterManagerConfigurationDocuments *pkg.ClusterManagerConfigurationDocuments, err error) {
	if i.done {
		return
	}

	headers := http.Header{}
	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	if i.continuation != "" {
		headers.Set("X-Ms-Continuation", i.continuation)
	}

	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}

	err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &clusterManagerConfigurationDocuments, headers)
	if err != nil {
		return
	}

	// an absent continuation header means this was the final page
	i.continuation = headers.Get("X-Ms-Continuation")
	i.done = i.continuation == ""

	return
}

// Continuation returns the listing position to resume from.
func (i *clusterManagerConfigurationDocumentListIterator) Continuation() string {
	return i.continuation
}
// Next decodes the next page of query results into typed documents.
func (i *clusterManagerConfigurationDocumentQueryIterator) Next(ctx context.Context, maxItemCount int) (clusterManagerConfigurationDocuments *pkg.ClusterManagerConfigurationDocuments, err error) {
	err = i.NextRaw(ctx, maxItemCount, &clusterManagerConfigurationDocuments)
	return
}

// NextRaw POSTs the query and decodes the next page of results into raw,
// which may be any JSON-decodable shape.  It returns with raw untouched once
// all pages are consumed.
func (i *clusterManagerConfigurationDocumentQueryIterator) NextRaw(ctx context.Context, maxItemCount int, raw interface{}) (err error) {
	if i.done {
		return
	}

	headers := http.Header{}
	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	headers.Set("X-Ms-Documentdb-Isquery", "True")
	headers.Set("Content-Type", "application/query+json")
	if i.partitionkey != "" {
		headers.Set("X-Ms-Documentdb-Partitionkey", `["`+i.partitionkey+`"]`)
	} else {
		// no partition key: fan the query out across all partitions
		headers.Set("X-Ms-Documentdb-Query-Enablecrosspartition", "True")
	}
	if i.continuation != "" {
		headers.Set("X-Ms-Continuation", i.continuation)
	}

	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}

	err = i.do(ctx, http.MethodPost, i.path+"/docs", "docs", i.path, http.StatusOK, &i.query, &raw, headers)
	if err != nil {
		return
	}

	i.continuation = headers.Get("X-Ms-Continuation")
	i.done = i.continuation == ""

	return
}

// Continuation returns the query position to resume from.
func (i *clusterManagerConfigurationDocumentQueryIterator) Continuation() string {
	return i.continuation
}

Просмотреть файл

@ -0,0 +1,361 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"fmt"
"net/http"
"sync"
"github.com/ugorji/go/codec"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
// fakeClusterManagerConfigurationDocumentTriggerHandler simulates a CosmosDB
// pre-trigger in tests.
type fakeClusterManagerConfigurationDocumentTriggerHandler func(context.Context, *pkg.ClusterManagerConfigurationDocument) error

// fakeClusterManagerConfigurationDocumentQueryHandler simulates a named query
// in tests.
type fakeClusterManagerConfigurationDocumentQueryHandler func(ClusterManagerConfigurationDocumentClient, *Query, *Options) ClusterManagerConfigurationDocumentRawIterator

// compile-time interface conformance check
var _ ClusterManagerConfigurationDocumentClient = &FakeClusterManagerConfigurationDocumentClient{}

// NewFakeClusterManagerConfigurationDocumentClient returns a FakeClusterManagerConfigurationDocumentClient
func NewFakeClusterManagerConfigurationDocumentClient(h *codec.JsonHandle) *FakeClusterManagerConfigurationDocumentClient {
	return &FakeClusterManagerConfigurationDocumentClient{
		jsonHandle:                           h,
		clusterManagerConfigurationDocuments: make(map[string]*pkg.ClusterManagerConfigurationDocument),
		triggerHandlers:                      make(map[string]fakeClusterManagerConfigurationDocumentTriggerHandler),
		queryHandlers:                        make(map[string]fakeClusterManagerConfigurationDocumentQueryHandler),
	}
}

// FakeClusterManagerConfigurationDocumentClient is an in-memory,
// mutex-guarded stand-in for the real document client, for use in tests.
type FakeClusterManagerConfigurationDocumentClient struct {
	lock                                 sync.RWMutex
	jsonHandle                           *codec.JsonHandle
	clusterManagerConfigurationDocuments map[string]*pkg.ClusterManagerConfigurationDocument // documents stored by ID
	triggerHandlers                      map[string]fakeClusterManagerConfigurationDocumentTriggerHandler
	queryHandlers                        map[string]fakeClusterManagerConfigurationDocumentQueryHandler
	sorter                               func([]*pkg.ClusterManagerConfigurationDocument)

	// etag is a monotonically increasing counter stamped onto documents
	etag int

	// returns true if documents conflict
	conflictChecker func(*pkg.ClusterManagerConfigurationDocument, *pkg.ClusterManagerConfigurationDocument) bool

	// err, if not nil, is an error to return when attempting to communicate
	// with this Client
	err error
}
// SetError sets or unsets an error that will be returned on any
// FakeClusterManagerConfigurationDocumentClient method invocation
func (c *FakeClusterManagerConfigurationDocumentClient) SetError(err error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.err = err
}

// SetSorter sets or unsets a sorter function which will be used to sort values
// returned by List() for test stability
func (c *FakeClusterManagerConfigurationDocumentClient) SetSorter(sorter func([]*pkg.ClusterManagerConfigurationDocument)) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.sorter = sorter
}

// SetConflictChecker sets or unsets a function which can be used to validate
// additional unique keys in a ClusterManagerConfigurationDocument
func (c *FakeClusterManagerConfigurationDocumentClient) SetConflictChecker(conflictChecker func(*pkg.ClusterManagerConfigurationDocument, *pkg.ClusterManagerConfigurationDocument) bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.conflictChecker = conflictChecker
}

// SetTriggerHandler sets or unsets a trigger handler; triggerName is matched
// against the entries of Options.PreTriggers
func (c *FakeClusterManagerConfigurationDocumentClient) SetTriggerHandler(triggerName string, trigger fakeClusterManagerConfigurationDocumentTriggerHandler) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.triggerHandlers[triggerName] = trigger
}

// SetQueryHandler sets or unsets a query handler; queryName is matched
// against Query.Query in Query()
func (c *FakeClusterManagerConfigurationDocumentClient) SetQueryHandler(queryName string, query fakeClusterManagerConfigurationDocumentQueryHandler) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.queryHandlers[queryName] = query
}
// deepCopy round-trips the document through its JSON encoding to produce an
// independent copy, so callers cannot mutate what the fake stores (and vice
// versa).
func (c *FakeClusterManagerConfigurationDocumentClient) deepCopy(clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument) (*pkg.ClusterManagerConfigurationDocument, error) {
	var b []byte
	err := codec.NewEncoderBytes(&b, c.jsonHandle).Encode(clusterManagerConfigurationDocument)
	if err != nil {
		return nil, err
	}

	clusterManagerConfigurationDocument = nil
	err = codec.NewDecoderBytes(b, c.jsonHandle).Decode(&clusterManagerConfigurationDocument)
	if err != nil {
		return nil, err
	}

	return clusterManagerConfigurationDocument, nil
}
// apply implements both Create (isCreate == true) and Replace (isCreate ==
// false) against the in-memory store, mimicking CosmosDB semantics: Create
// returns 409 if the ID exists; Replace returns 404 if it doesn't and 412 on
// an ETag mismatch; a user-supplied conflictChecker can veto either with a
// 409.  On success the stored document gets a fresh ETag and a deep copy is
// returned.
func (c *FakeClusterManagerConfigurationDocumentClient) apply(ctx context.Context, partitionkey string, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options, isCreate bool) (*pkg.ClusterManagerConfigurationDocument, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.err != nil {
		return nil, c.err
	}

	clusterManagerConfigurationDocument, err := c.deepCopy(clusterManagerConfigurationDocument) // copy now because pretriggers can mutate clusterManagerConfigurationDocument
	if err != nil {
		return nil, err
	}

	if options != nil {
		err := c.processPreTriggers(ctx, clusterManagerConfigurationDocument, options)
		if err != nil {
			return nil, err
		}
	}

	existingClusterManagerConfigurationDocument, exists := c.clusterManagerConfigurationDocuments[clusterManagerConfigurationDocument.ID]
	if isCreate && exists {
		return nil, &Error{
			StatusCode: http.StatusConflict,
			Message:    "Entity with the specified id already exists in the system",
		}
	}
	if !isCreate {
		if !exists {
			return nil, &Error{StatusCode: http.StatusNotFound}
		}

		if clusterManagerConfigurationDocument.ETag != existingClusterManagerConfigurationDocument.ETag {
			return nil, &Error{StatusCode: http.StatusPreconditionFailed}
		}
	}

	if c.conflictChecker != nil {
		for _, clusterManagerConfigurationDocumentToCheck := range c.clusterManagerConfigurationDocuments {
			if c.conflictChecker(clusterManagerConfigurationDocumentToCheck, clusterManagerConfigurationDocument) {
				return nil, &Error{
					StatusCode: http.StatusConflict,
					Message:    "Entity with the specified id already exists in the system",
				}
			}
		}
	}

	clusterManagerConfigurationDocument.ETag = fmt.Sprint(c.etag)
	c.etag++

	c.clusterManagerConfigurationDocuments[clusterManagerConfigurationDocument.ID] = clusterManagerConfigurationDocument

	return c.deepCopy(clusterManagerConfigurationDocument)
}
// Create creates a ClusterManagerConfigurationDocument in the database
func (c *FakeClusterManagerConfigurationDocumentClient) Create(ctx context.Context, partitionkey string, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) (*pkg.ClusterManagerConfigurationDocument, error) {
	return c.apply(ctx, partitionkey, clusterManagerConfigurationDocument, options, true)
}

// Replace replaces a ClusterManagerConfigurationDocument in the database
func (c *FakeClusterManagerConfigurationDocumentClient) Replace(ctx context.Context, partitionkey string, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) (*pkg.ClusterManagerConfigurationDocument, error) {
	return c.apply(ctx, partitionkey, clusterManagerConfigurationDocument, options, false)
}

// List returns a ClusterManagerConfigurationDocumentIterator to list all
// ClusterManagerConfigurationDocuments in the database.  Each returned
// document is a deep copy; the optional sorter makes the order deterministic
// for tests.
func (c *FakeClusterManagerConfigurationDocumentClient) List(*Options) ClusterManagerConfigurationDocumentIterator {
	c.lock.RLock()
	defer c.lock.RUnlock()

	if c.err != nil {
		return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(c.err)
	}

	clusterManagerConfigurationDocuments := make([]*pkg.ClusterManagerConfigurationDocument, 0, len(c.clusterManagerConfigurationDocuments))
	for _, clusterManagerConfigurationDocument := range c.clusterManagerConfigurationDocuments {
		clusterManagerConfigurationDocument, err := c.deepCopy(clusterManagerConfigurationDocument)
		if err != nil {
			return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(err)
		}
		clusterManagerConfigurationDocuments = append(clusterManagerConfigurationDocuments, clusterManagerConfigurationDocument)
	}

	if c.sorter != nil {
		c.sorter(clusterManagerConfigurationDocuments)
	}

	return NewFakeClusterManagerConfigurationDocumentIterator(clusterManagerConfigurationDocuments, 0)
}

// ListAll lists all ClusterManagerConfigurationDocuments in the database
func (c *FakeClusterManagerConfigurationDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.ClusterManagerConfigurationDocuments, error) {
	iter := c.List(options)
	return iter.Next(ctx, -1)
}
// Get gets a ClusterManagerConfigurationDocument from the database.  The
// partition key is ignored by the fake; lookup is by ID only.
func (c *FakeClusterManagerConfigurationDocumentClient) Get(ctx context.Context, partitionkey string, id string, options *Options) (*pkg.ClusterManagerConfigurationDocument, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	if c.err != nil {
		return nil, c.err
	}

	clusterManagerConfigurationDocument, exists := c.clusterManagerConfigurationDocuments[id]
	if !exists {
		return nil, &Error{StatusCode: http.StatusNotFound}
	}

	return c.deepCopy(clusterManagerConfigurationDocument)
}

// Delete deletes a ClusterManagerConfigurationDocument from the database.
// Unlike Replace, no ETag comparison is performed here.
func (c *FakeClusterManagerConfigurationDocumentClient) Delete(ctx context.Context, partitionKey string, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.err != nil {
		return c.err
	}

	_, exists := c.clusterManagerConfigurationDocuments[clusterManagerConfigurationDocument.ID]
	if !exists {
		return &Error{StatusCode: http.StatusNotFound}
	}

	delete(c.clusterManagerConfigurationDocuments, clusterManagerConfigurationDocument.ID)
	return nil
}

// ChangeFeed is unimplemented
func (c *FakeClusterManagerConfigurationDocumentClient) ChangeFeed(*Options) ClusterManagerConfigurationDocumentIterator {
	c.lock.RLock()
	defer c.lock.RUnlock()

	if c.err != nil {
		return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(c.err)
	}

	return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(ErrNotImplemented)
}
// processPreTriggers runs the registered handler for each pre-trigger named
// in options.PreTriggers; an unknown trigger name yields ErrNotImplemented.
// The write lock (held by apply) is dropped and re-taken around each handler
// so handlers may call back into the client.
func (c *FakeClusterManagerConfigurationDocumentClient) processPreTriggers(ctx context.Context, clusterManagerConfigurationDocument *pkg.ClusterManagerConfigurationDocument, options *Options) error {
	for _, triggerName := range options.PreTriggers {
		if triggerHandler := c.triggerHandlers[triggerName]; triggerHandler != nil {
			c.lock.Unlock()
			err := triggerHandler(ctx, clusterManagerConfigurationDocument)
			c.lock.Lock()
			if err != nil {
				return err
			}
		} else {
			return ErrNotImplemented
		}
	}

	return nil
}

// Query calls a query handler to implement database querying; handlers are
// looked up by the query text registered via SetQueryHandler
func (c *FakeClusterManagerConfigurationDocumentClient) Query(name string, query *Query, options *Options) ClusterManagerConfigurationDocumentRawIterator {
	c.lock.RLock()
	defer c.lock.RUnlock()

	if c.err != nil {
		return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(c.err)
	}

	if queryHandler := c.queryHandlers[query.Query]; queryHandler != nil {
		// drop the read lock so the handler may call back into the client
		c.lock.RUnlock()
		i := queryHandler(c, query, options)
		c.lock.RLock()
		return i
	}

	return NewFakeClusterManagerConfigurationDocumentErroringRawIterator(ErrNotImplemented)
}

// QueryAll calls a query handler to implement database querying
func (c *FakeClusterManagerConfigurationDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.ClusterManagerConfigurationDocuments, error) {
	iter := c.Query("", query, options)
	return iter.Next(ctx, -1)
}
// NewFakeClusterManagerConfigurationDocumentIterator returns an iterator over
// a fixed slice of documents, starting at the given continuation offset.
func NewFakeClusterManagerConfigurationDocumentIterator(clusterManagerConfigurationDocuments []*pkg.ClusterManagerConfigurationDocument, continuation int) ClusterManagerConfigurationDocumentRawIterator {
	return &fakeClusterManagerConfigurationDocumentIterator{clusterManagerConfigurationDocuments: clusterManagerConfigurationDocuments, continuation: continuation}
}

// fakeClusterManagerConfigurationDocumentIterator pages through an in-memory
// slice; continuation is the index of the next document to return.
type fakeClusterManagerConfigurationDocumentIterator struct {
	clusterManagerConfigurationDocuments []*pkg.ClusterManagerConfigurationDocument
	continuation                         int
	done                                 bool
}

// NextRaw is unimplemented on the fake iterator.
func (i *fakeClusterManagerConfigurationDocumentIterator) NextRaw(ctx context.Context, maxItemCount int, out interface{}) error {
	return ErrNotImplemented
}
// Next returns up to maxItemCount documents starting at the iterator's
// current continuation offset; maxItemCount == -1 returns everything that
// remains.  It returns (nil, nil) once the iterator is exhausted.
func (i *fakeClusterManagerConfigurationDocumentIterator) Next(ctx context.Context, maxItemCount int) (*pkg.ClusterManagerConfigurationDocuments, error) {
	if i.done {
		return nil, nil
	}

	var clusterManagerConfigurationDocuments []*pkg.ClusterManagerConfigurationDocument
	if maxItemCount == -1 {
		clusterManagerConfigurationDocuments = i.clusterManagerConfigurationDocuments[i.continuation:]
		i.continuation = len(i.clusterManagerConfigurationDocuments)
		i.done = true
	} else {
		max := i.continuation + maxItemCount
		if max > len(i.clusterManagerConfigurationDocuments) {
			max = len(i.clusterManagerConfigurationDocuments)
		}
		clusterManagerConfigurationDocuments = i.clusterManagerConfigurationDocuments[i.continuation:max]
		// BUG FIX: advance the offset *to* max, not *by* max.  The previous
		// "i.continuation += max" double-counted the already-consumed prefix,
		// skipping documents on the second page and eventually producing an
		// out-of-range slice index on later pages.
		i.continuation = max
		i.done = i.Continuation() == ""
	}

	return &pkg.ClusterManagerConfigurationDocuments{
		ClusterManagerConfigurationDocuments: clusterManagerConfigurationDocuments,
		Count:                                len(clusterManagerConfigurationDocuments),
	}, nil
}
// Continuation returns the current offset as a string, or "" once the slice
// is exhausted.
func (i *fakeClusterManagerConfigurationDocumentIterator) Continuation() string {
	if i.continuation >= len(i.clusterManagerConfigurationDocuments) {
		return ""
	}
	return fmt.Sprintf("%d", i.continuation)
}

// NewFakeClusterManagerConfigurationDocumentErroringRawIterator returns a
// ClusterManagerConfigurationDocumentRawIterator whose methods return the
// given error
func NewFakeClusterManagerConfigurationDocumentErroringRawIterator(err error) ClusterManagerConfigurationDocumentRawIterator {
	return &fakeClusterManagerConfigurationDocumentErroringRawIterator{err: err}
}

// fakeClusterManagerConfigurationDocumentErroringRawIterator fails every call
// with its fixed error.
type fakeClusterManagerConfigurationDocumentErroringRawIterator struct {
	err error
}

// Next always returns the configured error.
func (i *fakeClusterManagerConfigurationDocumentErroringRawIterator) Next(ctx context.Context, maxItemCount int) (*pkg.ClusterManagerConfigurationDocuments, error) {
	return nil, i.err
}

// NextRaw always returns the configured error.
func (i *fakeClusterManagerConfigurationDocumentErroringRawIterator) NextRaw(context.Context, int, interface{}) error {
	return i.err
}

// Continuation always reports no position.
func (i *fakeClusterManagerConfigurationDocumentErroringRawIterator) Continuation() string {
	return ""
}

Просмотреть файл

@ -1,313 +0,0 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"net/http"
"strconv"
"strings"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
type openShiftClusterManagerConfigurationDocumentClient struct {
*databaseClient
path string
}
// OpenShiftClusterManagerConfigurationDocumentClient is a openShiftClusterManagerConfigurationDocument client
type OpenShiftClusterManagerConfigurationDocumentClient interface {
Create(context.Context, string, *pkg.OpenShiftClusterManagerConfigurationDocument, *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error)
List(*Options) OpenShiftClusterManagerConfigurationDocumentIterator
ListAll(context.Context, *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error)
Get(context.Context, string, string, *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error)
Replace(context.Context, string, *pkg.OpenShiftClusterManagerConfigurationDocument, *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error)
Delete(context.Context, string, *pkg.OpenShiftClusterManagerConfigurationDocument, *Options) error
Query(string, *Query, *Options) OpenShiftClusterManagerConfigurationDocumentRawIterator
QueryAll(context.Context, string, *Query, *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error)
ChangeFeed(*Options) OpenShiftClusterManagerConfigurationDocumentIterator
}
type openShiftClusterManagerConfigurationDocumentChangeFeedIterator struct {
*openShiftClusterManagerConfigurationDocumentClient
continuation string
options *Options
}
type openShiftClusterManagerConfigurationDocumentListIterator struct {
*openShiftClusterManagerConfigurationDocumentClient
continuation string
done bool
options *Options
}
type openShiftClusterManagerConfigurationDocumentQueryIterator struct {
*openShiftClusterManagerConfigurationDocumentClient
partitionkey string
query *Query
continuation string
done bool
options *Options
}
// OpenShiftClusterManagerConfigurationDocumentIterator is a openShiftClusterManagerConfigurationDocument iterator
type OpenShiftClusterManagerConfigurationDocumentIterator interface {
Next(context.Context, int) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error)
Continuation() string
}
// OpenShiftClusterManagerConfigurationDocumentRawIterator is a openShiftClusterManagerConfigurationDocument raw iterator
type OpenShiftClusterManagerConfigurationDocumentRawIterator interface {
OpenShiftClusterManagerConfigurationDocumentIterator
NextRaw(context.Context, int, interface{}) error
}
// NewOpenShiftClusterManagerConfigurationDocumentClient returns a new openShiftClusterManagerConfigurationDocument client
func NewOpenShiftClusterManagerConfigurationDocumentClient(collc CollectionClient, collid string) OpenShiftClusterManagerConfigurationDocumentClient {
	return &openShiftClusterManagerConfigurationDocumentClient{
		databaseClient: collc.(*collectionClient).databaseClient,
		// collection-scoped resource path: <db path>/colls/<collection id>
		path: collc.(*collectionClient).path + "/colls/" + collid,
	}
}
// all drains iterator i (unbounded pages) and returns the concatenation of
// every page as a single OpenShiftClusterManagerConfigurationDocuments value.
func (c *openShiftClusterManagerConfigurationDocumentClient) all(ctx context.Context, i OpenShiftClusterManagerConfigurationDocumentIterator) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
	allopenShiftClusterManagerConfigurationDocuments := &pkg.OpenShiftClusterManagerConfigurationDocuments{}
	for {
		openShiftClusterManagerConfigurationDocuments, err := i.Next(ctx, -1)
		if err != nil {
			return nil, err
		}
		if openShiftClusterManagerConfigurationDocuments == nil {
			// iterator exhausted
			break
		}
		allopenShiftClusterManagerConfigurationDocuments.Count += openShiftClusterManagerConfigurationDocuments.Count
		// ResourceID is taken from the last page returned
		allopenShiftClusterManagerConfigurationDocuments.ResourceID = openShiftClusterManagerConfigurationDocuments.ResourceID
		allopenShiftClusterManagerConfigurationDocuments.OpenShiftClusterManagerConfigurationDocuments = append(allopenShiftClusterManagerConfigurationDocuments.OpenShiftClusterManagerConfigurationDocuments, openShiftClusterManagerConfigurationDocuments.OpenShiftClusterManagerConfigurationDocuments...)
	}
	return allopenShiftClusterManagerConfigurationDocuments, nil
}
// Create POSTs a new document into the given partition and returns the stored
// document as the server echoes it back (expects HTTP 201 Created).
func (c *openShiftClusterManagerConfigurationDocumentClient) Create(ctx context.Context, partitionkey string, newopenShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) (openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
	if options == nil {
		options = &Options{}
	}
	// a document being created cannot carry an If-Match ETag
	options.NoETag = true
	err = c.setOptions(options, newopenShiftClusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}
	err = c.do(ctx, http.MethodPost, c.path+"/docs", "docs", c.path, http.StatusCreated, &newopenShiftClusterManagerConfigurationDocument, &openShiftClusterManagerConfigurationDocument, headers)
	return
}
// List returns an iterator over every document in the collection, optionally
// resuming from options.Continuation.
func (c *openShiftClusterManagerConfigurationDocumentClient) List(options *Options) OpenShiftClusterManagerConfigurationDocumentIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}
	return &openShiftClusterManagerConfigurationDocumentListIterator{openShiftClusterManagerConfigurationDocumentClient: c, options: options, continuation: continuation}
}
// ListAll lists every document in the collection in one call by draining List.
func (c *openShiftClusterManagerConfigurationDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
	return c.all(ctx, c.List(options))
}
// Get fetches a single document by id from the given partition
// (expects HTTP 200 OK).
func (c *openShiftClusterManagerConfigurationDocumentClient) Get(ctx context.Context, partitionkey, openShiftClusterManagerConfigurationDocumentid string, options *Options) (openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
	err = c.setOptions(options, nil, headers)
	if err != nil {
		return
	}
	err = c.do(ctx, http.MethodGet, c.path+"/docs/"+openShiftClusterManagerConfigurationDocumentid, "docs", c.path+"/docs/"+openShiftClusterManagerConfigurationDocumentid, http.StatusOK, nil, &openShiftClusterManagerConfigurationDocument, headers)
	return
}
// Replace PUTs the whole document back; unless options.NoETag is set,
// setOptions adds an If-Match header so the write fails with 412 if the
// document changed since it was read.
func (c *openShiftClusterManagerConfigurationDocumentClient) Replace(ctx context.Context, partitionkey string, newopenShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) (openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
	err = c.setOptions(options, newopenShiftClusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}
	err = c.do(ctx, http.MethodPut, c.path+"/docs/"+newopenShiftClusterManagerConfigurationDocument.ID, "docs", c.path+"/docs/"+newopenShiftClusterManagerConfigurationDocument.ID, http.StatusOK, &newopenShiftClusterManagerConfigurationDocument, &openShiftClusterManagerConfigurationDocument, headers)
	return
}
// Delete removes the document (expects HTTP 204 No Content); the same
// If-Match/ETag rules as Replace apply via setOptions.
func (c *openShiftClusterManagerConfigurationDocumentClient) Delete(ctx context.Context, partitionkey string, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) (err error) {
	headers := http.Header{}
	headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
	err = c.setOptions(options, openShiftClusterManagerConfigurationDocument, headers)
	if err != nil {
		return
	}
	err = c.do(ctx, http.MethodDelete, c.path+"/docs/"+openShiftClusterManagerConfigurationDocument.ID, "docs", c.path+"/docs/"+openShiftClusterManagerConfigurationDocument.ID, http.StatusNoContent, nil, nil, headers)
	return
}
// Query returns a raw iterator over the results of a SQL query; an empty
// partitionkey makes the iterator issue a cross-partition query.
func (c *openShiftClusterManagerConfigurationDocumentClient) Query(partitionkey string, query *Query, options *Options) OpenShiftClusterManagerConfigurationDocumentRawIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}
	return &openShiftClusterManagerConfigurationDocumentQueryIterator{openShiftClusterManagerConfigurationDocumentClient: c, partitionkey: partitionkey, query: query, options: options, continuation: continuation}
}
// QueryAll runs the query and drains every result page into one value.
func (c *openShiftClusterManagerConfigurationDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
	return c.all(ctx, c.Query(partitionkey, query, options))
}
// ChangeFeed returns an iterator over the collection's incremental change
// feed, optionally resuming from options.Continuation (an Etag).
func (c *openShiftClusterManagerConfigurationDocumentClient) ChangeFeed(options *Options) OpenShiftClusterManagerConfigurationDocumentIterator {
	continuation := ""
	if options != nil {
		continuation = options.Continuation
	}
	return &openShiftClusterManagerConfigurationDocumentChangeFeedIterator{openShiftClusterManagerConfigurationDocumentClient: c, options: options, continuation: continuation}
}
// setOptions translates options into the request headers Cosmos DB expects:
// an optimistic-concurrency If-Match ETag (unless NoETag), pre/post trigger
// includes, and a partition key range ID. A nil options is a no-op. It
// returns ErrETagRequired when an ETag is needed but the document has none.
func (c *openShiftClusterManagerConfigurationDocumentClient) setOptions(options *Options, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, headers http.Header) error {
	if options == nil {
		return nil
	}
	if openShiftClusterManagerConfigurationDocument != nil && !options.NoETag {
		if openShiftClusterManagerConfigurationDocument.ETag == "" {
			return ErrETagRequired
		}
		headers.Set("If-Match", openShiftClusterManagerConfigurationDocument.ETag)
	}
	if len(options.PreTriggers) > 0 {
		headers.Set("X-Ms-Documentdb-Pre-Trigger-Include", strings.Join(options.PreTriggers, ","))
	}
	if len(options.PostTriggers) > 0 {
		headers.Set("X-Ms-Documentdb-Post-Trigger-Include", strings.Join(options.PostTriggers, ","))
	}
	if len(options.PartitionKeyRangeID) > 0 {
		headers.Set("X-Ms-Documentdb-PartitionKeyRangeID", options.PartitionKeyRangeID)
	}
	return nil
}
// Next reads the next page of the incremental change feed. A 304 Not Modified
// (nothing new) is not an error: it returns a nil result. The response Etag
// becomes the continuation for the following call.
func (i *openShiftClusterManagerConfigurationDocumentChangeFeedIterator) Next(ctx context.Context, maxItemCount int) (openShiftClusterManagerConfigurationDocuments *pkg.OpenShiftClusterManagerConfigurationDocuments, err error) {
	headers := http.Header{}
	// A-IM selects the incremental change feed rather than a plain read
	headers.Set("A-IM", "Incremental feed")
	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	if i.continuation != "" {
		headers.Set("If-None-Match", i.continuation)
	}
	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}
	err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &openShiftClusterManagerConfigurationDocuments, headers)
	if IsErrorStatusCode(err, http.StatusNotModified) {
		err = nil
	}
	if err != nil {
		return
	}
	i.continuation = headers.Get("Etag")
	return
}
// Continuation returns the Etag from which the change feed will resume.
func (i *openShiftClusterManagerConfigurationDocumentChangeFeedIterator) Continuation() string {
	return i.continuation
}
// Next reads the next page of the listing via the X-Ms-Continuation paging
// protocol; the iterator is done once the server omits the token.
func (i *openShiftClusterManagerConfigurationDocumentListIterator) Next(ctx context.Context, maxItemCount int) (openShiftClusterManagerConfigurationDocuments *pkg.OpenShiftClusterManagerConfigurationDocuments, err error) {
	if i.done {
		return
	}
	headers := http.Header{}
	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	if i.continuation != "" {
		headers.Set("X-Ms-Continuation", i.continuation)
	}
	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}
	err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &openShiftClusterManagerConfigurationDocuments, headers)
	if err != nil {
		return
	}
	i.continuation = headers.Get("X-Ms-Continuation")
	i.done = i.continuation == ""
	return
}
// Continuation returns the server-issued paging token for the listing.
func (i *openShiftClusterManagerConfigurationDocumentListIterator) Continuation() string {
	return i.continuation
}
// Next returns the next typed page of query results by delegating to NextRaw.
func (i *openShiftClusterManagerConfigurationDocumentQueryIterator) Next(ctx context.Context, maxItemCount int) (openShiftClusterManagerConfigurationDocuments *pkg.OpenShiftClusterManagerConfigurationDocuments, err error) {
	err = i.NextRaw(ctx, maxItemCount, &openShiftClusterManagerConfigurationDocuments)
	return
}
// NextRaw POSTs the SQL query and decodes the next result page directly into
// raw. With no partition key it enables cross-partition execution; paging uses
// the X-Ms-Continuation token, and the iterator is done once it is absent.
func (i *openShiftClusterManagerConfigurationDocumentQueryIterator) NextRaw(ctx context.Context, maxItemCount int, raw interface{}) (err error) {
	if i.done {
		return
	}
	headers := http.Header{}
	headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
	// mark the POST body as a query rather than a document create
	headers.Set("X-Ms-Documentdb-Isquery", "True")
	headers.Set("Content-Type", "application/query+json")
	if i.partitionkey != "" {
		headers.Set("X-Ms-Documentdb-Partitionkey", `["`+i.partitionkey+`"]`)
	} else {
		headers.Set("X-Ms-Documentdb-Query-Enablecrosspartition", "True")
	}
	if i.continuation != "" {
		headers.Set("X-Ms-Continuation", i.continuation)
	}
	err = i.setOptions(i.options, nil, headers)
	if err != nil {
		return
	}
	err = i.do(ctx, http.MethodPost, i.path+"/docs", "docs", i.path, http.StatusOK, &i.query, &raw, headers)
	if err != nil {
		return
	}
	i.continuation = headers.Get("X-Ms-Continuation")
	i.done = i.continuation == ""
	return
}
// Continuation returns the server-issued paging token for the query.
func (i *openShiftClusterManagerConfigurationDocumentQueryIterator) Continuation() string {
	return i.continuation
}

Просмотреть файл

@ -1,361 +0,0 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"fmt"
"net/http"
"sync"
"github.com/ugorji/go/codec"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
// fakeOpenShiftClusterManagerConfigurationDocumentTriggerHandler simulates a
// server-side pre-trigger against a document.
type fakeOpenShiftClusterManagerConfigurationDocumentTriggerHandler func(context.Context, *pkg.OpenShiftClusterManagerConfigurationDocument) error

// fakeOpenShiftClusterManagerConfigurationDocumentQueryHandler simulates
// server-side execution of a named query.
type fakeOpenShiftClusterManagerConfigurationDocumentQueryHandler func(OpenShiftClusterManagerConfigurationDocumentClient, *Query, *Options) OpenShiftClusterManagerConfigurationDocumentRawIterator

// compile-time interface satisfaction check
var _ OpenShiftClusterManagerConfigurationDocumentClient = &FakeOpenShiftClusterManagerConfigurationDocumentClient{}
// NewFakeOpenShiftClusterManagerConfigurationDocumentClient returns a FakeOpenShiftClusterManagerConfigurationDocumentClient
func NewFakeOpenShiftClusterManagerConfigurationDocumentClient(h *codec.JsonHandle) *FakeOpenShiftClusterManagerConfigurationDocumentClient {
return &FakeOpenShiftClusterManagerConfigurationDocumentClient{
jsonHandle: h,
openShiftClusterManagerConfigurationDocuments: make(map[string]*pkg.OpenShiftClusterManagerConfigurationDocument),
triggerHandlers: make(map[string]fakeOpenShiftClusterManagerConfigurationDocumentTriggerHandler),
queryHandlers: make(map[string]fakeOpenShiftClusterManagerConfigurationDocumentQueryHandler),
}
}
// FakeOpenShiftClusterManagerConfigurationDocumentClient is a FakeOpenShiftClusterManagerConfigurationDocumentClient
type FakeOpenShiftClusterManagerConfigurationDocumentClient struct {
lock sync.RWMutex
jsonHandle *codec.JsonHandle
openShiftClusterManagerConfigurationDocuments map[string]*pkg.OpenShiftClusterManagerConfigurationDocument
triggerHandlers map[string]fakeOpenShiftClusterManagerConfigurationDocumentTriggerHandler
queryHandlers map[string]fakeOpenShiftClusterManagerConfigurationDocumentQueryHandler
sorter func([]*pkg.OpenShiftClusterManagerConfigurationDocument)
etag int
// returns true if documents conflict
conflictChecker func(*pkg.OpenShiftClusterManagerConfigurationDocument, *pkg.OpenShiftClusterManagerConfigurationDocument) bool
// err, if not nil, is an error to return when attempting to communicate
// with this Client
err error
}
// SetError sets or unsets an error that will be returned on any
// FakeOpenShiftClusterManagerConfigurationDocumentClient method invocation
// SetError sets or unsets an error that will be returned on any
// FakeOpenShiftClusterManagerConfigurationDocumentClient method invocation
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) SetError(err error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.err = err
}
// SetSorter sets or unsets a sorter function which will be used to sort values
// returned by List() for test stability
// SetSorter sets or unsets a sorter function which will be used to sort values
// returned by List() for test stability
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) SetSorter(sorter func([]*pkg.OpenShiftClusterManagerConfigurationDocument)) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.sorter = sorter
}
// SetConflictChecker sets or unsets a function which can be used to validate
// additional unique keys in a OpenShiftClusterManagerConfigurationDocument
// SetConflictChecker sets or unsets a function which can be used to validate
// additional unique keys in a OpenShiftClusterManagerConfigurationDocument
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) SetConflictChecker(conflictChecker func(*pkg.OpenShiftClusterManagerConfigurationDocument, *pkg.OpenShiftClusterManagerConfigurationDocument) bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.conflictChecker = conflictChecker
}
// SetTriggerHandler sets or unsets a trigger handler
// SetTriggerHandler sets or unsets a trigger handler
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) SetTriggerHandler(triggerName string, trigger fakeOpenShiftClusterManagerConfigurationDocumentTriggerHandler) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.triggerHandlers[triggerName] = trigger
}
// SetQueryHandler sets or unsets a query handler
// SetQueryHandler sets or unsets a query handler
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) SetQueryHandler(queryName string, query fakeOpenShiftClusterManagerConfigurationDocumentQueryHandler) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.queryHandlers[queryName] = query
}
// deepCopy clones a document by encoding and re-decoding it through the
// client's JSON handle, so callers never share pointers with the store.
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) deepCopy(openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument) (*pkg.OpenShiftClusterManagerConfigurationDocument, error) {
	var b []byte
	err := codec.NewEncoderBytes(&b, c.jsonHandle).Encode(openShiftClusterManagerConfigurationDocument)
	if err != nil {
		return nil, err
	}
	openShiftClusterManagerConfigurationDocument = nil
	err = codec.NewDecoderBytes(b, c.jsonHandle).Decode(&openShiftClusterManagerConfigurationDocument)
	if err != nil {
		return nil, err
	}
	return openShiftClusterManagerConfigurationDocument, nil
}
// apply implements both Create (isCreate=true) and Replace (isCreate=false):
// it runs pre-triggers, enforces create-conflict and replace ETag/existence
// semantics plus any configured conflictChecker, then stores the document
// with a freshly minted ETag and returns a deep copy of it.
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) apply(ctx context.Context, partitionkey string, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options, isCreate bool) (*pkg.OpenShiftClusterManagerConfigurationDocument, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	openShiftClusterManagerConfigurationDocument, err := c.deepCopy(openShiftClusterManagerConfigurationDocument) // copy now because pretriggers can mutate openShiftClusterManagerConfigurationDocument
	if err != nil {
		return nil, err
	}
	if options != nil {
		err := c.processPreTriggers(ctx, openShiftClusterManagerConfigurationDocument, options)
		if err != nil {
			return nil, err
		}
	}
	existingOpenShiftClusterManagerConfigurationDocument, exists := c.openShiftClusterManagerConfigurationDocuments[openShiftClusterManagerConfigurationDocument.ID]
	if isCreate && exists {
		// mimics Cosmos DB's 409 on duplicate ID
		return nil, &Error{
			StatusCode: http.StatusConflict,
			Message:    "Entity with the specified id already exists in the system",
		}
	}
	if !isCreate {
		if !exists {
			return nil, &Error{StatusCode: http.StatusNotFound}
		}
		// optimistic-concurrency check: replace must carry the current ETag
		if openShiftClusterManagerConfigurationDocument.ETag != existingOpenShiftClusterManagerConfigurationDocument.ETag {
			return nil, &Error{StatusCode: http.StatusPreconditionFailed}
		}
	}
	if c.conflictChecker != nil {
		for _, openShiftClusterManagerConfigurationDocumentToCheck := range c.openShiftClusterManagerConfigurationDocuments {
			if c.conflictChecker(openShiftClusterManagerConfigurationDocumentToCheck, openShiftClusterManagerConfigurationDocument) {
				return nil, &Error{
					StatusCode: http.StatusConflict,
					Message:    "Entity with the specified id already exists in the system",
				}
			}
		}
	}
	openShiftClusterManagerConfigurationDocument.ETag = fmt.Sprint(c.etag)
	c.etag++
	c.openShiftClusterManagerConfigurationDocuments[openShiftClusterManagerConfigurationDocument.ID] = openShiftClusterManagerConfigurationDocument
	return c.deepCopy(openShiftClusterManagerConfigurationDocument)
}
// Create creates a OpenShiftClusterManagerConfigurationDocument in the database
// Create creates a OpenShiftClusterManagerConfigurationDocument in the database
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) Create(ctx context.Context, partitionkey string, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error) {
	return c.apply(ctx, partitionkey, openShiftClusterManagerConfigurationDocument, options, true)
}
// Replace replaces a OpenShiftClusterManagerConfigurationDocument in the database
// Replace replaces a OpenShiftClusterManagerConfigurationDocument in the database
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) Replace(ctx context.Context, partitionkey string, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error) {
	return c.apply(ctx, partitionkey, openShiftClusterManagerConfigurationDocument, options, false)
}
// List returns a OpenShiftClusterManagerConfigurationDocumentIterator to list all OpenShiftClusterManagerConfigurationDocuments in the database
// List returns a OpenShiftClusterManagerConfigurationDocumentIterator to list all OpenShiftClusterManagerConfigurationDocuments in the database
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) List(*Options) OpenShiftClusterManagerConfigurationDocumentIterator {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if c.err != nil {
		return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(c.err)
	}
	openShiftClusterManagerConfigurationDocuments := make([]*pkg.OpenShiftClusterManagerConfigurationDocument, 0, len(c.openShiftClusterManagerConfigurationDocuments))
	for _, openShiftClusterManagerConfigurationDocument := range c.openShiftClusterManagerConfigurationDocuments {
		// deep-copy each document so iterator consumers can't mutate the store
		openShiftClusterManagerConfigurationDocument, err := c.deepCopy(openShiftClusterManagerConfigurationDocument)
		if err != nil {
			return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(err)
		}
		openShiftClusterManagerConfigurationDocuments = append(openShiftClusterManagerConfigurationDocuments, openShiftClusterManagerConfigurationDocument)
	}
	// optional sorter gives tests a deterministic order (map iteration is random)
	if c.sorter != nil {
		c.sorter(openShiftClusterManagerConfigurationDocuments)
	}
	return NewFakeOpenShiftClusterManagerConfigurationDocumentIterator(openShiftClusterManagerConfigurationDocuments, 0)
}
// ListAll lists all OpenShiftClusterManagerConfigurationDocuments in the database
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
iter := c.List(options)
return iter.Next(ctx, -1)
}
// Get gets a OpenShiftClusterManagerConfigurationDocument from the database
// Get gets a OpenShiftClusterManagerConfigurationDocument from the database;
// it returns a 404 Error when the id is unknown and a deep copy otherwise.
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) Get(ctx context.Context, partitionkey string, id string, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocument, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if c.err != nil {
		return nil, c.err
	}
	openShiftClusterManagerConfigurationDocument, exists := c.openShiftClusterManagerConfigurationDocuments[id]
	if !exists {
		return nil, &Error{StatusCode: http.StatusNotFound}
	}
	return c.deepCopy(openShiftClusterManagerConfigurationDocument)
}
// Delete deletes a OpenShiftClusterManagerConfigurationDocument from the database
// Delete deletes a OpenShiftClusterManagerConfigurationDocument from the
// database; it returns a 404 Error when the document's ID is unknown.
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) Delete(ctx context.Context, partitionKey string, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.err != nil {
		return c.err
	}
	_, exists := c.openShiftClusterManagerConfigurationDocuments[openShiftClusterManagerConfigurationDocument.ID]
	if !exists {
		return &Error{StatusCode: http.StatusNotFound}
	}
	delete(c.openShiftClusterManagerConfigurationDocuments, openShiftClusterManagerConfigurationDocument.ID)
	return nil
}
// ChangeFeed is unimplemented
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) ChangeFeed(*Options) OpenShiftClusterManagerConfigurationDocumentIterator {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(c.err)
}
return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(ErrNotImplemented)
}
// processPreTriggers runs each registered pre-trigger named in options against
// the document; an unregistered trigger name yields ErrNotImplemented. The
// client lock is dropped around the handler call because handlers may call
// back into the (locked) client; the caller (apply) holds the write lock.
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) processPreTriggers(ctx context.Context, openShiftClusterManagerConfigurationDocument *pkg.OpenShiftClusterManagerConfigurationDocument, options *Options) error {
	for _, triggerName := range options.PreTriggers {
		if triggerHandler := c.triggerHandlers[triggerName]; triggerHandler != nil {
			c.lock.Unlock()
			err := triggerHandler(ctx, openShiftClusterManagerConfigurationDocument)
			c.lock.Lock()
			if err != nil {
				return err
			}
		} else {
			return ErrNotImplemented
		}
	}
	return nil
}
// Query calls a query handler to implement database querying
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) Query(name string, query *Query, options *Options) OpenShiftClusterManagerConfigurationDocumentRawIterator {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(c.err)
}
if queryHandler := c.queryHandlers[query.Query]; queryHandler != nil {
c.lock.RUnlock()
i := queryHandler(c, query, options)
c.lock.RLock()
return i
}
return NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(ErrNotImplemented)
}
// QueryAll calls a query handler to implement database querying
func (c *FakeOpenShiftClusterManagerConfigurationDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
iter := c.Query("", query, options)
return iter.Next(ctx, -1)
}
// NewFakeOpenShiftClusterManagerConfigurationDocumentIterator returns an
// iterator over the given in-memory documents, starting at index continuation.
func NewFakeOpenShiftClusterManagerConfigurationDocumentIterator(openShiftClusterManagerConfigurationDocuments []*pkg.OpenShiftClusterManagerConfigurationDocument, continuation int) OpenShiftClusterManagerConfigurationDocumentRawIterator {
	return &fakeOpenShiftClusterManagerConfigurationDocumentIterator{openShiftClusterManagerConfigurationDocuments: openShiftClusterManagerConfigurationDocuments, continuation: continuation}
}
// fakeOpenShiftClusterManagerConfigurationDocumentIterator iterates an
// in-memory slice of documents; continuation is the slice index of the next
// document to return.
type fakeOpenShiftClusterManagerConfigurationDocumentIterator struct {
	openShiftClusterManagerConfigurationDocuments []*pkg.OpenShiftClusterManagerConfigurationDocument
	continuation                                  int
	done                                          bool
}
// NextRaw is unimplemented for the in-memory iterator.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentIterator) NextRaw(ctx context.Context, maxItemCount int, out interface{}) error {
	return ErrNotImplemented
}
// Next returns up to maxItemCount documents from the in-memory slice
// (-1 means all remaining), or nil once the iterator is exhausted.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentIterator) Next(ctx context.Context, maxItemCount int) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
	if i.done {
		return nil, nil
	}
	var openShiftClusterManagerConfigurationDocuments []*pkg.OpenShiftClusterManagerConfigurationDocument
	if maxItemCount == -1 {
		openShiftClusterManagerConfigurationDocuments = i.openShiftClusterManagerConfigurationDocuments[i.continuation:]
		i.continuation = len(i.openShiftClusterManagerConfigurationDocuments)
		i.done = true
	} else {
		max := i.continuation + maxItemCount
		if max > len(i.openShiftClusterManagerConfigurationDocuments) {
			max = len(i.openShiftClusterManagerConfigurationDocuments)
		}
		openShiftClusterManagerConfigurationDocuments = i.openShiftClusterManagerConfigurationDocuments[i.continuation:max]
		// advance to the clamped end index of the page just returned. The
		// previous `i.continuation += max` added the absolute end index to the
		// already-consumed prefix, so paged iteration skipped documents (and
		// terminated early) after the first page.
		i.continuation = max
		i.done = i.Continuation() == ""
	}
	return &pkg.OpenShiftClusterManagerConfigurationDocuments{
		OpenShiftClusterManagerConfigurationDocuments: openShiftClusterManagerConfigurationDocuments,
		Count: len(openShiftClusterManagerConfigurationDocuments),
	}, nil
}
// Continuation returns the next slice index as a token, or "" when exhausted.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentIterator) Continuation() string {
	if i.continuation >= len(i.openShiftClusterManagerConfigurationDocuments) {
		return ""
	}
	return fmt.Sprintf("%d", i.continuation)
}
// NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator returns a OpenShiftClusterManagerConfigurationDocumentRawIterator which
// whose methods return the given error
// NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator returns a OpenShiftClusterManagerConfigurationDocumentRawIterator
// whose methods return the given error
func NewFakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator(err error) OpenShiftClusterManagerConfigurationDocumentRawIterator {
	return &fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator{err: err}
}
// fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator is an
// iterator whose every method fails with the stored error.
type fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator struct {
	err error
}
// Next always fails with the stored error.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator) Next(ctx context.Context, maxItemCount int) (*pkg.OpenShiftClusterManagerConfigurationDocuments, error) {
	return nil, i.err
}
// NextRaw always fails with the stored error.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator) NextRaw(context.Context, int, interface{}) error {
	return i.err
}
// Continuation always returns an empty token.
func (i *fakeOpenShiftClusterManagerConfigurationDocumentErroringRawIterator) Continuation() string {
	return ""
}

Просмотреть файл

@ -28,13 +28,13 @@ import (
// Cosmos DB collection (container) names used by the RP database layer.
// collOpenShiftVersion was declared twice in this block (a merge/diff
// artifact), which does not compile; the duplicate is removed and the list
// kept in alphabetical order.
const (
	collAsyncOperations   = "AsyncOperations"
	collBilling           = "Billing"
	collClusterManager    = "ClusterManagerConfigurations"
	collGateway           = "Gateway"
	collHiveResources     = "HiveResources"
	collMonitors          = "Monitors"
	collOpenShiftClusters = "OpenShiftClusters"
	collOpenShiftVersion  = "OpenShiftVersions"
	collPortal            = "Portal"
	collSubscriptions     = "Subscriptions"
)
func NewDatabaseClient(log *logrus.Entry, env env.Core, authorizer cosmosdb.Authorizer, m metrics.Emitter, aead encryption.AEAD) (cosmosdb.DatabaseClient, error) {

Просмотреть файл

@ -1,99 +0,0 @@
package database
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"strings"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
)
// ocmClusterDocument wraps a cosmosdb OpenShiftClusterDocument client, adding
// lowercase-ID enforcement and retry-on-conflict patching.
type ocmClusterDocument struct {
	c cosmosdb.OpenShiftClusterDocumentClient
}
// OCMClusterDocument is the database-layer interface for OCM cluster
// documents stored in Cosmos DB.
type OCMClusterDocument interface {
	Create(context.Context, *api.OpenShiftClusterDocument) (*api.OpenShiftClusterDocument, error)
	Get(context.Context, string) (*api.OpenShiftClusterDocument, error)
	// Patch reads the document, applies the callback and writes it back,
	// retrying on concurrent-write (ETag) conflicts.
	Patch(context.Context, string, func(*api.OpenShiftClusterDocument) error) (*api.OpenShiftClusterDocument, error)
	Delete(context.Context, *api.OpenShiftClusterDocument) error
	ChangeFeed() cosmosdb.OpenShiftClusterDocumentIterator
}
// NewOCMClusterDocument constructs an OCMClusterDocument backed by the
// HiveResources collection of the environment-specific database.
func NewOCMClusterDocument(ctx context.Context, isDevelopmentMode bool, dbc cosmosdb.DatabaseClient) (OCMClusterDocument, error) {
	dbid, err := Name(isDevelopmentMode)
	if err != nil {
		return nil, err
	}
	collc := cosmosdb.NewCollectionClient(dbc, dbid)
	documentClient := cosmosdb.NewOpenShiftClusterDocumentClient(collc, collHiveResources)
	return NewOCMClusterDocumentWithProvidedClient(documentClient), nil
}
// NewOCMClusterDocumentWithProvidedClient wraps an existing document client;
// useful for injecting fakes in tests.
func NewOCMClusterDocumentWithProvidedClient(client cosmosdb.OpenShiftClusterDocumentClient) OCMClusterDocument {
	return &ocmClusterDocument{c: client}
}
// Only used internally by Patch()
func (c *ocmClusterDocument) replace(ctx context.Context, doc *api.OpenShiftClusterDocument) (*api.OpenShiftClusterDocument, error) {
if doc.ID != strings.ToLower(doc.ID) {
return nil, fmt.Errorf("id %q is not lower case", doc.ID)
}
return c.c.Replace(ctx, doc.ID, doc, nil)
}
// Create stores a new document; the document ID doubles as the partition key
// and must already be lower case.
func (c *ocmClusterDocument) Create(ctx context.Context, doc *api.OpenShiftClusterDocument) (*api.OpenShiftClusterDocument, error) {
	if doc.ID != strings.ToLower(doc.ID) {
		return nil, fmt.Errorf("id %q is not lower case", doc.ID)
	}
	return c.c.Create(ctx, doc.ID, doc, nil)
}
// Get fetches a document by its (lower-case) ID, which is also its partition
// key.
func (c *ocmClusterDocument) Get(ctx context.Context, id string) (*api.OpenShiftClusterDocument, error) {
	if id != strings.ToLower(id) {
		return nil, fmt.Errorf("id %q is not lower case", id)
	}
	return c.c.Get(ctx, id, id, nil)
}
// Patch atomically updates the document with the given id: it fetches the
// document, applies callback to it, and replaces it, retrying the whole
// read-modify-write cycle on ETag precondition failures caused by concurrent
// writers. It returns the replaced document as stored.
func (c *ocmClusterDocument) Patch(ctx context.Context, id string, callback func(*api.OpenShiftClusterDocument) error) (*api.OpenShiftClusterDocument, error) {
	var doc *api.OpenShiftClusterDocument

	err := cosmosdb.RetryOnPreconditionFailed(func() (err error) {
		// assign to the captured doc with `=`, not a shadowing `:=` — the
		// previous code declared a new doc inside the closure, so Patch
		// always returned a nil document to the caller
		doc, err = c.Get(ctx, id)
		if err != nil {
			return err
		}

		err = callback(doc)
		if err != nil {
			return err
		}

		doc, err = c.replace(ctx, doc)
		return err
	})

	return doc, err
}
// Delete removes the document unconditionally (NoETag skips the If-Match
// optimistic-concurrency check); the ID must already be lower case.
func (c *ocmClusterDocument) Delete(ctx context.Context, doc *api.OpenShiftClusterDocument) error {
	if doc.ID != strings.ToLower(doc.ID) {
		return fmt.Errorf("id %q is not lower case", doc.ID)
	}
	return c.c.Delete(ctx, doc.ID, doc, &cosmosdb.Options{NoETag: true})
}
// ChangeFeed returns an iterator over the collection's change feed, starting
// from the beginning (no continuation options).
func (c *ocmClusterDocument) ChangeFeed() cosmosdb.OpenShiftClusterDocumentIterator {
	return c.c.ChangeFeed(nil)
}

Просмотреть файл

@ -68,6 +68,27 @@
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), parameters('databaseName'))]"
]
},
{
"properties": {
"resource": {
"id": "ClusterManagerConfigurations",
"partitionKey": {
"paths": [
"/partitionKey"
],
"kind": "Hash"
}
},
"options": {}
},
"name": "[concat(parameters('databaseAccountName'), '/', parameters('databaseName'), '/ClusterManagerConfigurations')]",
"type": "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers",
"location": "[resourceGroup().location]",
"apiVersion": "2021-01-15",
"dependsOn": [
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), parameters('databaseName'))]"
]
},
{
"properties": {
"resource": {
@ -204,6 +225,15 @@
"/id"
],
"kind": "Hash"
},
"uniqueKeyPolicy": {
"uniqueKeys": [
{
"paths": [
"/key"
]
}
]
}
},
"options": {}

Просмотреть файл

@ -815,6 +815,28 @@
"[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('databaseAccountName'))]"
]
},
{
"properties": {
"resource": {
"id": "ClusterManagerConfigurations",
"partitionKey": {
"paths": [
"/partitionKey"
],
"kind": "Hash"
}
},
"options": {}
},
"name": "[concat(parameters('databaseAccountName'), '/', 'ARO', '/ClusterManagerConfigurations')]",
"type": "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers",
"location": "[resourceGroup().location]",
"apiVersion": "2021-01-15",
"dependsOn": [
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), 'ARO')]",
"[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('databaseAccountName'))]"
]
},
{
"properties": {
"resource": {
@ -960,6 +982,15 @@
"/id"
],
"kind": "Hash"
},
"uniqueKeyPolicy": {
"uniqueKeys": [
{
"paths": [
"/key"
]
}
]
}
},
"options": {}

Просмотреть файл

@ -1669,6 +1669,29 @@ func (g *generator) database(databaseName string, addDependsOn bool) []*arm.Reso
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), " + databaseName + ")]",
},
},
{
Resource: &mgmtdocumentdb.SQLContainerCreateUpdateParameters{
SQLContainerCreateUpdateProperties: &mgmtdocumentdb.SQLContainerCreateUpdateProperties{
Resource: &mgmtdocumentdb.SQLContainerResource{
ID: to.StringPtr("ClusterManagerConfigurations"),
PartitionKey: &mgmtdocumentdb.ContainerPartitionKey{
Paths: &[]string{
"/partitionKey",
},
Kind: mgmtdocumentdb.PartitionKindHash,
},
},
Options: &mgmtdocumentdb.CreateUpdateOptions{},
},
Name: to.StringPtr("[concat(parameters('databaseAccountName'), '/', " + databaseName + ", '/ClusterManagerConfigurations')]"),
Type: to.StringPtr("Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers"),
Location: to.StringPtr("[resourceGroup().location]"),
},
APIVersion: azureclient.APIVersion("Microsoft.DocumentDB"),
DependsOn: []string{
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), " + databaseName + ")]",
},
},
{
Resource: &mgmtdocumentdb.SQLContainerCreateUpdateParameters{
SQLContainerCreateUpdateProperties: &mgmtdocumentdb.SQLContainerCreateUpdateProperties{
@ -1771,6 +1794,15 @@ func (g *generator) database(databaseName string, addDependsOn bool) []*arm.Reso
},
Kind: mgmtdocumentdb.PartitionKindHash,
},
UniqueKeyPolicy: &mgmtdocumentdb.UniqueKeyPolicy{
UniqueKeys: &[]mgmtdocumentdb.UniqueKey{
{
Paths: &[]string{
"/key",
},
},
},
},
},
Options: &mgmtdocumentdb.CreateUpdateOptions{},
},

Просмотреть файл

@ -152,7 +152,7 @@ func TestAdminCordonUncordonNode(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, nil, nil)

Просмотреть файл

@ -84,7 +84,7 @@ func TestAdminDrainNode(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, nil, nil)

Просмотреть файл

@ -200,7 +200,7 @@ func TestAdminKubernetesObjectsGetAndDelete(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, nil, nil)
if err != nil {
@ -428,7 +428,7 @@ func TestAdminPostKubernetesObjects(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, nil, nil)
if err != nil {

Просмотреть файл

@ -125,7 +125,7 @@ func TestAdminKubernetesGetPodLogs(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, nil, nil)
if err != nil {

Просмотреть файл

@ -123,7 +123,7 @@ func TestAdminListOpenShiftCluster(t *testing.T) {
ti.openShiftClustersClient.SetError(tt.throwsError)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, aead, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, aead, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {

Просмотреть файл

@ -84,7 +84,7 @@ func TestAdminRedeployVM(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
return a, nil
}, nil)

Просмотреть файл

@ -86,7 +86,7 @@ func TestAdminListResourcesList(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
return a, nil
}, nil)

Просмотреть файл

@ -84,7 +84,7 @@ func TestAdminStartVM(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
return a, nil
}, nil)

Просмотреть файл

@ -84,7 +84,7 @@ func TestAdminStopVM(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
return a, nil
}, nil)

Просмотреть файл

@ -211,7 +211,7 @@ func TestAdminVMResize(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil,
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil,
func(e *logrus.Entry, i env.Interface, osc *api.OpenShiftCluster) (adminactions.KubeActions, error) {
return k, nil
}, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {

Просмотреть файл

@ -136,7 +136,7 @@ func TestAdminListVMSizeList(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, func(*logrus.Entry, env.Interface, *api.OpenShiftCluster, *api.SubscriptionDocument) (adminactions.AzureActions, error) {
return a, nil
}, nil)

Просмотреть файл

@ -96,7 +96,7 @@ func TestOpenShiftVersionList(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)

Просмотреть файл

@ -196,7 +196,7 @@ func TestOpenShiftVersionPut(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -133,7 +133,7 @@ func TestGetAsyncOperationResult(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -183,7 +183,7 @@ func TestGetAsyncOperationsStatus(t *testing.T) {
ti.asyncOperationsClient.SetError(tt.dbError)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -0,0 +1,51 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"net/http"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
// deleteClusterManagerConfiguration handles DELETE requests for a cluster
// manager configuration resource, mapping the outcome to ARM-style status
// codes: a database 404 becomes 204 No Content (delete of a missing resource
// is not an error) and success becomes 202 Accepted.
func (f *frontend) deleteClusterManagerConfiguration(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)

	err := f._deleteClusterManagerConfiguration(ctx, log, r)
	if cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		err = statusCodeError(http.StatusNoContent)
	} else if err == nil {
		err = statusCodeError(http.StatusAccepted)
	}

	reply(log, w, nil, nil, err)
}
// _deleteClusterManagerConfiguration looks up the configuration document keyed
// by the request path, marks it as deleting, persists that flag, and then
// removes the document from the database.
func (f *frontend) _deleteClusterManagerConfiguration(ctx context.Context, log *logrus.Entry, r *http.Request) error {
	vars := mux.Vars(r)

	doc, err := f.dbClusterManagerConfiguration.Get(ctx, r.URL.Path)
	if cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "The Resource '%s/%s' under resource group '%s' was not found.", vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
	}
	if err != nil {
		return err
	}

	// Persist the deleting flag before removing the document.
	doc.Deleting = true
	if _, err = f.dbClusterManagerConfiguration.Update(ctx, doc); err != nil {
		return err
	}

	return f.dbClusterManagerConfiguration.Delete(ctx, doc)
}

Просмотреть файл

@ -0,0 +1,44 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"net/http"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
// getClusterManagerConfiguration handles GET requests for a cluster manager
// configuration, converting the stored document to the external representation
// of the requested api-version.
func (f *frontend) getClusterManagerConfiguration(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
	apiVersion := mux.Vars(r)["api-version"]

	converter := f.apis[apiVersion].ClusterManagerConfigurationConverter()
	b, err := f._getClusterManagerConfiguration(ctx, log, r, converter)

	reply(log, w, nil, b, err)
}
// _getClusterManagerConfiguration fetches the configuration document keyed by
// the request path and returns its JSON-encoded external representation.
// A database 404 is translated into an ARM ResourceNotFound cloud error.
func (f *frontend) _getClusterManagerConfiguration(ctx context.Context, log *logrus.Entry, r *http.Request, converter api.ClusterManagerConfigurationConverter) ([]byte, error) {
	vars := mux.Vars(r)

	doc, err := f.dbClusterManagerConfiguration.Get(ctx, r.URL.Path)
	if err != nil {
		if cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
			return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "The Resource '%s/%s/%s/%s' was not found.", vars["resourceType"], vars["resourceName"], vars["clusterManagerKind"], vars["ocmResourceName"])
		}
		return nil, err
	}

	ext, err := converter.ToExternal(doc.ClusterManagerConfiguration)
	if err != nil {
		return nil, err
	}

	return json.MarshalIndent(ext, "", " ")
}

Просмотреть файл

@ -0,0 +1,138 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
"github.com/Azure/ARO-RP/pkg/util/arm"
)
// putOrPatchClusterManagerConfiguration handles PUT and PATCH requests for a
// cluster manager configuration, retrying the whole operation when the
// database reports a precondition failure (optimistic-concurrency conflict).
func (f *frontend) putOrPatchClusterManagerConfiguration(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
	apiVersion := mux.Vars(r)["api-version"]

	var (
		header http.Header
		b      []byte
	)
	err := cosmosdb.RetryOnPreconditionFailed(func() (err error) {
		b, err = f._putOrPatchClusterManagerConfiguration(ctx, log, r, &header, f.apis[apiVersion].ClusterManagerConfigurationConverter())
		return
	})

	reply(log, w, header, b, err)
}
// _putOrPatchClusterManagerConfiguration creates or updates the cluster
// manager configuration document keyed by r.URL.Path. The subscription must
// be registered and the parent OpenShift cluster must exist; the raw request
// body is stored verbatim as the configuration's resources payload. It
// returns the JSON-encoded external (api-version specific) representation of
// the stored document.
func (f *frontend) _putOrPatchClusterManagerConfiguration(ctx context.Context, log *logrus.Entry, r *http.Request, header *http.Header, converter api.ClusterManagerConfigurationConverter) ([]byte, error) {
	body := r.Context().Value(middleware.ContextKeyBody).([]byte)
	correlationData := r.Context().Value(middleware.ContextKeyCorrelationData).(*api.CorrelationData)
	// TODO implement systemdata
	// systemData, _ := r.Context().Value(middleware.ContextKeySystemData).(*api.SystemData) // don't panic

	_, err := f.validateSubscriptionState(ctx, r.URL.Path, api.SubscriptionStateRegistered)
	if err != nil {
		return nil, err
	}

	originalPath := r.Context().Value(middleware.ContextKeyOriginalPath).(string)
	armResource, err := arm.ParseArmResourceId(originalPath)
	if err != nil {
		return nil, err
	}

	// The configuration is a child resource: refuse the write if its parent
	// cluster does not exist.
	clusterURL := strings.ToLower(armResource.ParentResourceToString())
	ocp, err := f.dbOpenShiftClusters.Get(ctx, clusterURL)
	if err != nil && !cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		return nil, err
	}
	if ocp == nil || cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "cluster does not exist.")
	}

	// Capture this Get's error instead of discarding it: NotFound just means
	// we are creating, but any other database failure must surface. (The
	// previous code discarded the error and checked a stale one from the
	// cluster lookup above.)
	ocmdoc, err := f.dbClusterManagerConfiguration.Get(ctx, r.URL.Path)
	if err != nil && !cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		return nil, err
	}

	isCreate := ocmdoc == nil
	if isCreate {
		ocmdoc = &api.ClusterManagerConfigurationDocument{
			ID:  f.dbClusterManagerConfiguration.NewUUID(),
			Key: r.URL.Path,
			ClusterManagerConfiguration: &api.ClusterManagerConfiguration{
				ID:                originalPath,
				Name:              armResource.SubResource.ResourceName,
				ClusterResourceId: clusterURL,
				Properties: api.ClusterManagerConfigurationProperties{
					Resources: body,
				},
			},
		}

		ocmdoc, err = f.dbClusterManagerConfiguration.Create(ctx, ocmdoc)
		if err != nil {
			return nil, err
		}
	} else if ocmdoc.ClusterManagerConfiguration != nil {
		// Update: replace the stored resources payload with the new body.
		ocmdoc.ClusterManagerConfiguration.Properties.Resources = body
	}

	ocmdoc.CorrelationData = correlationData
	// f.systemDataClusterManagerConfigurationEnricher(ocmdoc, systemData)

	ocmdoc, err = f.dbClusterManagerConfiguration.Update(ctx, ocmdoc)
	if err != nil {
		return nil, err
	}

	ext, err := converter.ToExternal(ocmdoc.ClusterManagerConfiguration)
	if err != nil {
		return nil, err
	}

	return json.MarshalIndent(ext, "", " ")
}
// enrichClusterManagerSystemData will selectively overwrite systemData fields based on
// arm inputs
// func enrichSyncSetSystemData(doc *api.ClusterManagerConfigurationDocument, systemData *api.SystemData) {
// if systemData == nil {
// return
// }
// if systemData.CreatedAt != nil {
// doc.SyncSet.SystemData.CreatedAt = systemData.CreatedAt
// }
// if systemData.CreatedBy != "" {
// doc.SyncSet.SystemData.CreatedBy = systemData.CreatedBy
// }
// if systemData.CreatedByType != "" {
// doc.SyncSet.SystemData.CreatedByType = systemData.CreatedByType
// }
// if systemData.LastModifiedAt != nil {
// doc.SyncSet.SystemData.LastModifiedAt = systemData.LastModifiedAt
// }
// if systemData.LastModifiedBy != "" {
// doc.SyncSet.SystemData.LastModifiedBy = systemData.LastModifiedBy
// }
// if systemData.LastModifiedByType != "" {
// doc.SyncSet.SystemData.LastModifiedByType = systemData.LastModifiedByType
// }
// }

Просмотреть файл

@ -49,10 +49,11 @@ type frontend struct {
baseLog *logrus.Entry
env env.Interface
dbAsyncOperations database.AsyncOperations
dbOpenShiftClusters database.OpenShiftClusters
dbSubscriptions database.Subscriptions
dbOpenShiftVersions database.OpenShiftVersions
dbAsyncOperations database.AsyncOperations
dbClusterManagerConfiguration database.ClusterManagerConfigurations
dbOpenShiftClusters database.OpenShiftClusters
dbSubscriptions database.Subscriptions
dbOpenShiftVersions database.OpenShiftVersions
apis map[string]*api.Version
m metrics.Emitter
@ -72,8 +73,8 @@ type frontend struct {
ready atomic.Value
// these helps us to test and mock easier
now func() time.Time
systemDataEnricher func(*api.OpenShiftClusterDocument, *api.SystemData)
now func() time.Time
systemDataClusterDocEnricher func(*api.OpenShiftClusterDocument, *api.SystemData)
}
// Runnable represents a runnable object
@ -87,6 +88,7 @@ func NewFrontend(ctx context.Context,
baseLog *logrus.Entry,
_env env.Interface,
dbAsyncOperations database.AsyncOperations,
dbClusterManagerConfiguration database.ClusterManagerConfigurations,
dbOpenShiftClusters database.OpenShiftClusters,
dbSubscriptions database.Subscriptions,
dbOpenShiftVersions database.OpenShiftVersions,
@ -97,26 +99,27 @@ func NewFrontend(ctx context.Context,
azureActionsFactory azureActionsFactory,
ocEnricherFactory ocEnricherFactory) (Runnable, error) {
f := &frontend{
auditLog: auditLog,
baseLog: baseLog,
env: _env,
dbAsyncOperations: dbAsyncOperations,
dbOpenShiftClusters: dbOpenShiftClusters,
dbSubscriptions: dbSubscriptions,
dbOpenShiftVersions: dbOpenShiftVersions,
apis: apis,
m: m,
aead: aead,
kubeActionsFactory: kubeActionsFactory,
azureActionsFactory: azureActionsFactory,
ocEnricherFactory: ocEnricherFactory,
auditLog: auditLog,
baseLog: baseLog,
env: _env,
dbAsyncOperations: dbAsyncOperations,
dbClusterManagerConfiguration: dbClusterManagerConfiguration,
dbOpenShiftClusters: dbOpenShiftClusters,
dbSubscriptions: dbSubscriptions,
dbOpenShiftVersions: dbOpenShiftVersions,
apis: apis,
m: m,
aead: aead,
kubeActionsFactory: kubeActionsFactory,
azureActionsFactory: azureActionsFactory,
ocEnricherFactory: ocEnricherFactory,
bucketAllocator: &bucket.Random{},
startTime: time.Now(),
now: time.Now,
systemDataEnricher: enrichSystemData,
now: time.Now,
systemDataClusterDocEnricher: enrichClusterSystemData,
}
l, err := f.env.Listen()
@ -179,6 +182,16 @@ func (f *frontend) authenticatedRoutes(r *mux.Router) {
s.Methods(http.MethodPatch).HandlerFunc(f.putOrPatchOpenShiftCluster).Name("putOrPatchOpenShiftCluster")
s.Methods(http.MethodPut).HandlerFunc(f.putOrPatchOpenShiftCluster).Name("putOrPatchOpenShiftCluster")
s = r.
Path("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/{ocmResourceType}/{ocmResourceName}").
Queries("api-version", "{api-version}").
Subrouter()
s.Methods(http.MethodDelete).HandlerFunc(f.deleteClusterManagerConfiguration).Name("deleteClusterManagerConfiguration")
s.Methods(http.MethodGet).HandlerFunc(f.getClusterManagerConfiguration).Name("getClusterManagerConfiguration")
s.Methods(http.MethodPatch).HandlerFunc(f.putOrPatchClusterManagerConfiguration).Name("putOrPatchClusterManagerConfiguration")
s.Methods(http.MethodPut).HandlerFunc(f.putOrPatchClusterManagerConfiguration).Name("putOrPatchClusterManagerConfiguration")
s = r.
Path("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}").
Queries("api-version", "{api-version}").

Просмотреть файл

@ -99,7 +99,7 @@ func TestListInstallVersions(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, nil, nil, nil, nil, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)

Просмотреть файл

@ -114,7 +114,7 @@ func TestDeleteOpenShiftCluster(t *testing.T) {
ti.subscriptionsClient.SetError(tt.dbError)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -97,7 +97,7 @@ func TestGetOpenShiftCluster(t *testing.T) {
ti.openShiftClustersClient.SetError(tt.dbError)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {

Просмотреть файл

@ -197,7 +197,7 @@ func TestListOpenShiftCluster(t *testing.T) {
aead := testdatabase.NewFakeAEAD()
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, aead, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, aead, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {

Просмотреть файл

@ -57,7 +57,6 @@ func (f *frontend) _putOrPatchOpenShiftCluster(ctx context.Context, log *logrus.
if err != nil && !cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
return nil, err
}
isCreate := doc == nil
if isCreate {
@ -176,7 +175,7 @@ func (f *frontend) _putOrPatchOpenShiftCluster(ctx context.Context, log *logrus.
// This will update systemData from the values in the header. Old values, which
// is not provided in the header must be preserved
f.systemDataEnricher(doc, systemData)
f.systemDataClusterDocEnricher(doc, systemData)
if isCreate {
// on create, make the cluster resourcegroup ID lower case to work
@ -251,9 +250,9 @@ func (f *frontend) _putOrPatchOpenShiftCluster(ctx context.Context, log *logrus.
return b, err
}
// enrichSystemData will selectively overwrite systemData fields based on
// enrichClusterSystemData will selectively overwrite systemData fields based on
// arm inputs
func enrichSystemData(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
func enrichClusterSystemData(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
if systemData == nil {
return
}

Просмотреть файл

@ -567,7 +567,7 @@ func TestPutOrPatchOpenShiftClusterAdminAPI(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {
@ -575,9 +575,9 @@ func TestPutOrPatchOpenShiftClusterAdminAPI(t *testing.T) {
}
f.(*frontend).bucketAllocator = bucket.Fixed(1)
var systemDataEnricherCalled bool
f.(*frontend).systemDataEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
systemDataEnricherCalled = true
var systemDataClusterDocEnricherCalled bool
f.(*frontend).systemDataClusterDocEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
systemDataClusterDocEnricherCalled = true
}
go f.Run(ctx, nil, nil)
@ -633,8 +633,8 @@ func TestPutOrPatchOpenShiftClusterAdminAPI(t *testing.T) {
t.Error(err)
}
if tt.wantSystemDataEnriched != systemDataEnricherCalled {
t.Error(systemDataEnricherCalled)
if tt.wantSystemDataEnriched != systemDataClusterDocEnricherCalled {
t.Error(systemDataClusterDocEnricherCalled)
}
})
}
@ -1392,7 +1392,7 @@ func TestPutOrPatchOpenShiftCluster(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, ti.openShiftVersionsDatabase, apis, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, ti.openShiftVersionsDatabase, apis, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {
@ -1401,9 +1401,9 @@ func TestPutOrPatchOpenShiftCluster(t *testing.T) {
f.(*frontend).bucketAllocator = bucket.Fixed(1)
f.(*frontend).now = func() time.Time { return mockCurrentTime }
var systemDataEnricherCalled bool
f.(*frontend).systemDataEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
systemDataEnricherCalled = true
var systemDataClusterDocEnricherCalled bool
f.(*frontend).systemDataClusterDocEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
systemDataClusterDocEnricherCalled = true
}
go f.Run(ctx, nil, nil)
@ -1457,8 +1457,8 @@ func TestPutOrPatchOpenShiftCluster(t *testing.T) {
}
}
if tt.wantSystemDataEnriched != systemDataEnricherCalled {
t.Error(systemDataEnricherCalled)
if tt.wantSystemDataEnriched != systemDataClusterDocEnricherCalled {
t.Error(systemDataClusterDocEnricherCalled)
}
})
}
@ -1694,7 +1694,7 @@ func TestPutOrPatchOpenShiftClusterValidated(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, ti.openShiftVersionsDatabase, api.APIs, &noop.Noop{}, nil, nil, nil, func(log *logrus.Entry, dialer proxy.Dialer, m metrics.Emitter) clusterdata.OpenShiftClusterEnricher {
return ti.enricher
})
if err != nil {
@ -1703,10 +1703,10 @@ func TestPutOrPatchOpenShiftClusterValidated(t *testing.T) {
f.(*frontend).bucketAllocator = bucket.Fixed(1)
f.(*frontend).now = func() time.Time { return mockCurrentTime }
var systemDataEnricherCalled bool
f.(*frontend).systemDataEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
enrichSystemData(doc, systemData)
systemDataEnricherCalled = true
var systemDataClusterDocEnricherCalled bool
f.(*frontend).systemDataClusterDocEnricher = func(doc *api.OpenShiftClusterDocument, systemData *api.SystemData) {
enrichClusterSystemData(doc, systemData)
systemDataClusterDocEnricherCalled = true
}
go f.Run(ctx, nil, nil)
@ -1769,8 +1769,8 @@ func TestPutOrPatchOpenShiftClusterValidated(t *testing.T) {
}
}
if tt.wantSystemDataEnriched != systemDataEnricherCalled {
t.Error(systemDataEnricherCalled)
if tt.wantSystemDataEnriched != systemDataClusterDocEnricherCalled {
t.Error(systemDataClusterDocEnricherCalled)
}
})
}
@ -1865,7 +1865,7 @@ func TestEnrichSystemData(t *testing.T) {
doc := &api.OpenShiftClusterDocument{
OpenShiftCluster: &api.OpenShiftCluster{},
}
enrichSystemData(doc, tt.systemData)
enrichClusterSystemData(doc, tt.systemData)
if !reflect.DeepEqual(doc, tt.expected) {
t.Error(cmp.Diff(doc, tt.expected))

Просмотреть файл

@ -267,7 +267,7 @@ func TestPostOpenShiftClusterCredentials(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -305,7 +305,7 @@ func TestPostOpenShiftClusterKubeConfigCredentials(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, apis, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -76,7 +76,7 @@ func TestSecurity(t *testing.T) {
log := logrus.NewEntry(logrus.StandardLogger())
auditHook, auditEntry := testlog.NewAudit()
f, err := NewFrontend(ctx, auditEntry, log, _env, nil, nil, nil, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, auditEntry, log, _env, nil, nil, nil, nil, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -74,6 +74,8 @@ type testInfra struct {
asyncOperationsDatabase database.AsyncOperations
billingClient *cosmosdb.FakeBillingDocumentClient
billingDatabase database.Billing
clusterManagerClient *cosmosdb.FakeClusterManagerConfigurationDocumentClient
clusterManagerDatabase database.ClusterManagerConfigurations
subscriptionsClient *cosmosdb.FakeSubscriptionDocumentClient
subscriptionsDatabase database.Subscriptions
openShiftVersionsClient *cosmosdb.FakeOpenShiftVersionDocumentClient
@ -169,6 +171,12 @@ func (ti *testInfra) WithOpenShiftVersions() *testInfra {
return ti
}
func (ti *testInfra) WithClusterManagerConfigurations() *testInfra {
ti.clusterManagerDatabase, ti.clusterManagerClient = testdatabase.NewFakeClusterManager()
ti.fixture.WithClusterManagerConfigurations(ti.clusterManagerDatabase)
return ti
}
func (ti *testInfra) done() {
ti.controller.Finish()
ti.cli.CloseIdleConnections()

Просмотреть файл

@ -244,7 +244,7 @@ func TestPutSubscription(t *testing.T) {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.asyncOperationsDatabase, ti.clusterManagerDatabase, ti.openShiftClustersDatabase, ti.subscriptionsDatabase, nil, api.APIs, &noop.Noop{}, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}

Просмотреть файл

@ -5,6 +5,7 @@ package swagger
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
@ -34,7 +35,9 @@ func (g *generator) generateExamples(outputDir string, s *Swagger) error {
}{
Responses: Responses{},
}
for _, v := range op.Schemes {
fmt.Println("scheme: ", v)
}
for _, param := range op.Parameters {
switch param := param.(type) {
case Reference:
@ -59,6 +62,11 @@ func (g *generator) generateExamples(outputDir string, s *Swagger) error {
Name: "location",
Parameter: "location",
})
case "../../../../../common-types/resource-management/" + g.commonTypesVersion + "/types.json#/parameters/ClusterParameter":
example.Parameters = append(example.Parameters, NameParameter{
Name: "cluster",
Parameter: "cluster",
})
}
case Parameter:
switch param.Name {
@ -67,6 +75,10 @@ func (g *generator) generateExamples(outputDir string, s *Swagger) error {
Name: param.Name,
Parameter: "resourceName",
})
example.Parameters = append(example.Parameters, NameParameter{
Name: "syncSetResourceName",
Parameter: "syncSetResourceName",
})
case "parameters":
switch param.Schema.Ref {
case "#/definitions/OpenShiftCluster":
@ -79,6 +91,16 @@ func (g *generator) generateExamples(outputDir string, s *Swagger) error {
Name: param.Name,
Parameter: g.exampleOpenShiftClusterPatchParameter(),
})
case "#/definitions/Configuration":
example.Parameters = append(example.Parameters, NameParameter{
Name: param.Name,
Parameter: g.exampleSyncSetPutParameter,
})
case "#/definitions/Syncset":
example.Parameters = append(example.Parameters, NameParameter{
Name: param.Name,
Parameter: g.exampleSyncSetPutParameter,
})
}
}
}
@ -94,6 +116,10 @@ func (g *generator) generateExamples(outputDir string, s *Swagger) error {
var body interface{}
if response.Schema != nil {
switch response.Schema.Ref {
case "#/definitions/SyncSet":
body = g.exampleSyncSetResponse()
case "#/definitions/SyncSetList":
body = g.exampleSyncSetListResponse()
case "#/definitions/OpenShiftCluster":
body = g.exampleOpenShiftClusterResponse()
case "#/definitions/OpenShiftClusterCredentials":

Просмотреть файл

@ -19,6 +19,10 @@ const apiv20220401Path = "github.com/Azure/ARO-RP/pkg/api/v20220401"
const apiv20220904Path = "github.com/Azure/ARO-RP/pkg/api/v20220904"
type generator struct {
exampleSyncSetPutParameter func() interface{}
exampleSyncSetPatchParameter func() interface{}
exampleSyncSetResponse func() interface{}
exampleSyncSetListResponse func() interface{}
exampleOpenShiftClusterPutParameter func() interface{}
exampleOpenShiftClusterPatchParameter func() interface{}
exampleOpenShiftClusterResponse func() interface{}
@ -31,6 +35,7 @@ type generator struct {
systemData bool
kubeConfig bool
installVersionList bool
clusterManager bool
xmsEnum []string
xmsSecretList []string
xmsIdentifiers []string
@ -81,6 +86,10 @@ var apis = map[string]*generator{
kubeConfig: true,
},
apiv20220904Path: {
exampleSyncSetPutParameter: v20220904.ExampleSyncSetPutParameter,
exampleSyncSetPatchParameter: v20220904.ExampleSyncSetPatchParameter,
exampleSyncSetResponse: v20220904.ExampleSyncSetResponse,
exampleSyncSetListResponse: v20220904.ExampleSyncSetListResponse,
exampleOpenShiftClusterPutParameter: v20220904.ExampleOpenShiftClusterPutParameter,
exampleOpenShiftClusterPatchParameter: v20220904.ExampleOpenShiftClusterPatchParameter,
exampleOpenShiftClusterResponse: v20220904.ExampleOpenShiftClusterResponse,
@ -95,6 +104,7 @@ var apis = map[string]*generator{
xmsIdentifiers: []string{},
commonTypesVersion: "v3",
systemData: true,
clusterManager: true,
installVersionList: true,
kubeConfig: true,
},

Просмотреть файл

@ -20,6 +20,10 @@ import (
// n==4 action on resource expecting input payload
// n==5 patch action on resource expecting input payload
// n==6 list across subscription and location
// n==7 action on child resource not expecting input payload
// n==8 action on child resource expecting input payload
// n==9 patch action on child resource expecting input payload
// n==10 list child resources belonging to a parent resource
func (g *generator) populateParameters(n int, typ, friendlyName string) (s []interface{}) {
s = []interface{}{
@ -27,17 +31,17 @@ func (g *generator) populateParameters(n int, typ, friendlyName string) (s []int
Ref: "../../../../../common-types/resource-management/" + g.commonTypesVersion + "/types.json#/parameters/ApiVersionParameter",
},
}
if n > 0 {
s = append(s, Reference{
Ref: "../../../../../common-types/resource-management/" + g.commonTypesVersion + "/types.json#/parameters/SubscriptionIdParameter",
})
if n == 6 {
s = append(s, Reference{
Ref: "../../../../../common-types/resource-management/" + g.commonTypesVersion + "/types.json#/parameters/LocationParameter",
})
return
}
}
if n == 6 {
s = append(s, Reference{
Ref: "../../../../../common-types/resource-management/" + g.commonTypesVersion + "/types.json#/parameters/LocationParameter",
})
return
}
if n > 1 {
@ -56,7 +60,18 @@ func (g *generator) populateParameters(n int, typ, friendlyName string) (s []int
})
}
if n > 3 {
if n == 7 {
s = append(s, Parameter{
Name: "syncSetResourceName",
In: "path",
Description: "The name of the " + friendlyName + " resource.",
Required: true,
Type: "string",
})
return
}
if n > 3 && n != 10 {
s = append(s, Parameter{
Name: "parameters",
In: "body",
@ -68,10 +83,9 @@ func (g *generator) populateParameters(n int, typ, friendlyName string) (s []int
})
}
if n > 4 {
if n > 4 && friendlyName != "SyncSet" {
s[len(s)-1].(Parameter).Schema.Ref += "Update"
}
return
}
@ -105,8 +119,60 @@ func (g *generator) populateResponses(typ string, isDelete bool, statusCodes ...
return
}
// populateChildResourcePaths populates the paths for a child resource of a top level ARM resource
func (g *generator) populateChildResourcePaths(ps Paths, resourceProviderNamespace string, resourceType string, childResourceType string, friendlyName string) {
	titleCaser := cases.Title(language.Und, cases.NoLower)

	// List path, built from the parameters:
	// .../providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/{childResourceType}s
	// Parameter mode 10 = "list child resources belonging to a parent resource".
	ps["/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/"+resourceProviderNamespace+"/"+resourceType+"/{resourceName}/"+childResourceType+"s"] = &PathItem{
		Get: &Operation{
			Tags:        []string{titleCaser.String(childResourceType) + "s"},
			Summary:     "Lists " + friendlyName + "s that belong to that Azure Red Hat OpenShift Cluster.",
			Description: "The operation returns properties of each " + friendlyName + ".",
			OperationID: titleCaser.String(childResourceType) + "s_List",
			Parameters:  g.populateParameters(10, titleCaser.String(childResourceType), friendlyName),
			Responses:   g.populateResponses(titleCaser.String(childResourceType)+"List", false, http.StatusOK),
			Pageable: &Pageable{
				NextLinkName: "nextLink",
			},
		},
	}

	// NOTE(review): unlike the list path above, this GET path is hardcoded to
	// Microsoft.RedHatOpenShift/openshiftclusters/.../syncSet/{syncSetResourceName}
	// instead of being built from resourceProviderNamespace/resourceType/
	// childResourceType, and uses a singular "syncSet" segment where the list
	// path uses childResourceType+"s" — confirm whether this divergence is
	// intentional before reusing this helper for other child resource types.
	// Parameter mode 7 = "action on child resource not expecting input payload".
	ps["/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openshiftclusters/{resourceName}/syncSet/{syncSetResourceName}"] = &PathItem{
		Get: &Operation{
			Tags:        []string{titleCaser.String(childResourceType) + "s"},
			Summary:     "Gets a " + friendlyName + " with the specified subscription, resource group and resource name.",
			Description: "The operation returns properties of a " + friendlyName + ".",
			OperationID: titleCaser.String(childResourceType) + "s_Get",
			Parameters:  g.populateParameters(7, titleCaser.String(childResourceType), friendlyName),
			Responses:   g.populateResponses(titleCaser.String(childResourceType), false, http.StatusOK),
		},
		// The write operations below appear deliberately left commented out
		// (GET-only for now) — TODO(review): confirm before enabling.
		// Put: &Operation{
		// 	Tags:        []string{titleCaser.String(childResourceType) + "s"},
		// 	Summary:     "Creates or updates a " + friendlyName + " with the specified subscription, resource group and resource name.",
		// 	Description: "The operation returns properties of a " + friendlyName + ".",
		// 	OperationID: titleCaser.String(childResourceType) + "s_CreateOrUpdate",
		// 	Parameters:  g.populateParameters(4, titleCaser.String(childResourceType), friendlyName),
		// 	Responses:   g.populateResponses(titleCaser.String(childResourceType), false, http.StatusOK, http.StatusCreated),
		// },
		// Delete: &Operation{
		// 	Tags:        []string{titleCaser.String(childResourceType) + "s"},
		// 	Summary:     "Deletes a " + friendlyName + " with the specified subscription, resource group and resource name.",
		// 	Description: "The operation returns nothing.",
		// 	OperationID: titleCaser.String(childResourceType) + "s_Delete",
		// 	Parameters:  g.populateParameters(3, titleCaser.String(childResourceType), friendlyName),
		// 	Responses:   g.populateResponses(titleCaser.String(childResourceType), true, http.StatusAccepted, http.StatusNoContent),
		// },
		// Patch: &Operation{
		// 	Tags:        []string{titleCaser.String(childResourceType) + "s"},
		// 	Summary:     "Creates or updates a " + friendlyName + " with the specified subscription, resource group and resource name.",
		// 	Description: "The operation returns properties of a " + friendlyName + ".",
		// 	OperationID: titleCaser.String(childResourceType) + "s_Update",
		// 	Parameters:  g.populateParameters(5, titleCaser.String(childResourceType), friendlyName),
		// 	Responses:   g.populateResponses(titleCaser.String(childResourceType), false, http.StatusOK, http.StatusCreated),
		// },
	}
}
// populateTopLevelPaths populates the paths for a top level ARM resource
func (g *generator) populateTopLevelPaths(resourceProviderNamespace, resourceType, friendlyName string) (ps Paths) {
func (g *generator) populateTopLevelPaths(resourceProviderNamespace string, resourceType string, friendlyName string) (ps Paths) {
titleCaser := cases.Title(language.Und, cases.NoLower)
ps = Paths{}
@ -185,7 +251,6 @@ func populateExamples(ps Paths) {
if op == nil {
continue
}
op.Examples = map[string]Reference{
op.Summary: {
Ref: "./examples/" + op.OperationID + ".json",

Просмотреть файл

@ -95,13 +95,17 @@ func Run(api, outputDir string) error {
Tags: []string{"InstallVersions"},
Summary: "Lists all OpenShift versions available to install in the specified location.",
Description: "The operation returns the installable OpenShift versions as strings.",
OperationID: "List_Install_Versions",
OperationID: "InstallVersions_List",
Parameters: g.populateParameters(6, "InstallVersions", "Install Versions"),
Responses: g.populateResponses("InstallVersions", false, http.StatusOK),
},
}
}
if g.clusterManager {
g.populateChildResourcePaths(s.Paths, "Microsoft.RedHatOpenShift", "openShiftCluster", "syncSet", "SyncSet")
}
populateExamples(s.Paths)
names := []string{"OpenShiftClusterList", "OpenShiftClusterCredentials"}
if g.kubeConfig {
@ -112,6 +116,10 @@ func Run(api, outputDir string) error {
names = append(names, "InstallVersions")
}
if g.clusterManager {
names = append(names, "SyncSet", "SyncSetList")
}
err = define(s.Definitions, api, g.xmsEnum, g.xmsSecretList, g.xmsIdentifiers, names...)
if err != nil {
return err
@ -162,6 +170,20 @@ func Run(api, outputDir string) error {
}
s.Definitions[azureResource].Properties = properties
if _, ok := s.Definitions["SyncSet"]; ok {
properties = nil
for _, property := range s.Definitions["SyncSet"].Properties {
if property.Name == "properties" {
if property.Schema == nil {
property.Schema = &Schema{}
}
property.Schema.ClientFlatten = true
properties = append(properties, property)
}
}
s.Definitions["SyncSet"].Properties = properties
}
if g.systemData {
s.defineSystemData([]string{azureResource, azureResource + "Update"}, g.commonTypesVersion)
}

Просмотреть файл

@ -154,8 +154,18 @@ func (tw *typeWalker) schemaFromType(t types.Type, deps map[*types.Named]struct{
}
s.Properties = append(s.Properties, ns)
}
if field.Name() == "proxyResource" {
s.AllOf = []Schema{
{
Ref: "../../../../../common-types/resource-management/v3/types.json#/definitions/ProxyResource",
},
}
}
}
case *types.Interface:
s.Type = "object"
default:
panic(t)
}
@ -173,7 +183,6 @@ func (tw *typeWalker) _define(definitions Definitions, t *types.Named) {
if path != nil {
s.Description = strings.Trim(path[len(path)-2].(*ast.GenDecl).Doc.Text(), "\n")
s.Enum = tw.enums[t]
// Enum extensions allows non-breaking api changes
// https://github.com/Azure/autorest/tree/master/docs/extensions#x-ms-enum
c := strings.Split(t.String(), ".")
@ -200,7 +209,6 @@ func (tw *typeWalker) _define(definitions Definitions, t *types.Named) {
// define adds a Definition for the named type
func (tw *typeWalker) define(definitions Definitions, name string) {
o := tw.pkg.Types.Scope().Lookup(name)
tw._define(definitions, o.(*types.TypeName).Type().(*types.Named))
}
@ -214,6 +222,5 @@ func define(definitions Definitions, pkgname string, xmsEnumList, xmsSecretList
for _, name := range names {
th.define(definitions, name)
}
return nil
}

Просмотреть файл

@ -0,0 +1,65 @@
package arm
import (
"reflect"
"testing"
)
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// TestArmResources exercises ParseArmResourceId against a well-formed
// resource ID carrying exactly one child resource, and against malformed
// IDs (too few or too many path segments) that must be rejected.
func TestArmResources(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		want    *ArmResource
		wantErr bool
		err     string
	}{
		{
			name:  "happy path split",
			input: "/subscriptions/abc/resourcegroups/v4-eastus/providers/Microsoft.RedHatOpenShift/openshiftclusters/cluster1/syncSets/syncSet1",
			want: &ArmResource{
				SubscriptionID: "abc",
				ResourceGroup:  "v4-eastus",
				Provider:       "Microsoft.RedHatOpenShift",
				ResourceName:   "cluster1",
				ResourceType:   "openshiftclusters",
				SubResource: SubResource{
					ResourceName: "syncSet1",
					ResourceType: "syncSets",
				},
			},
		},
		{
			name:    "sad path - bad input - missing subresources",
			input:   "/subscriptions/abc/resourcegroups/v4-eastus/providers/Microsoft.RedHatOpenShift/openshiftclusters/cluster1",
			err:     "parsing failed for /subscriptions/abc/resourcegroups/v4-eastus/providers/Microsoft.RedHatOpenShift/openshiftclusters/cluster1. Invalid resource Id format",
			wantErr: true,
		},
		{
			name:    "sad path - bad input - missing cluster resource",
			input:   "/subscriptions/abc/resourcegroups/v4-eastus/providers",
			err:     "parsing failed for /subscriptions/abc/resourcegroups/v4-eastus/providers. Invalid resource Id format",
			wantErr: true,
		},
		{
			name:    "sad path - bad input - too many nested resource",
			input:   "/subscriptions/abc/resourcegroups/v4-eastus/providers/Microsoft.RedHatOpenShift/openshiftclusters/cluster1/syncSets/syncset1/nextResource",
			err:     "parsing failed for /subscriptions/abc/resourcegroups/v4-eastus/providers/Microsoft.RedHatOpenShift/openshiftclusters/cluster1/syncSets/syncset1/nextResource. Invalid resource Id format",
			wantErr: true,
		},
	}

	for _, test := range tests {
		test := test // loop variable capture for pre-Go 1.22 toolchains
		t.Run(test.name, func(t *testing.T) {
			actual, err := ParseArmResourceId(test.input)
			if test.wantErr {
				// The original fell through to the DeepEqual comparison on
				// unexpected success, producing a confusing failure message;
				// require the error explicitly instead.
				if err == nil {
					t.Fatalf("want error %q, got nil", test.err)
				}
				if err.Error() != test.err {
					t.Fatalf("want %v, got %v", test.err, err)
				}
				return
			}
			// The original never failed directly on an unexpected error,
			// only indirectly via the nil-result comparison below.
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if !reflect.DeepEqual(actual, test.want) {
				t.Fatalf("want %v, got %v", test.want, actual)
			}
		})
	}
}

62
pkg/util/arm/resources.go Normal file
Просмотреть файл

@ -0,0 +1,62 @@
package arm
import (
"fmt"
"regexp"
"strings"
)
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
// ArmResource represents a resource and its child resources.
// Typically we would use the autorest package for this, but
// it does not have support for subresources.
type ArmResource struct {
	SubscriptionID string
	ResourceGroup  string
	Provider       string
	ResourceName   string
	ResourceType   string
	SubResource    SubResource
}

// SubResource represents an ARM Proxy Resource nested under a top-level
// resource. It may itself carry a deeper SubResource, allowing further
// nesting.
type SubResource struct {
	ResourceName string
	ResourceType string
	SubResource  *SubResource
}

// ParentResourceToString returns the Azure resource ID of the parent
// object, i.e. the ID without the trailing child-resource segments.
// (Comment typo "ParentResourcetoString" fixed to match the method name.)
func (r ArmResource) ParentResourceToString() string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
}

// String returns the full Azure resource ID, including the first-level
// child resource type and name.
func (r ArmResource) String() string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName, r.SubResource.ResourceType, r.SubResource.ResourceName)
}

// resourceIDPattern matches an ARM resource ID with exactly one level of
// child resource:
// /subscriptions/{sub}/resourceGroups/{rg}/providers/{provider}/{type}/{name}/{childType}/{childName}
// It is compiled once at package initialization rather than on every
// ParseArmResourceId call (regexp compilation is comparatively expensive).
var resourceIDPattern = regexp.MustCompile(`(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+?)/(.+?)/(.+)`)

// ParseArmResourceId parses resourceId into an ArmResource. The ID must
// contain exactly one child (type/name) pair after the top-level
// resource; anything shallower or deeper is rejected with an error.
func ParseArmResourceId(resourceId string) (*ArmResource, error) {
	match := resourceIDPattern.FindStringSubmatch(resourceId)
	// 8 = whole match + 7 capture groups. A "/" remaining in the final
	// (greedy) group means there are extra path segments beyond one
	// child level, which this type cannot represent.
	if len(match) != 8 || strings.Contains(match[7], "/") {
		return nil, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceId)
	}

	result := &ArmResource{
		SubscriptionID: match[1],
		ResourceGroup:  match[2],
		Provider:       match[3],
		ResourceType:   match[4],
		ResourceName:   match[5],
		SubResource: SubResource{
			ResourceType: match[6],
			ResourceName: match[7],
		},
	}
	return result, nil
}

Просмотреть файл

@ -36,7 +36,7 @@ func (c *openShiftClustersClient) DeleteAndWait(ctx context.Context, resourceGro
}
func (c *openShiftClustersClient) List(ctx context.Context) (clusters []mgmtredhatopenshift20220904.OpenShiftCluster, err error) {
page, err := c.OpenShiftClustersClient.ListMethod(ctx)
page, err := c.OpenShiftClustersClient.List(ctx)
if err != nil {
return nil, err
}

Просмотреть файл

@ -23,7 +23,7 @@ from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import AzureRedHatOpenShiftClientConfiguration
from .operations import ListOperations, OpenShiftClustersOperations, Operations
from .operations import InstallVersionsOperations, OpenShiftClustersOperations, Operations, SyncSetsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@ -37,11 +37,14 @@ class AzureRedHatOpenShiftClient(object):
:ivar operations: Operations operations
:vartype operations: azure.mgmt.redhatopenshift.v2022_09_04.operations.Operations
:ivar list: ListOperations operations
:vartype list: azure.mgmt.redhatopenshift.v2022_09_04.operations.ListOperations
:ivar install_versions: InstallVersionsOperations operations
:vartype install_versions:
azure.mgmt.redhatopenshift.v2022_09_04.operations.InstallVersionsOperations
:ivar open_shift_clusters: OpenShiftClustersOperations operations
:vartype open_shift_clusters:
azure.mgmt.redhatopenshift.v2022_09_04.operations.OpenShiftClustersOperations
:ivar sync_sets: SyncSetsOperations operations
:vartype sync_sets: azure.mgmt.redhatopenshift.v2022_09_04.operations.SyncSetsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
@ -71,8 +74,9 @@ class AzureRedHatOpenShiftClient(object):
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.list = ListOperations(self._client, self._config, self._serialize, self._deserialize)
self.install_versions = InstallVersionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.open_shift_clusters = OpenShiftClustersOperations(self._client, self._config, self._serialize, self._deserialize)
self.sync_sets = SyncSetsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(

Просмотреть файл

@ -30,8 +30,12 @@ try:
from ._models_py3 import OpenShiftClusterUpdate
from ._models_py3 import Operation
from ._models_py3 import OperationList
from ._models_py3 import ProxyResource
from ._models_py3 import Resource
from ._models_py3 import ServicePrincipalProfile
from ._models_py3 import SyncSet
from ._models_py3 import SyncSetList
from ._models_py3 import SyncSetSpec
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import WorkerProfile
@ -51,8 +55,12 @@ except (SyntaxError, ImportError):
from ._models import OpenShiftClusterUpdate # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationList # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import Resource # type: ignore
from ._models import ServicePrincipalProfile # type: ignore
from ._models import SyncSet # type: ignore
from ._models import SyncSetList # type: ignore
from ._models import SyncSetSpec # type: ignore
from ._models import SystemData # type: ignore
from ._models import TrackedResource # type: ignore
from ._models import WorkerProfile # type: ignore
@ -81,8 +89,12 @@ __all__ = [
'OpenShiftClusterUpdate',
'Operation',
'OperationList',
'ProxyResource',
'Resource',
'ServicePrincipalProfile',
'SyncSet',
'SyncSetList',
'SyncSetSpec',
'SystemData',
'TrackedResource',
'WorkerProfile',

Просмотреть файл

@ -794,6 +794,47 @@ class OperationList(msrest.serialization.Model):
self.next_link = kwargs.get('next_link', None)
# NOTE(review): autorest-generated msrest model (kwargs flavor) — change the
# swagger and regenerate rather than hand-editing this class.
class ProxyResource(Resource):
    """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SystemData
    """

    # All fields are server-populated (read-only); values sent by the
    # client are ignored.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ProxyResource, self).__init__(**kwargs)
class ServicePrincipalProfile(msrest.serialization.Model):
"""ServicePrincipalProfile represents a service principal profile.
@ -823,6 +864,161 @@ class ServicePrincipalProfile(msrest.serialization.Model):
self.client_secret = kwargs.get('client_secret', None)
# NOTE(review): autorest-generated msrest model (kwargs flavor) — change the
# swagger and regenerate rather than hand-editing this class.
class SyncSet(ProxyResource):
    """SyncSet represents a SyncSet for an Azure Red Hat OpenShift Cluster.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SystemData
    :ivar cluster_resource_id: The parent Azure Red Hat OpenShift resourceID.
    :vartype cluster_resource_id: str
    :ivar api_version: APIVersion for the SyncSet.
    :vartype api_version: str
    :ivar kind: SyncSet kind.
    :vartype kind: str
    :ivar metadata: Metadata for the SyncSet.
    :vartype metadata: dict[str, str]
    :ivar spec: The SyncSet Specification.
    :vartype spec: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSetSpec
    :ivar cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
    :vartype cluster_deployment_refs: any
    :ivar resources: Resources represents the SyncSets configuration.
    :vartype resources: dict[str, str]
    :ivar status: The status of the object.
    :vartype status: str
    """

    # The ARM envelope fields are server-populated (read-only).
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # Dotted "properties.*" keys flatten the nested ARM properties bag onto
    # top-level Python attributes during (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'cluster_resource_id': {'key': 'properties.clusterResourceId', 'type': 'str'},
        'api_version': {'key': 'properties.apiVersion', 'type': 'str'},
        'kind': {'key': 'properties.kind', 'type': 'str'},
        'metadata': {'key': 'properties.metadata', 'type': '{str}'},
        'spec': {'key': 'properties.spec', 'type': 'SyncSetSpec'},
        'cluster_deployment_refs': {'key': 'properties.clusterDeploymentRefs', 'type': 'object'},
        'resources': {'key': 'properties.resources', 'type': '{str}'},
        'status': {'key': 'properties.status', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword cluster_resource_id: The parent Azure Red Hat OpenShift resourceID.
        :paramtype cluster_resource_id: str
        :keyword api_version: APIVersion for the SyncSet.
        :paramtype api_version: str
        :keyword kind: SyncSet kind.
        :paramtype kind: str
        :keyword metadata: Metadata for the SyncSet.
        :paramtype metadata: dict[str, str]
        :keyword spec: The SyncSet Specification.
        :paramtype spec: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSetSpec
        :keyword cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster
         Deployment.
        :paramtype cluster_deployment_refs: any
        :keyword resources: Resources represents the SyncSets configuration.
        :paramtype resources: dict[str, str]
        :keyword status: The status of the object.
        :paramtype status: str
        """
        super(SyncSet, self).__init__(**kwargs)
        self.cluster_resource_id = kwargs.get('cluster_resource_id', None)
        self.api_version = kwargs.get('api_version', None)
        self.kind = kwargs.get('kind', None)
        self.metadata = kwargs.get('metadata', None)
        self.spec = kwargs.get('spec', None)
        self.cluster_deployment_refs = kwargs.get('cluster_deployment_refs', None)
        self.resources = kwargs.get('resources', None)
        self.status = kwargs.get('status', None)
# NOTE(review): autorest-generated msrest model (kwargs flavor) — change the
# swagger and regenerate rather than hand-editing this class.
class SyncSetList(msrest.serialization.Model):
    """SyncSetList represents a list of SyncSets.

    :ivar value:
    :vartype value: list[~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSet]
    :ivar next_link: The link used to get the next page of operations.
    :vartype next_link: str
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[SyncSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword value:
        :paramtype value: list[~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSet]
        :keyword next_link: The link used to get the next page of operations.
        :paramtype next_link: str
        """
        super(SyncSetList, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
# NOTE(review): autorest-generated msrest model (kwargs flavor) — change the
# swagger and regenerate rather than hand-editing this class.
class SyncSetSpec(msrest.serialization.Model):
    """SyncSetSpec.

    :ivar cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
    :vartype cluster_deployment_refs: any
    :ivar resources: Resources represents the SyncSets configuration.
    :vartype resources: dict[str, any]
    :ivar status: The status of the object.
    :vartype status: str
    """

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'cluster_deployment_refs': {'key': 'clusterDeploymentRefs', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '{object}'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster
         Deployment.
        :paramtype cluster_deployment_refs: any
        :keyword resources: Resources represents the SyncSets configuration.
        :paramtype resources: dict[str, any]
        :keyword status: The status of the object.
        :paramtype status: str
        """
        super(SyncSetSpec, self).__init__(**kwargs)
        self.cluster_deployment_refs = kwargs.get('cluster_deployment_refs', None)
        self.resources = kwargs.get('resources', None)
        self.status = kwargs.get('status', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.

Просмотреть файл

@ -15,7 +15,7 @@
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
@ -876,6 +876,47 @@ class OperationList(msrest.serialization.Model):
self.next_link = next_link
# NOTE(review): autorest-generated msrest model (py3 variant) — change the
# swagger and regenerate rather than hand-editing this class.
class ProxyResource(Resource):
    """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SystemData
    """

    # All fields are server-populated (read-only); values sent by the
    # client are ignored.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ProxyResource, self).__init__(**kwargs)
class ServicePrincipalProfile(msrest.serialization.Model):
"""ServicePrincipalProfile represents a service principal profile.
@ -908,6 +949,177 @@ class ServicePrincipalProfile(msrest.serialization.Model):
self.client_secret = client_secret
class SyncSet(ProxyResource):
    """A SyncSet for an Azure Red Hat OpenShift Cluster.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SystemData
    :ivar cluster_resource_id: The parent Azure Red Hat OpenShift resourceID.
    :vartype cluster_resource_id: str
    :ivar api_version: APIVersion for the SyncSet.
    :vartype api_version: str
    :ivar kind: SyncSet kind.
    :vartype kind: str
    :ivar metadata: Metadata for the SyncSet.
    :vartype metadata: dict[str, str]
    :ivar spec: The SyncSet Specification.
    :vartype spec: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSetSpec
    :ivar cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
    :vartype cluster_deployment_refs: any
    :ivar resources: Resources represents the SyncSets configuration.
    :vartype resources: dict[str, str]
    :ivar status: The status of the object.
    :vartype status: str
    """

    # Top-level ARM envelope fields are server-populated (read-only).
    _validation = {
        field: {'readonly': True}
        for field in ('id', 'name', 'type', 'system_data')
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'cluster_resource_id': {'key': 'properties.clusterResourceId', 'type': 'str'},
        'api_version': {'key': 'properties.apiVersion', 'type': 'str'},
        'kind': {'key': 'properties.kind', 'type': 'str'},
        'metadata': {'key': 'properties.metadata', 'type': '{str}'},
        'spec': {'key': 'properties.spec', 'type': 'SyncSetSpec'},
        'cluster_deployment_refs': {'key': 'properties.clusterDeploymentRefs', 'type': 'object'},
        'resources': {'key': 'properties.resources', 'type': '{str}'},
        'status': {'key': 'properties.status', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        cluster_resource_id: Optional[str] = None,
        api_version: Optional[str] = None,
        kind: Optional[str] = None,
        metadata: Optional[Dict[str, str]] = None,
        spec: Optional["SyncSetSpec"] = None,
        cluster_deployment_refs: Optional[Any] = None,
        resources: Optional[Dict[str, str]] = None,
        status: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword cluster_resource_id: The parent Azure Red Hat OpenShift resourceID.
        :paramtype cluster_resource_id: str
        :keyword api_version: APIVersion for the SyncSet.
        :paramtype api_version: str
        :keyword kind: SyncSet kind.
        :paramtype kind: str
        :keyword metadata: Metadata for the SyncSet.
        :paramtype metadata: dict[str, str]
        :keyword spec: The SyncSet Specification.
        :paramtype spec: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSetSpec
        :keyword cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster
         Deployment.
        :paramtype cluster_deployment_refs: any
        :keyword resources: Resources represents the SyncSets configuration.
        :paramtype resources: dict[str, str]
        :keyword status: The status of the object.
        :paramtype status: str
        """
        # Envelope fields (id/name/type/system_data) are handled by the base class.
        super().__init__(**kwargs)
        self.cluster_resource_id = cluster_resource_id
        self.api_version = api_version
        self.kind = kind
        self.metadata = metadata
        self.spec = spec
        self.cluster_deployment_refs = cluster_deployment_refs
        self.resources = resources
        self.status = status
class SyncSetList(msrest.serialization.Model):
    """A paged list of SyncSets.

    :ivar value: The SyncSets on this page.
    :vartype value: list[~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSet]
    :ivar next_link: The link used to get the next page of operations.
    :vartype next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SyncSet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["SyncSet"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: The SyncSets on this page.
        :paramtype value: list[~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSet]
        :keyword next_link: The link used to get the next page of operations.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SyncSetSpec(msrest.serialization.Model):
    """The specification portion of a SyncSet.

    :ivar cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster Deployment.
    :vartype cluster_deployment_refs: any
    :ivar resources: Resources represents the SyncSets configuration.
    :vartype resources: dict[str, any]
    :ivar status: The status of the object.
    :vartype status: str
    """

    _attribute_map = {
        'cluster_deployment_refs': {'key': 'clusterDeploymentRefs', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '{object}'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        cluster_deployment_refs: Optional[Any] = None,
        resources: Optional[Dict[str, Any]] = None,
        status: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword cluster_deployment_refs: ClusterDeploymentRefs map SyncSets to a Hive Cluster
         Deployment.
        :paramtype cluster_deployment_refs: any
        :keyword resources: Resources represents the SyncSets configuration.
        :paramtype resources: dict[str, any]
        :keyword status: The status of the object.
        :paramtype status: str
        """
        super().__init__(**kwargs)
        self.cluster_deployment_refs = cluster_deployment_refs
        self.resources = resources
        self.status = status
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.

Просмотреть файл

@ -15,11 +15,13 @@
# --------------------------------------------------------------------------
from ._operations import Operations
from ._list_operations import ListOperations
from ._install_versions_operations import InstallVersionsOperations
from ._open_shift_clusters_operations import OpenShiftClustersOperations
from ._sync_sets_operations import SyncSetsOperations
__all__ = [
'Operations',
'ListOperations',
'InstallVersionsOperations',
'OpenShiftClustersOperations',
'SyncSetsOperations',
]

Просмотреть файл

@ -38,7 +38,7 @@ _SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_install_versions_request(
def build_list_request(
subscription_id, # type: str
location, # type: str
**kwargs # type: Any
@ -73,8 +73,8 @@ def build_install_versions_request(
)
# fmt: on
class ListOperations(object):
"""ListOperations operations.
class InstallVersionsOperations(object):
"""InstallVersionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
@ -96,7 +96,7 @@ class ListOperations(object):
self._config = config
@distributed_trace
def install_versions(
def list(
self,
location, # type: str
**kwargs # type: Any
@ -122,11 +122,11 @@ class ListOperations(object):
api_version = kwargs.pop('api_version', "2022-09-04") # type: str
request = build_install_versions_request(
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
api_version=api_version,
template_url=self.install_versions.metadata['url'],
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
@ -149,5 +149,5 @@ class ListOperations(object):
return deserialized
install_versions.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.RedHatOpenShift/locations/{location}/listinstallversions"} # type: ignore
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.RedHatOpenShift/locations/{location}/listinstallversions"} # type: ignore

Просмотреть файл

@ -0,0 +1,288 @@
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    resource_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists SyncSets under a cluster.

    :param subscription_id: Azure subscription ID (validated to min length 1).
    :param resource_group_name: Resource group name (validated to 1-90 chars).
    :param resource_name: Name of the OpenShift cluster resource in the URL path.
    :keyword api_version: Service API version; defaults to "2022-09-04".
    :keyword template_url: Optional override of the URL template.
    :return: An HttpRequest ready to be run through the client pipeline.

    NOTE(review): this template uses the path segment ``openShiftCluster``
    (singular), while ``build_get_request`` below uses ``openshiftclusters``
    (lowercase plural); ARM resource types are conventionally camelCase
    plural (``openShiftClusters``) — confirm against the swagger definition.
    """
    api_version = kwargs.pop('api_version', "2022-09-04")  # type: str

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftCluster/{resourceName}/syncSets")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    # Remaining kwargs (after the pops above) flow through to HttpRequest.
    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
def build_get_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    resource_name,  # type: str
    sync_set_resource_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that retrieves a single SyncSet.

    :param subscription_id: Azure subscription ID (validated to min length 1).
    :param resource_group_name: Resource group name (validated to 1-90 chars).
    :param resource_name: Name of the OpenShift cluster resource in the URL path.
    :param sync_set_resource_name: Name of the SyncSet child resource.
    :keyword api_version: Service API version; defaults to "2022-09-04".
    :keyword template_url: Optional override of the URL template.
    :return: An HttpRequest ready to be run through the client pipeline.

    NOTE(review): the child segment here is ``syncSet`` (singular) while the
    list request above uses ``syncSets``, and the cluster segment is
    ``openshiftclusters`` vs ``openShiftCluster`` in the list request —
    confirm the canonical paths against the swagger definition.
    """
    api_version = kwargs.pop('api_version', "2022-09-04")  # type: str

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openshiftclusters/{resourceName}/syncSet/{syncSetResourceName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "syncSetResourceName": _SERIALIZER.url("sync_set_resource_name", sync_set_resource_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    # Remaining kwargs (after the pops above) flow through to HttpRequest.
    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
# fmt: on
class SyncSetsOperations(object):
    """SyncSetsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.redhatopenshift.v2022_09_04.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.SyncSetList"]
        """Lists SyncSets that belong to that Azure Red Hat OpenShift Cluster.

        The operation returns properties of each SyncSet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
         NOTE(review): generated doc said "SyncSet resource", but the value fills
         the ``{resourceName}`` cluster segment of the URL — confirm.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SyncSetList or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSetList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-09-04")  # type: str

        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SyncSetList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build either the first-page request (from the URL template) or a
        # follow-up request from the service-supplied nextLink.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # nextLink is an absolute URL; path args are still passed so the
                # request carries the same api-version query parameter.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        # Deserialize one page: returns (next_link, iterator of page elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("SyncSetList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page through the pipeline, raising on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    # NOTE(review): path segment ``openShiftCluster`` (singular) differs from the
    # ``openshiftclusters`` segment used by ``get`` below — confirm against swagger.
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftCluster/{resourceName}/syncSets"}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        sync_set_resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.SyncSet"
        """Gets a SyncSet with the specified subscription, resource group and resource name.

        The operation returns properties of a SyncSet.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
         NOTE(review): generated doc said "SyncSet resource", but the value fills
         the ``{resourceName}`` cluster segment of the URL — confirm.
        :type resource_name: str
        :param sync_set_resource_name: The name of the SyncSet resource.
        :type sync_set_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SyncSet, or the result of cls(response)
        :rtype: ~azure.mgmt.redhatopenshift.v2022_09_04.models.SyncSet
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SyncSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2022-09-04")  # type: str

        # Single GET request for the named SyncSet child resource.
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            sync_set_resource_name=sync_set_resource_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SyncSet', pipeline_response)

        if cls:
            # Custom cls callback receives (pipeline_response, deserialized, headers).
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openshiftclusters/{resourceName}/syncSet/{syncSetResourceName}"}  # type: ignore

Просмотреть файл

@ -4,6 +4,7 @@
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName",
"parameters": {
"location": "location",
"tags": {
@ -71,7 +72,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {
@ -133,7 +134,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2021-09-01-preview",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"202": {},

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2021-09-01-preview",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {
@ -27,7 +28,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -27,7 +27,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2021-09-01-preview",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {

Просмотреть файл

@ -28,7 +28,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2021-09-01-preview",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {

Просмотреть файл

@ -4,6 +4,7 @@
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName",
"parameters": {
"tags": {
"key": "value"
@ -70,7 +71,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {
@ -132,7 +133,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -4,6 +4,7 @@
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName",
"parameters": {
"location": "location",
"tags": {
@ -63,7 +64,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {
@ -117,7 +118,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2020-04-30",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"202": {},

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2020-04-30",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {
@ -19,7 +20,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -19,7 +19,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -20,7 +20,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2020-04-30",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {

Просмотреть файл

@ -4,6 +4,7 @@
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName",
"parameters": {
"tags": {
"key": "value"
@ -62,7 +63,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {
@ -116,7 +117,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -4,6 +4,7 @@
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName",
"parameters": {
"location": "location",
"tags": {
@ -73,7 +74,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {
@ -135,7 +136,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2022-04-01",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"202": {},

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2022-04-01",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {
@ -27,7 +28,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -27,7 +27,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2022-04-01",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {

Просмотреть файл

@ -28,7 +28,7 @@
"provisioningState": "Succeeded",
"clusterProfile": {
"domain": "cluster.location.aroapp.io",
"version": "4.3.0",
"version": "4.11.0",
"resourceGroupId": "/subscriptions/subscriptionId/resourceGroups/clusterResourceGroup"
},
"consoleProfile": {

Просмотреть файл

@ -3,7 +3,8 @@
"api-version": "2022-04-01",
"subscriptionId": "subscriptionId",
"resourceGroupName": "resourceGroup",
"resourceName": "resourceName"
"resourceName": "resourceName",
"syncSetResourceName": "syncSetResourceName"
},
"responses": {
"200": {

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше