Managed Infrastructure Maintenance Operator - Milestone 1 (#3571)

Adds MIMO milestone 1, ready for work in deployment testing
This commit is contained in:
Amber Brown 2024-12-09 17:33:51 +11:00 committed by GitHub
Parent 55cde02491
Commit 7e94614335
No known key found for this signature
GPG key ID: B5690EEEBB952194
94 changed files: 6738 additions and 96 deletions

View file

@ -70,6 +70,8 @@ jobs:
- script: |
export CI=true
# Tell the E2E binary to run the MIMO tests
export ARO_E2E_MIMO=true
. secrets/env
. ./hack/e2e/run-rp-and-e2e.sh
@ -84,6 +86,9 @@ jobs:
run_selenium
validate_selenium_running
run_mimo_actuator
validate_mimo_actuator_running
run_rp
validate_rp_running
@ -128,6 +133,7 @@ jobs:
delete_e2e_cluster
kill_rp
kill_mimo_actuator
kill_selenium
kill_podman
kill_vpn

View file

@ -2,7 +2,7 @@ SHELL = /bin/bash
TAG ?= $(shell git describe --exact-match 2>/dev/null)
COMMIT = $(shell git rev-parse --short=7 HEAD)$(shell [[ $$(git status --porcelain) = "" ]] || echo -dirty)
ARO_IMAGE_BASE = ${RP_IMAGE_ACR}.azurecr.io/aro
E2E_FLAGS ?= -test.v --ginkgo.v --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
E2E_FLAGS ?= -test.v --ginkgo.vv --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
E2E_LABEL ?= !smoke&&!regressiontest
GO_FLAGS ?= -tags=containers_image_openpgp,exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper
OC ?= oc
@ -68,7 +68,7 @@ aro: check-release generate
.PHONY: runlocal-rp
runlocal-rp:
go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro rp
go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro ${ARO_CMD_ARGS} rp
.PHONY: az
az: pyenv
@ -197,7 +197,11 @@ proxy:
.PHONY: runlocal-portal
runlocal-portal:
go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro portal
go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro ${ARO_CMD_ARGS} portal
.PHONY: runlocal-actuator
runlocal-actuator:
go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro ${ARO_CMD_ARGS} mimo-actuator
.PHONY: build-portal
build-portal:

View file

@ -28,6 +28,7 @@ func usage() {
fmt.Fprintf(flag.CommandLine.Output(), " %s operator {master,worker}\n", os.Args[0])
fmt.Fprintf(flag.CommandLine.Output(), " %s update-versions\n", os.Args[0])
fmt.Fprintf(flag.CommandLine.Output(), " %s update-role-sets\n", os.Args[0])
fmt.Fprintf(flag.CommandLine.Output(), " %s mimo-actuator\n", os.Args[0])
flag.PrintDefaults()
}
@ -74,6 +75,9 @@ func main() {
case "update-role-sets":
checkArgs(1)
err = updatePlatformWorkloadIdentityRoleSets(ctx, log)
case "mimo-actuator":
checkArgs(1)
err = mimoActuator(ctx, log)
default:
usage()
os.Exit(2)

103
cmd/aro/mimoactuator.go Normal file
View file

@ -0,0 +1,103 @@
package main
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/metrics/statsd"
"github.com/Azure/ARO-RP/pkg/metrics/statsd/golang"
"github.com/Azure/ARO-RP/pkg/mimo/actuator"
"github.com/Azure/ARO-RP/pkg/mimo/tasks"
"github.com/Azure/ARO-RP/pkg/proxy"
"github.com/Azure/ARO-RP/pkg/util/encryption"
)
func mimoActuator(ctx context.Context, log *logrus.Entry) error {
stop := make(chan struct{})
_env, err := env.NewEnv(ctx, log, env.COMPONENT_MIMO_ACTUATOR)
if err != nil {
return err
}
keys := []string{}
if !_env.IsLocalDevelopmentMode() {
keys = []string{
"MDM_ACCOUNT",
"MDM_NAMESPACE",
}
}
if err = env.ValidateVars(keys...); err != nil {
return err
}
m := statsd.New(ctx, log.WithField("component", "actuator"), _env, os.Getenv("MDM_ACCOUNT"), os.Getenv("MDM_NAMESPACE"), os.Getenv("MDM_STATSD_SOCKET"))
g, err := golang.NewMetrics(_env.Logger(), m)
if err != nil {
return err
}
go g.Run()
aead, err := encryption.NewAEADWithCore(ctx, _env, env.EncryptionSecretV2Name, env.EncryptionSecretName)
if err != nil {
return err
}
dbc, err := database.NewDatabaseClientFromEnv(ctx, _env, log, m, aead)
if err != nil {
return err
}
dbName, err := env.DBName(_env)
if err != nil {
return err
}
clusters, err := database.NewOpenShiftClusters(ctx, dbc, dbName)
if err != nil {
return err
}
manifests, err := database.NewMaintenanceManifests(ctx, dbc, dbName)
if err != nil {
return err
}
dbg := database.NewDBGroup().
WithOpenShiftClusters(clusters).
WithMaintenanceManifests(manifests)
go database.EmitMIMOMetrics(ctx, log, manifests, m)
dialer, err := proxy.NewDialer(_env.IsLocalDevelopmentMode())
if err != nil {
return err
}
a := actuator.NewService(_env, _env.Logger(), dialer, dbg, m)
a.SetMaintenanceTasks(tasks.DEFAULT_MAINTENANCE_TASKS)
sigterm := make(chan os.Signal, 1)
done := make(chan struct{})
signal.Notify(sigterm, syscall.SIGTERM)
go a.Run(ctx, stop, done)
<-sigterm
log.Print("received SIGTERM")
close(stop)
<-done
return nil
}

View file

@ -154,7 +154,7 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
return err
}
go database.EmitMetrics(ctx, log, dbOpenShiftClusters, metrics)
go database.EmitOpenShiftClustersMetrics(ctx, log, dbOpenShiftClusters, metrics)
feAead, err := encryption.NewMulti(ctx, _env.ServiceKeyvault(), env.FrontendEncryptionSecretV2Name, env.FrontendEncryptionSecretName)
if err != nil {
@ -172,6 +172,15 @@ func rp(ctx context.Context, log, audit *logrus.Entry) error {
WithPlatformWorkloadIdentityRoleSets(dbPlatformWorkloadIdentityRoleSets).
WithSubscriptions(dbSubscriptions)
// MIMO only activated in development for now
if _env.IsLocalDevelopmentMode() {
dbMaintenanceManifests, err := database.NewMaintenanceManifests(ctx, dbc, dbName)
if err != nil {
return err
}
dbg.WithMaintenanceManifests(dbMaintenanceManifests)
}
f, err := frontend.NewFrontend(ctx, audit, log.WithField("component", "frontend"), _env, dbg, api.APIs, metrics, clusterm, feAead, hiveClusterManager, adminactions.NewKubeActions, adminactions.NewAzureActions, adminactions.NewAppLensActions, clusterdata.NewParallelEnricher(metrics, _env))
if err != nil {
return err

22
docs/mimo/README.md Normal file
View file

@ -0,0 +1,22 @@
# MIMO Documentation
The Managed Infrastructure Maintenance Operator, or MIMO, is a component of the Azure Red Hat OpenShift Resource Provider (ARO-RP) which is responsible for automated maintenance of clusters provisioned by the platform.
MIMO specifically focuses on "managed infrastructure", the parts of ARO that are deployed and maintained by the RP and ARO Operator instead of by OCP (in-cluster) or Hive (out-of-cluster).
MIMO consists of two main components, the [Actuator](./actuator.md) and the [Scheduler](./scheduler.md). It is primarily interfaced with via the [Admin API](./admin-api.md).
## A Primer On MIMO
The smallest thing that you can tell MIMO to run is a **Task** (see [`pkg/mimo/tasks/`](../../pkg/mimo/tasks/)).
A Task is composed of reusable **Steps** (see [`pkg/mimo/steps/`](../../pkg/mimo/steps/)), reusing the framework utilised by AdminUpdate/Update/Install methods in `pkg/cluster/`.
A Task only runs in the scope of a single cluster.
These Steps are run in sequence and can return either **Terminal** errors (causing the Task to fail and not be retried) or **Transient** errors (which indicate that the Task can be retried later).
Tasks are executed by the **Actuator** by way of creation of a **Maintenance Manifest**.
This Manifest is created with the cluster ID (which is elided from the cluster-scoped Admin APIs), the Task ID (which is currently a UUID), and optional priority, "start after", and "start before" times which are filled in with defaults if not provided.
The Actuator will treat these Maintenance Manifests as a work queue, taking ones which are past their "start after" time and executing them in order of earliest start-after and priority.
After each run, a state is written into the Manifest (with optional free-form status text) recording the result of the Task.
Manifests past their "start before" times are marked as "timed out" and are not run.
Currently, Manifests are created by the Admin API.
In the future, the Scheduler will create some of these Manifests based on cluster state/version and wall-clock time, providing the ability to perform tasks like secret rotation autonomously.

30
docs/mimo/actuator.md Normal file
View file

@ -0,0 +1,30 @@
# Managed Infrastructure Maintenance Operator: Actuator
The Actuator is the MIMO component that performs execution of tasks.
The process of running tasks looks like this:
```mermaid
graph TD;
START((Start))-->QUERY;
QUERY[Fetch all State = Pending] -->SORT;
SORT[Sort tasks by RUNAFTER and PRIORITY]-->ITERATE[Iterate over tasks];
ITERATE-- Per Task -->ISEXPIRED;
subgraph PerTask[ ]
ISEXPIRED{{Is now past RUNBEFORE?}}-- Yes --> STATETIMEDOUT([State = TimedOut]) --> CONTINUE[Continue];
ISEXPIRED-- No --> DEQUEUECLUSTER;
DEQUEUECLUSTER[Claim lease on OpenShiftClusterDocument] --> DEQUEUE;
DEQUEUE[Actuator dequeues task]--> ISRETRYLIMIT;
ISRETRYLIMIT{{Have we retried the task too many times?}} -- Yes --> STATERETRYEXCEEDED([State = RetriesExceeded]) --> CONTINUE;
ISRETRYLIMIT -- No -->STATEINPROGRESS;
STATEINPROGRESS([State = InProgress]) -->RUN[[Task is run]];
RUN -- Success --> SUCCESS
RUN-- Terminal Error-->TERMINALERROR;
RUN-- Transient Error-->TRANSIENTERROR;
SUCCESS([State = Completed])-->DELEASECLUSTER
TERMINALERROR([State = Failed])-->DELEASECLUSTER;
TRANSIENTERROR([State = Pending])-->DELEASECLUSTER;
DELEASECLUSTER[Release Lease on OpenShiftClusterDocument] -->CONTINUE;
end
CONTINUE-->ITERATE;
ITERATE-- Finished -->END;
```

30
docs/mimo/admin-api.md Normal file
View file

@ -0,0 +1,30 @@
# Admin API
All endpoints require `api-version=admin`.
## GET /admin/RESOURCE_ID/maintenanceManifests
Returns a list of MIMO maintenance manifests.
## PUT /admin/RESOURCE_ID/maintenanceManifests
Creates a new manifest. Returns the created manifest.
### Example
```sh
curl -X PUT -k "https://localhost:8443/admin/subscriptions/fe16a035-e540-4ab7-80d9-373fa9a3d6ae/resourcegroups/v4-westeurope/providers/microsoft.redhatopenshift/openshiftclusters/abrownmimom1test/maintenanceManifests?api-version=admin" -d '{"maintenanceTaskID": "b41749fc-af26-4ab7-b5a1-e03f3ee4cba6"}' --header "Content-Type: application/json"
```
## GET /admin/RESOURCE_ID/maintenanceManifests/MANIFEST_ID
Returns a manifest.
## DELETE /admin/RESOURCE_ID/maintenanceManifests/MANIFEST_ID
Deletes a manifest. This is only to be used as a last resort.
## POST /admin/RESOURCE_ID/maintenanceManifests/MANIFEST_ID/cancel
Cancels the manifest (the state becomes `Cancelled`). It does not stop a task that is currently executing.
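For example, cancelling follows the same pattern as the PUT example above (the manifest ID here is a hypothetical placeholder for the `id` returned when the manifest was created):
```sh
curl -X POST -k "https://localhost:8443/admin/subscriptions/fe16a035-e540-4ab7-80d9-373fa9a3d6ae/resourcegroups/v4-westeurope/providers/microsoft.redhatopenshift/openshiftclusters/abrownmimom1test/maintenanceManifests/7f8a1f2f-0000-0000-0000-000000000000/cancel?api-version=admin"
```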

6
docs/mimo/local-dev.md Normal file
View file

@ -0,0 +1,6 @@
# Local Development
1. Ensure that you have remade your databases (so that you have the MIMO ones), see [Prepare Your Dev Environment](../prepare-your-dev-environment.md).
1. Run the local RP as usual.
1. Run `make runlocal-actuator` to spawn the actuator.
1. Perform queries against the [Admin API](./admin-api.md) to queue and monitor MIMO manifests, as shown below.
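For example (the subscription, resource group, and cluster names below are placeholders for your local dev cluster; the task ID is the one used in the Admin API example):
```sh
# List the existing maintenance manifests for a cluster.
curl -k "https://localhost:8443/admin/subscriptions/$SUB_ID/resourcegroups/$RESOURCEGROUP/providers/microsoft.redhatopenshift/openshiftclusters/$CLUSTER/maintenanceManifests?api-version=admin"

# Queue a new manifest for a given maintenance task ID.
curl -X PUT -k "https://localhost:8443/admin/subscriptions/$SUB_ID/resourcegroups/$RESOURCEGROUP/providers/microsoft.redhatopenshift/openshiftclusters/$CLUSTER/maintenanceManifests?api-version=admin" \
    -d '{"maintenanceTaskID": "b41749fc-af26-4ab7-b5a1-e03f3ee4cba6"}' --header "Content-Type: application/json"
```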

3
docs/mimo/scheduler.md Normal file
View file

@ -0,0 +1,3 @@
# MIMO Scheduler
The MIMO Scheduler is a planned component, but is not yet implemented.

View file

@ -0,0 +1,48 @@
# Writing MIMO Tasks
Writing a MIMO task consists of three major steps:
1. Writing the new functions in [`pkg/mimo/steps/`](../../pkg/mimo/steps/) which implement the specific behaviour (e.g. rotating a certificate), along with tests.
2. Writing the new Task in [`pkg/mimo/tasks/`](../../pkg/mimo/tasks/) which combines the Step you have written with any pre-existing "check" steps (e.g. `EnsureAPIServerIsUp`).
3. Adding the task with a new ID to [`pkg/mimo/const.go`](../../pkg/mimo/const.go) and `DEFAULT_MAINTENANCE_TASKS` in [`pkg/mimo/tasks/taskrunner.go`](../../pkg/mimo/tasks/taskrunner.go).
## New Step Functions
MIMO Step functions are similar to functions used in `pkg/cluster/install.go` but have additional information on the `Context` to prevent the explosion of struct members as seen in that package. Instead, the `GetTaskContext` function will return a `TaskContext` with various methods that can be used to retrieve information about the cluster, clients to perform actions in Azure, or Kubernetes clients to perform actions in the cluster.
Steps with similar logical domains should live in the same file/package. Currently, `pkg/mimo/steps/cluster/` is the only package, but functionality specific to the cluster's Azure resources may be better in a package called `pkg/mimo/steps/azure/` to make navigation easier.
Your base Action Step will look something like this:
```go
func DoSomething(ctx context.Context) error {
tc, err := mimo.GetTaskContext(ctx)
if err != nil {
return mimo.TerminalError(err)
}
return nil
}
```
Like `pkg/cluster/`, you can also implement `Condition`s, which allow you to wait for some state. However, MIMO is designed not to sit around for long periods waiting for things which should already be the case -- for example, a check that the API server is up should instead be a normal Action which returns either `mimo.TerminalError` or `mimo.TransientError`.
`TransientError`s will be retried and do not indicate a permanent failure. They are a good fit for errors that are plausibly caused by timeouts, random momentary outages, or cosmic winds flipping all the bits on your NIC for a nanosecond. MIMO will retry a Task whose Steps return a `TransientError` (at least, a few times).
`TerminalError`s are used when there is no likelihood of automatic recovery. For example, if an API server is healthy and returning data, but it says that some essential OpenShift object we require is missing, it is unlikely that the object will reappear after one or many retries in a short period of time. These failures are unexpected and ought to require manual intervention, or indicate that a cluster is unserviceable. When a `TerminalError` is returned, it causes the Task to hard fail and MIMO will not retry it.
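As a rough sketch (in the same abbreviated style as the example above), a Step might classify its failures like this; `checkSomething` is a hypothetical stand-in for whatever probe or action the Step performs:
```go
func EnsureSomethingHealthy(ctx context.Context) error {
	tc, err := mimo.GetTaskContext(ctx)
	if err != nil {
		// No usable task context: retrying will not help, so hard-fail the Task.
		return mimo.TerminalError(err)
	}

	healthy, err := checkSomething(ctx, tc) // hypothetical probe
	if err != nil {
		// e.g. a timeout or momentary outage: worth retrying later.
		return mimo.TransientError(err)
	}
	if !healthy {
		// A state we do not expect to recover from automatically.
		return mimo.TerminalError(errors.New("something is permanently unhealthy"))
	}
	return nil
}
```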
## Testing
MIMO provides a fake `TaskContext`, created by `test/mimo/tasks.NewFakeTestContext`. This fake takes a number of mandatory items, such as an inner `Context` for cancellation, an `env.Interface`, a `*logrus.Entry`, and a stand-in clock for testing timing. Additional parts of the `TaskContext` can be provided via the `WithXXX` options at the end of the instantiation, such as `WithClientHelper`, which adds a `ClientHelper` that is accessible on the `TaskContext`.
Attempting to use additional parts of the `TaskContext` without providing them will cause a panic or an error to be returned, in both the fake and real `TaskContext`. This behaviour is intended to make it clearer when some dependency is required.
## Assembling a Task
Once you have your Steps, you can assemble them into a Task in [`pkg/mimo/tasks/`](../../pkg/mimo/tasks/). See existing Tasks for examples; a rough sketch follows below.
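A very rough sketch of what assembling a Task can look like, assuming MIMO reuses the `pkg/util/steps` helpers (`steps.Action`) that `pkg/cluster/` uses, as the primer suggests; the exact Task signature, the package aliases, and how `DEFAULT_MAINTENANCE_TASKS` is keyed are defined in `pkg/mimo/tasks/` and may differ:
```go
// Hypothetical sketch: a Task as an ordered list of Steps, combining a
// pre-existing check step with the illustrative action step from above.
// "clustersteps" is an assumed alias for pkg/mimo/steps/cluster.
func EnsureSomethingHealthyTask() []steps.Step {
	return []steps.Step{
		steps.Action(clustersteps.EnsureAPIServerIsUp), // pre-existing check step
		steps.Action(clustersteps.EnsureSomethingHealthy),
	}
}
```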
## Assumptions MIMO Makes Of Your Code
- Your Steps may be run more than once -- both because they may appear in a Task more than once, and because a Task may be retried. Your Step must be resilient to being rerun after a partial run (see the sketch after this list).
- Steps should fail fast and not sit around unless they have caused something to happen. Right now, Tasks only have a 60-minute timeout in total, so use it wisely.
- Steps use the `TaskContext` interface to get clients, and should not build clients themselves. If a Step requires a new client, it should be implemented on `TaskContext` to ensure that it can be tested the same way as the other clients.
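A minimal sketch of the rerun-safety point above; `alreadyApplied` and `applyChange` are hypothetical helpers standing in for a real check and a real action:
```go
func ApplySomethingIdempotently(ctx context.Context) error {
	tc, err := mimo.GetTaskContext(ctx)
	if err != nil {
		return mimo.TerminalError(err)
	}

	// Check the current state first, so a retried or partially-completed run
	// simply observes that the work is already done and exits quickly.
	done, err := alreadyApplied(ctx, tc) // hypothetical check
	if err != nil {
		return mimo.TransientError(err)
	}
	if done {
		return nil
	}
	return applyChange(ctx, tc) // hypothetical action
}
```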

2
go.mod
View file

@ -79,6 +79,7 @@ require (
github.com/vincent-petithory/dataurl v1.0.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.28.0
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
golang.org/x/net v0.30.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.8.0
@ -259,7 +260,6 @@ require (
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect

View file

@ -92,6 +92,43 @@ kill_portal() {
wait $rppid
}
run_mimo_actuator() {
echo "########## 🚀 Run MIMO Actuator in background ##########"
export AZURE_ENVIRONMENT=AzurePublicCloud
./aro mimo-actuator &
}
kill_mimo_actuator() {
echo "########## Kill the MIMO Actuator running in background ##########"
rppid=$(lsof -t -i :8445)
kill $rppid
wait $rppid
}
validate_mimo_actuator_running() {
echo "########## Checking MIMO Actuator Status ##########"
ELAPSED=0
while true; do
sleep 5
http_code=$(curl -k -s -o /dev/null -w '%{http_code}' http://localhost:8445/healthz/ready)
case $http_code in
"200")
echo "########## ✅ ARO MIMO Actuator Running ##########"
break
;;
*)
echo "Attempt $ELAPSED - local MIMO Actuator is NOT up. Code : $http_code, waiting"
sleep 2
# give up after 20 failed attempts (roughly two minutes) so we don't block CI
ELAPSED=$((ELAPSED + 1))
if [ $ELAPSED -eq 20 ]; then
exit 1
fi
;;
esac
done
}
run_vpn() {
echo "########## 🚀 Run OpenVPN in background ##########"
echo "Using Secret secrets/$VPN"

43
pkg/api/admin/mimo.go Normal file
View file

@ -0,0 +1,43 @@
package admin
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
type MaintenanceManifestState string
const (
MaintenanceManifestStatePending MaintenanceManifestState = "Pending"
MaintenanceManifestStateInProgress MaintenanceManifestState = "InProgress"
MaintenanceManifestStateCompleted MaintenanceManifestState = "Completed"
MaintenanceManifestStateFailed MaintenanceManifestState = "Failed"
MaintenanceManifestStateRetriesExceeded MaintenanceManifestState = "RetriesExceeded"
MaintenanceManifestStateTimedOut MaintenanceManifestState = "TimedOut"
MaintenanceManifestStateCancelled MaintenanceManifestState = "Cancelled"
)
type MaintenanceManifest struct {
// The ID for the resource.
ID string `json:"id,omitempty"`
ClusterResourceID string `json:"clusterResourceID,omitempty"`
State MaintenanceManifestState `json:"state,omitempty"`
StatusText string `json:"statusText,omitempty"`
MaintenanceTaskID string `json:"maintenanceTaskID,omitempty"`
Priority int `json:"priority,omitempty"`
// RunAfter defines the earliest that this manifest should start running
RunAfter int `json:"runAfter,omitempty"`
// RunBefore defines the latest that this manifest should start running
RunBefore int `json:"runBefore,omitempty"`
}
// MaintenanceManifestList represents a list of MaintenanceManifests.
type MaintenanceManifestList struct {
// The list of MaintenanceManifests.
MaintenanceManifests []*MaintenanceManifest `json:"value"`
// The link used to get the next page of operations.
NextLink string `json:"nextLink,omitempty"`
}

View file

@ -0,0 +1,56 @@
package admin
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
)
type maintenanceManifestConverter struct{}
func (m maintenanceManifestConverter) ToExternal(d *api.MaintenanceManifestDocument, clusterNamespaced bool) interface{} {
clusterResourceID := ""
if !clusterNamespaced {
clusterResourceID = d.ClusterResourceID
}
return &MaintenanceManifest{
ID: d.ID,
ClusterResourceID: clusterResourceID,
State: MaintenanceManifestState(d.MaintenanceManifest.State),
StatusText: d.MaintenanceManifest.StatusText,
MaintenanceTaskID: d.MaintenanceManifest.MaintenanceTaskID,
Priority: d.MaintenanceManifest.Priority,
RunAfter: d.MaintenanceManifest.RunAfter,
RunBefore: d.MaintenanceManifest.RunBefore,
}
}
func (m maintenanceManifestConverter) ToExternalList(docs []*api.MaintenanceManifestDocument, nextLink string, clusterNamespaced bool) interface{} {
l := &MaintenanceManifestList{
MaintenanceManifests: make([]*MaintenanceManifest, 0, len(docs)),
NextLink: nextLink,
}
for _, doc := range docs {
l.MaintenanceManifests = append(l.MaintenanceManifests, m.ToExternal(doc, clusterNamespaced).(*MaintenanceManifest))
}
return l
}
func (m maintenanceManifestConverter) ToInternal(_i interface{}, out *api.MaintenanceManifestDocument) {
i := _i.(*MaintenanceManifest)
out.ID = i.ID
out.MaintenanceManifest.MaintenanceTaskID = i.MaintenanceTaskID
out.MaintenanceManifest.Priority = i.Priority
out.MaintenanceManifest.RunAfter = i.RunAfter
out.MaintenanceManifest.RunBefore = i.RunBefore
out.MaintenanceManifest.State = api.MaintenanceManifestState(i.State)
out.MaintenanceManifest.StatusText = i.StatusText
}

View file

@ -0,0 +1,59 @@
package admin
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"net/http"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/util/immutable"
)
type maintenanceManifestStaticValidator struct{}
// Validate validates a MaintenanceManifest
func (sv maintenanceManifestStaticValidator) Static(_new interface{}, _current *api.MaintenanceManifestDocument) error {
new := _new.(*MaintenanceManifest)
var current *MaintenanceManifest
if _current != nil {
current = (&maintenanceManifestConverter{}).ToExternal(_current, false).(*MaintenanceManifest)
}
err := sv.validate(new)
if err != nil {
return err
}
if current == nil {
return nil
}
return sv.validateDelta(new, current)
}
func (sv maintenanceManifestStaticValidator) validate(new *MaintenanceManifest) error {
if new.MaintenanceTaskID == "" {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "maintenanceTaskID", "Must be provided")
}
if new.RunAfter == 0 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "runAfter", "Must be provided")
}
if new.RunBefore == 0 {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "runBefore", "Must be provided")
}
return nil
}
func (sv maintenanceManifestStaticValidator) validateDelta(new, current *MaintenanceManifest) error {
err := immutable.Validate("", new, current)
if err != nil {
err := err.(*immutable.ValidationError)
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodePropertyChangeNotAllowed, err.Target, err.Message)
}
return nil
}

View file

@ -5,6 +5,8 @@ package admin
import (
"time"
"github.com/Azure/go-autorest/autorest/date"
)
// OpenShiftClusterList represents a list of OpenShift clusters.
@ -500,6 +502,8 @@ const (
type RegistryProfile struct {
Name string `json:"name,omitempty"`
Username string `json:"username,omitempty"`
// IssueDate is when the username/password for the registry was last updated.
IssueDate *date.Time `json:"issueDate,omitempty"`
}
// ArchitectureVersion represents an architecture version

View file

@ -210,6 +210,7 @@ func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfac
for i, v := range oc.Properties.RegistryProfiles {
out.Properties.RegistryProfiles[i].Name = v.Name
out.Properties.RegistryProfiles[i].Username = v.Username
out.Properties.RegistryProfiles[i].IssueDate = v.IssueDate
}
}

View file

@ -39,6 +39,7 @@ func validateMaintenanceTask(task MaintenanceTask) error {
task == MaintenanceTaskRenewCerts ||
task == MaintenanceTaskPending ||
task == MaintenanceTaskNone ||
task == MaintenanceTaskSyncClusterObject ||
task == MaintenanceTaskCustomerActionNeeded) {
return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "properties.maintenanceTask", "Invalid enum parameter.")
}

View file

@ -10,6 +10,8 @@ import (
"testing"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/uuid"
"github.com/Azure/ARO-RP/test/validate"
@ -624,11 +626,11 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) {
wantErr: "400: PropertyChangeNotAllowed: properties.provisionedBy: Changing property 'properties.provisionedBy' is not allowed.",
},
{
name: "registryProfiles change is not allowed",
name: "registryProfiles username change is not allowed",
oc: func() *OpenShiftCluster {
return &OpenShiftCluster{
Properties: OpenShiftClusterProperties{
RegistryProfiles: []RegistryProfile{{Name: "test", Username: "testuser"}},
RegistryProfiles: []RegistryProfile{{Name: "test", Username: "testuser", IssueDate: toDate(time.Now())}},
},
}
},
@ -637,6 +639,20 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) {
},
wantErr: "400: PropertyChangeNotAllowed: properties.registryProfiles: Changing property 'properties.registryProfiles' is not allowed.",
},
{
name: "registryProfiles expiry change is not allowed",
oc: func() *OpenShiftCluster {
return &OpenShiftCluster{
Properties: OpenShiftClusterProperties{
RegistryProfiles: []RegistryProfile{{Name: "test", Username: "testuser", IssueDate: toDate(time.Now())}},
},
}
},
modify: func(oc *OpenShiftCluster) {
oc.Properties.RegistryProfiles[0].IssueDate = toDate(time.Now().UTC().Add(time.Hour * 24 * 30))
},
wantErr: "400: PropertyChangeNotAllowed: properties.registryProfiles: Changing property 'properties.registryProfiles' is not allowed.",
},
{
name: "maintenanceTask change to Everything is allowed",
oc: func() *OpenShiftCluster {
@ -766,3 +782,7 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) {
})
}
}
func toDate(t time.Time) *date.Time {
return &date.Time{Time: t}
}

View file

@ -18,5 +18,7 @@ func init() {
OpenShiftVersionStaticValidator: openShiftVersionStaticValidator{},
PlatformWorkloadIdentityRoleSetConverter: platformWorkloadIdentityRoleSetConverter{},
PlatformWorkloadIdentityRoleSetStaticValidator: platformWorkloadIdentityRoleSetStaticValidator{},
MaintenanceManifestConverter: maintenanceManifestConverter{},
MaintenanceManifestStaticValidator: maintenanceManifestStaticValidator{},
}
}

33
pkg/api/mimo.go Normal file
View file

@ -0,0 +1,33 @@
package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
type MaintenanceManifestState string
const (
MaintenanceManifestStatePending MaintenanceManifestState = "Pending"
MaintenanceManifestStateInProgress MaintenanceManifestState = "InProgress"
MaintenanceManifestStateCompleted MaintenanceManifestState = "Completed"
MaintenanceManifestStateFailed MaintenanceManifestState = "Failed"
MaintenanceManifestStateRetriesExceeded MaintenanceManifestState = "RetriesExceeded"
MaintenanceManifestStateTimedOut MaintenanceManifestState = "TimedOut"
MaintenanceManifestStateCancelled MaintenanceManifestState = "Cancelled"
)
// MaintenanceManifest represents an instance of a MaintenanceTask running on a
// given cluster.
type MaintenanceManifest struct {
MissingFields
State MaintenanceManifestState `json:"state,omitempty"`
StatusText string `json:"statusText,omitempty"`
MaintenanceTaskID string `json:"maintenanceTaskID,omitempty"`
Priority int `json:"priority,omitempty"`
// RunAfter defines the earliest that this manifest should start running
RunAfter int `json:"runAfter,omitempty"`
// RunBefore defines the latest that this manifest should start running
RunBefore int `json:"runBefore,omitempty"`
}

39
pkg/api/mimodocument.go Normal file
View file

@ -0,0 +1,39 @@
package api
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
type MaintenanceManifestDocuments struct {
Count int `json:"_count,omitempty"`
ResourceID string `json:"_rid,omitempty"`
MaintenanceManifestDocuments []*MaintenanceManifestDocument `json:"Documents,omitempty"`
}
func (e *MaintenanceManifestDocuments) String() string {
return encodeJSON(e)
}
type MaintenanceManifestDocument struct {
MissingFields
ID string `json:"id,omitempty"`
ResourceID string `json:"_rid,omitempty"`
Timestamp int `json:"_ts,omitempty"`
Self string `json:"_self,omitempty"`
ETag string `json:"_etag,omitempty" deep:"-"`
Attachments string `json:"_attachments,omitempty"`
TTL int `json:"ttl,omitempty"`
LSN int `json:"_lsn,omitempty"`
Metadata map[string]interface{} `json:"_metadata,omitempty"`
ClusterResourceID string `json:"clusterResourceID,omitempty"`
MaintenanceManifest MaintenanceManifest `json:"maintenanceManifest,omitempty"`
LeaseOwner string `json:"leaseOwner,omitempty" deep:"-"`
LeaseExpires int `json:"leaseExpires,omitempty" deep:"-"`
Dequeues int `json:"dequeues,omitempty"`
}
func (e *MaintenanceManifestDocument) String() string {
return encodeJSON(e)
}

View file

@ -9,6 +9,7 @@ import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
"github.com/Azure/go-autorest/autorest/date"
)
// OpenShiftCluster represents an OpenShift cluster
@ -211,6 +212,7 @@ const (
ProvisioningStateUpdating ProvisioningState = "Updating"
ProvisioningStateAdminUpdating ProvisioningState = "AdminUpdating"
ProvisioningStateCanceled ProvisioningState = "Canceled"
ProvisioningStateMaintenance ProvisioningState = "Maintenance"
ProvisioningStateDeleting ProvisioningState = "Deleting"
ProvisioningStateSucceeded ProvisioningState = "Succeeded"
ProvisioningStateFailed ProvisioningState = "Failed"
@ -260,6 +262,7 @@ func (t MaintenanceTask) IsMaintenanceOngoingTask() bool {
result := (t == MaintenanceTaskEverything) ||
(t == MaintenanceTaskOperator) ||
(t == MaintenanceTaskRenewCerts) ||
(t == MaintenanceTaskSyncClusterObject) ||
(t == "")
return result
}
@ -786,6 +789,8 @@ type RegistryProfile struct {
Name string `json:"name,omitempty"`
Username string `json:"username,omitempty"`
Password SecureString `json:"password,omitempty"`
// IssueDate is when the username/password for the registry was last updated.
IssueDate *date.Time `json:"issueDate,omitempty"`
}
// Install represents an install process

View file

@ -71,6 +71,16 @@ type SecretConverter interface {
ToInternal(interface{}, *Secret)
}
type MaintenanceManifestConverter interface {
ToExternal(doc *MaintenanceManifestDocument, clusterNamespaced bool) interface{}
ToExternalList(docs []*MaintenanceManifestDocument, nextLink string, clusterNamespaced bool) interface{}
ToInternal(interface{}, *MaintenanceManifestDocument)
}
type MaintenanceManifestStaticValidator interface {
Static(interface{}, *MaintenanceManifestDocument) error
}
// Version is a set of endpoints implemented by each API version
type Version struct {
OpenShiftClusterConverter OpenShiftClusterConverter
@ -87,6 +97,8 @@ type Version struct {
SyncIdentityProviderConverter SyncIdentityProviderConverter
SecretConverter SecretConverter
ClusterManagerStaticValidator ClusterManagerStaticValidator
MaintenanceManifestConverter MaintenanceManifestConverter
MaintenanceManifestStaticValidator MaintenanceManifestStaticValidator
}
// APIs is the map of registered API versions

View file

@ -7,6 +7,7 @@ import (
"context"
"time"
"github.com/Azure/go-autorest/autorest/date"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -37,7 +38,7 @@ func (m *manager) ensureACRToken(ctx context.Context) error {
if rp == nil {
// 1. choose a name and establish the intent to create a token with
// that name
rp = token.NewRegistryProfile(m.doc.OpenShiftCluster)
rp = token.NewRegistryProfile()
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
token.PutRegistryProfile(doc.OpenShiftCluster, rp)
@ -57,6 +58,7 @@ func (m *manager) ensureACRToken(ctx context.Context) error {
}
rp.Password = api.SecureString(password)
rp.IssueDate = &date.Time{Time: time.Now().UTC()}
m.doc, err = m.db.PatchWithLease(ctx, m.doc.Key, func(doc *api.OpenShiftClusterDocument) error {
token.PutRegistryProfile(doc.OpenShiftCluster, rp)

View file

@ -12,6 +12,8 @@ import (
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/Azure/ARO-RP/pkg/util/clusteroperators"
)
const minimumWorkerNodes = 2
@ -27,7 +29,7 @@ func (m *manager) apiServersReady(ctx context.Context) (bool, error) {
if err != nil {
return false, nil
}
return isOperatorAvailable(apiserver), nil
return clusteroperators.IsOperatorAvailable(apiserver), nil
}
func (m *manager) minimumWorkerNodesReady(ctx context.Context) (bool, error) {
@ -87,7 +89,7 @@ func (m *manager) operatorConsoleReady(ctx context.Context) (bool, error) {
if err != nil {
return false, nil
}
return isOperatorAvailable(consoleOperator), nil
return clusteroperators.IsOperatorAvailable(consoleOperator), nil
}
func (m *manager) clusterVersionReady(ctx context.Context) (bool, error) {
@ -107,15 +109,7 @@ func (m *manager) ingressControllerReady(ctx context.Context) (bool, error) {
if err != nil {
return false, nil
}
return isOperatorAvailable(ingressOperator), nil
}
func isOperatorAvailable(operator *configv1.ClusterOperator) bool {
m := make(map[configv1.ClusterStatusConditionType]configv1.ConditionStatus, len(operator.Status.Conditions))
for _, cond := range operator.Status.Conditions {
m[cond.Type] = cond.Status
}
return m[configv1.OperatorAvailable] == configv1.ConditionTrue && m[configv1.OperatorProgressing] == configv1.ConditionFalse
return clusteroperators.IsOperatorAvailable(ingressOperator), nil
}
// aroCredentialsRequestReconciled evaluates whether the openshift-azure-operator CredentialsRequest has recently been reconciled and returns true

View file

@ -74,59 +74,6 @@ func TestOperatorConsoleExists(t *testing.T) {
}
}
func TestIsOperatorAvailable(t *testing.T) {
for _, tt := range []struct {
name string
availableCondition configv1.ConditionStatus
progressingCondition configv1.ConditionStatus
want bool
}{
{
name: "Available && Progressing; not available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionTrue,
},
{
name: "Available && !Progressing; available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionFalse,
want: true,
},
{
name: "!Available && Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionTrue,
},
{
name: "!Available && !Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionFalse,
},
} {
operator := &configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
},
Status: configv1.ClusterOperatorStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorAvailable,
Status: tt.availableCondition,
},
{
Type: configv1.OperatorProgressing,
Status: tt.progressingCondition,
},
},
},
}
available := isOperatorAvailable(operator)
if available != tt.want {
t.Error(available)
}
}
}
func TestMinimumWorkerNodesReady(t *testing.T) {
ctx := context.Background()
const phaseFailed = "Failed"

View file

@ -3,7 +3,7 @@ package cosmosdb
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
//go:generate gencosmosdb github.com/Azure/ARO-RP/pkg/api,AsyncOperationDocument github.com/Azure/ARO-RP/pkg/api,BillingDocument github.com/Azure/ARO-RP/pkg/api,GatewayDocument github.com/Azure/ARO-RP/pkg/api,MonitorDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftClusterDocument github.com/Azure/ARO-RP/pkg/api,SubscriptionDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftVersionDocument github.com/Azure/ARO-RP/pkg/api,ClusterManagerConfigurationDocument github.com/Azure/ARO-RP/pkg/api,PlatformWorkloadIdentityRoleSetDocument
//go:generate gencosmosdb github.com/Azure/ARO-RP/pkg/api,AsyncOperationDocument github.com/Azure/ARO-RP/pkg/api,BillingDocument github.com/Azure/ARO-RP/pkg/api,GatewayDocument github.com/Azure/ARO-RP/pkg/api,MonitorDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftClusterDocument github.com/Azure/ARO-RP/pkg/api,SubscriptionDocument github.com/Azure/ARO-RP/pkg/api,OpenShiftVersionDocument github.com/Azure/ARO-RP/pkg/api,ClusterManagerConfigurationDocument github.com/Azure/ARO-RP/pkg/api,PlatformWorkloadIdentityRoleSetDocument github.com/Azure/ARO-RP/pkg/api,MaintenanceManifestDocument
//go:generate goimports -local=github.com/Azure/ARO-RP -e -w ./
//go:generate mockgen -destination=../../util/mocks/$GOPACKAGE/$GOPACKAGE.go github.com/Azure/ARO-RP/pkg/database/$GOPACKAGE PermissionClient
//go:generate goimports -local=github.com/Azure/ARO-RP -e -w ../../util/mocks/$GOPACKAGE/$GOPACKAGE.go

View file

@ -0,0 +1,313 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"net/http"
"strconv"
"strings"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
type maintenanceManifestDocumentClient struct {
*databaseClient
path string
}
// MaintenanceManifestDocumentClient is a maintenanceManifestDocument client
type MaintenanceManifestDocumentClient interface {
Create(context.Context, string, *pkg.MaintenanceManifestDocument, *Options) (*pkg.MaintenanceManifestDocument, error)
List(*Options) MaintenanceManifestDocumentIterator
ListAll(context.Context, *Options) (*pkg.MaintenanceManifestDocuments, error)
Get(context.Context, string, string, *Options) (*pkg.MaintenanceManifestDocument, error)
Replace(context.Context, string, *pkg.MaintenanceManifestDocument, *Options) (*pkg.MaintenanceManifestDocument, error)
Delete(context.Context, string, *pkg.MaintenanceManifestDocument, *Options) error
Query(string, *Query, *Options) MaintenanceManifestDocumentRawIterator
QueryAll(context.Context, string, *Query, *Options) (*pkg.MaintenanceManifestDocuments, error)
ChangeFeed(*Options) MaintenanceManifestDocumentIterator
}
type maintenanceManifestDocumentChangeFeedIterator struct {
*maintenanceManifestDocumentClient
continuation string
options *Options
}
type maintenanceManifestDocumentListIterator struct {
*maintenanceManifestDocumentClient
continuation string
done bool
options *Options
}
type maintenanceManifestDocumentQueryIterator struct {
*maintenanceManifestDocumentClient
partitionkey string
query *Query
continuation string
done bool
options *Options
}
// MaintenanceManifestDocumentIterator is a maintenanceManifestDocument iterator
type MaintenanceManifestDocumentIterator interface {
Next(context.Context, int) (*pkg.MaintenanceManifestDocuments, error)
Continuation() string
}
// MaintenanceManifestDocumentRawIterator is a maintenanceManifestDocument raw iterator
type MaintenanceManifestDocumentRawIterator interface {
MaintenanceManifestDocumentIterator
NextRaw(context.Context, int, interface{}) error
}
// NewMaintenanceManifestDocumentClient returns a new maintenanceManifestDocument client
func NewMaintenanceManifestDocumentClient(collc CollectionClient, collid string) MaintenanceManifestDocumentClient {
return &maintenanceManifestDocumentClient{
databaseClient: collc.(*collectionClient).databaseClient,
path: collc.(*collectionClient).path + "/colls/" + collid,
}
}
func (c *maintenanceManifestDocumentClient) all(ctx context.Context, i MaintenanceManifestDocumentIterator) (*pkg.MaintenanceManifestDocuments, error) {
allmaintenanceManifestDocuments := &pkg.MaintenanceManifestDocuments{}
for {
maintenanceManifestDocuments, err := i.Next(ctx, -1)
if err != nil {
return nil, err
}
if maintenanceManifestDocuments == nil {
break
}
allmaintenanceManifestDocuments.Count += maintenanceManifestDocuments.Count
allmaintenanceManifestDocuments.ResourceID = maintenanceManifestDocuments.ResourceID
allmaintenanceManifestDocuments.MaintenanceManifestDocuments = append(allmaintenanceManifestDocuments.MaintenanceManifestDocuments, maintenanceManifestDocuments.MaintenanceManifestDocuments...)
}
return allmaintenanceManifestDocuments, nil
}
func (c *maintenanceManifestDocumentClient) Create(ctx context.Context, partitionkey string, newmaintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) (maintenanceManifestDocument *pkg.MaintenanceManifestDocument, err error) {
headers := http.Header{}
headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
if options == nil {
options = &Options{}
}
options.NoETag = true
err = c.setOptions(options, newmaintenanceManifestDocument, headers)
if err != nil {
return
}
err = c.do(ctx, http.MethodPost, c.path+"/docs", "docs", c.path, http.StatusCreated, &newmaintenanceManifestDocument, &maintenanceManifestDocument, headers)
return
}
func (c *maintenanceManifestDocumentClient) List(options *Options) MaintenanceManifestDocumentIterator {
continuation := ""
if options != nil {
continuation = options.Continuation
}
return &maintenanceManifestDocumentListIterator{maintenanceManifestDocumentClient: c, options: options, continuation: continuation}
}
func (c *maintenanceManifestDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.MaintenanceManifestDocuments, error) {
return c.all(ctx, c.List(options))
}
func (c *maintenanceManifestDocumentClient) Get(ctx context.Context, partitionkey, maintenanceManifestDocumentid string, options *Options) (maintenanceManifestDocument *pkg.MaintenanceManifestDocument, err error) {
headers := http.Header{}
headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
err = c.setOptions(options, nil, headers)
if err != nil {
return
}
err = c.do(ctx, http.MethodGet, c.path+"/docs/"+maintenanceManifestDocumentid, "docs", c.path+"/docs/"+maintenanceManifestDocumentid, http.StatusOK, nil, &maintenanceManifestDocument, headers)
return
}
func (c *maintenanceManifestDocumentClient) Replace(ctx context.Context, partitionkey string, newmaintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) (maintenanceManifestDocument *pkg.MaintenanceManifestDocument, err error) {
headers := http.Header{}
headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
err = c.setOptions(options, newmaintenanceManifestDocument, headers)
if err != nil {
return
}
err = c.do(ctx, http.MethodPut, c.path+"/docs/"+newmaintenanceManifestDocument.ID, "docs", c.path+"/docs/"+newmaintenanceManifestDocument.ID, http.StatusOK, &newmaintenanceManifestDocument, &maintenanceManifestDocument, headers)
return
}
func (c *maintenanceManifestDocumentClient) Delete(ctx context.Context, partitionkey string, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) (err error) {
headers := http.Header{}
headers.Set("X-Ms-Documentdb-Partitionkey", `["`+partitionkey+`"]`)
err = c.setOptions(options, maintenanceManifestDocument, headers)
if err != nil {
return
}
err = c.do(ctx, http.MethodDelete, c.path+"/docs/"+maintenanceManifestDocument.ID, "docs", c.path+"/docs/"+maintenanceManifestDocument.ID, http.StatusNoContent, nil, nil, headers)
return
}
func (c *maintenanceManifestDocumentClient) Query(partitionkey string, query *Query, options *Options) MaintenanceManifestDocumentRawIterator {
continuation := ""
if options != nil {
continuation = options.Continuation
}
return &maintenanceManifestDocumentQueryIterator{maintenanceManifestDocumentClient: c, partitionkey: partitionkey, query: query, options: options, continuation: continuation}
}
func (c *maintenanceManifestDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.MaintenanceManifestDocuments, error) {
return c.all(ctx, c.Query(partitionkey, query, options))
}
func (c *maintenanceManifestDocumentClient) ChangeFeed(options *Options) MaintenanceManifestDocumentIterator {
continuation := ""
if options != nil {
continuation = options.Continuation
}
return &maintenanceManifestDocumentChangeFeedIterator{maintenanceManifestDocumentClient: c, options: options, continuation: continuation}
}
func (c *maintenanceManifestDocumentClient) setOptions(options *Options, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, headers http.Header) error {
if options == nil {
return nil
}
if maintenanceManifestDocument != nil && !options.NoETag {
if maintenanceManifestDocument.ETag == "" {
return ErrETagRequired
}
headers.Set("If-Match", maintenanceManifestDocument.ETag)
}
if len(options.PreTriggers) > 0 {
headers.Set("X-Ms-Documentdb-Pre-Trigger-Include", strings.Join(options.PreTriggers, ","))
}
if len(options.PostTriggers) > 0 {
headers.Set("X-Ms-Documentdb-Post-Trigger-Include", strings.Join(options.PostTriggers, ","))
}
if len(options.PartitionKeyRangeID) > 0 {
headers.Set("X-Ms-Documentdb-PartitionKeyRangeID", options.PartitionKeyRangeID)
}
return nil
}
func (i *maintenanceManifestDocumentChangeFeedIterator) Next(ctx context.Context, maxItemCount int) (maintenanceManifestDocuments *pkg.MaintenanceManifestDocuments, err error) {
headers := http.Header{}
headers.Set("A-IM", "Incremental feed")
headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
if i.continuation != "" {
headers.Set("If-None-Match", i.continuation)
}
err = i.setOptions(i.options, nil, headers)
if err != nil {
return
}
err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &maintenanceManifestDocuments, headers)
if IsErrorStatusCode(err, http.StatusNotModified) {
err = nil
}
if err != nil {
return
}
i.continuation = headers.Get("Etag")
return
}
func (i *maintenanceManifestDocumentChangeFeedIterator) Continuation() string {
return i.continuation
}
func (i *maintenanceManifestDocumentListIterator) Next(ctx context.Context, maxItemCount int) (maintenanceManifestDocuments *pkg.MaintenanceManifestDocuments, err error) {
if i.done {
return
}
headers := http.Header{}
headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
if i.continuation != "" {
headers.Set("X-Ms-Continuation", i.continuation)
}
err = i.setOptions(i.options, nil, headers)
if err != nil {
return
}
err = i.do(ctx, http.MethodGet, i.path+"/docs", "docs", i.path, http.StatusOK, nil, &maintenanceManifestDocuments, headers)
if err != nil {
return
}
i.continuation = headers.Get("X-Ms-Continuation")
i.done = i.continuation == ""
return
}
func (i *maintenanceManifestDocumentListIterator) Continuation() string {
return i.continuation
}
func (i *maintenanceManifestDocumentQueryIterator) Next(ctx context.Context, maxItemCount int) (maintenanceManifestDocuments *pkg.MaintenanceManifestDocuments, err error) {
err = i.NextRaw(ctx, maxItemCount, &maintenanceManifestDocuments)
return
}
func (i *maintenanceManifestDocumentQueryIterator) NextRaw(ctx context.Context, maxItemCount int, raw interface{}) (err error) {
if i.done {
return
}
headers := http.Header{}
headers.Set("X-Ms-Max-Item-Count", strconv.Itoa(maxItemCount))
headers.Set("X-Ms-Documentdb-Isquery", "True")
headers.Set("Content-Type", "application/query+json")
if i.partitionkey != "" {
headers.Set("X-Ms-Documentdb-Partitionkey", `["`+i.partitionkey+`"]`)
} else {
headers.Set("X-Ms-Documentdb-Query-Enablecrosspartition", "True")
}
if i.continuation != "" {
headers.Set("X-Ms-Continuation", i.continuation)
}
err = i.setOptions(i.options, nil, headers)
if err != nil {
return
}
err = i.do(ctx, http.MethodPost, i.path+"/docs", "docs", i.path, http.StatusOK, &i.query, &raw, headers)
if err != nil {
return
}
i.continuation = headers.Get("X-Ms-Continuation")
i.done = i.continuation == ""
return
}
func (i *maintenanceManifestDocumentQueryIterator) Continuation() string {
return i.continuation
}

View file

@ -0,0 +1,361 @@
// Code generated by github.com/jewzaam/go-cosmosdb, DO NOT EDIT.
package cosmosdb
import (
"context"
"fmt"
"net/http"
"sync"
"github.com/ugorji/go/codec"
pkg "github.com/Azure/ARO-RP/pkg/api"
)
type fakeMaintenanceManifestDocumentTriggerHandler func(context.Context, *pkg.MaintenanceManifestDocument) error
type fakeMaintenanceManifestDocumentQueryHandler func(MaintenanceManifestDocumentClient, *Query, *Options) MaintenanceManifestDocumentRawIterator
var _ MaintenanceManifestDocumentClient = &FakeMaintenanceManifestDocumentClient{}
// NewFakeMaintenanceManifestDocumentClient returns a FakeMaintenanceManifestDocumentClient
func NewFakeMaintenanceManifestDocumentClient(h *codec.JsonHandle) *FakeMaintenanceManifestDocumentClient {
return &FakeMaintenanceManifestDocumentClient{
jsonHandle: h,
maintenanceManifestDocuments: make(map[string]*pkg.MaintenanceManifestDocument),
triggerHandlers: make(map[string]fakeMaintenanceManifestDocumentTriggerHandler),
queryHandlers: make(map[string]fakeMaintenanceManifestDocumentQueryHandler),
}
}
// FakeMaintenanceManifestDocumentClient is a FakeMaintenanceManifestDocumentClient
type FakeMaintenanceManifestDocumentClient struct {
lock sync.RWMutex
jsonHandle *codec.JsonHandle
maintenanceManifestDocuments map[string]*pkg.MaintenanceManifestDocument
triggerHandlers map[string]fakeMaintenanceManifestDocumentTriggerHandler
queryHandlers map[string]fakeMaintenanceManifestDocumentQueryHandler
sorter func([]*pkg.MaintenanceManifestDocument)
etag int
// returns true if documents conflict
conflictChecker func(*pkg.MaintenanceManifestDocument, *pkg.MaintenanceManifestDocument) bool
// err, if not nil, is an error to return when attempting to communicate
// with this Client
err error
}
// SetError sets or unsets an error that will be returned on any
// FakeMaintenanceManifestDocumentClient method invocation
func (c *FakeMaintenanceManifestDocumentClient) SetError(err error) {
c.lock.Lock()
defer c.lock.Unlock()
c.err = err
}
// SetSorter sets or unsets a sorter function which will be used to sort values
// returned by List() for test stability
func (c *FakeMaintenanceManifestDocumentClient) SetSorter(sorter func([]*pkg.MaintenanceManifestDocument)) {
c.lock.Lock()
defer c.lock.Unlock()
c.sorter = sorter
}
// SetConflictChecker sets or unsets a function which can be used to validate
// additional unique keys in a MaintenanceManifestDocument
func (c *FakeMaintenanceManifestDocumentClient) SetConflictChecker(conflictChecker func(*pkg.MaintenanceManifestDocument, *pkg.MaintenanceManifestDocument) bool) {
c.lock.Lock()
defer c.lock.Unlock()
c.conflictChecker = conflictChecker
}
// SetTriggerHandler sets or unsets a trigger handler
func (c *FakeMaintenanceManifestDocumentClient) SetTriggerHandler(triggerName string, trigger fakeMaintenanceManifestDocumentTriggerHandler) {
c.lock.Lock()
defer c.lock.Unlock()
c.triggerHandlers[triggerName] = trigger
}
// SetQueryHandler sets or unsets a query handler
func (c *FakeMaintenanceManifestDocumentClient) SetQueryHandler(queryName string, query fakeMaintenanceManifestDocumentQueryHandler) {
c.lock.Lock()
defer c.lock.Unlock()
c.queryHandlers[queryName] = query
}
func (c *FakeMaintenanceManifestDocumentClient) deepCopy(maintenanceManifestDocument *pkg.MaintenanceManifestDocument) (*pkg.MaintenanceManifestDocument, error) {
var b []byte
err := codec.NewEncoderBytes(&b, c.jsonHandle).Encode(maintenanceManifestDocument)
if err != nil {
return nil, err
}
maintenanceManifestDocument = nil
err = codec.NewDecoderBytes(b, c.jsonHandle).Decode(&maintenanceManifestDocument)
if err != nil {
return nil, err
}
return maintenanceManifestDocument, nil
}
func (c *FakeMaintenanceManifestDocumentClient) apply(ctx context.Context, partitionkey string, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options, isCreate bool) (*pkg.MaintenanceManifestDocument, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.err != nil {
return nil, c.err
}
maintenanceManifestDocument, err := c.deepCopy(maintenanceManifestDocument) // copy now because pretriggers can mutate maintenanceManifestDocument
if err != nil {
return nil, err
}
if options != nil {
err := c.processPreTriggers(ctx, maintenanceManifestDocument, options)
if err != nil {
return nil, err
}
}
existingMaintenanceManifestDocument, exists := c.maintenanceManifestDocuments[maintenanceManifestDocument.ID]
if isCreate && exists {
return nil, &Error{
StatusCode: http.StatusConflict,
Message: "Entity with the specified id already exists in the system",
}
}
if !isCreate {
if !exists {
return nil, &Error{StatusCode: http.StatusNotFound}
}
if maintenanceManifestDocument.ETag != existingMaintenanceManifestDocument.ETag {
return nil, &Error{StatusCode: http.StatusPreconditionFailed}
}
}
if c.conflictChecker != nil {
for _, maintenanceManifestDocumentToCheck := range c.maintenanceManifestDocuments {
if c.conflictChecker(maintenanceManifestDocumentToCheck, maintenanceManifestDocument) {
return nil, &Error{
StatusCode: http.StatusConflict,
Message: "Entity with the specified id already exists in the system",
}
}
}
}
maintenanceManifestDocument.ETag = fmt.Sprint(c.etag)
c.etag++
c.maintenanceManifestDocuments[maintenanceManifestDocument.ID] = maintenanceManifestDocument
return c.deepCopy(maintenanceManifestDocument)
}
// Create creates a MaintenanceManifestDocument in the database
func (c *FakeMaintenanceManifestDocumentClient) Create(ctx context.Context, partitionkey string, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) (*pkg.MaintenanceManifestDocument, error) {
return c.apply(ctx, partitionkey, maintenanceManifestDocument, options, true)
}
// Replace replaces a MaintenanceManifestDocument in the database
func (c *FakeMaintenanceManifestDocumentClient) Replace(ctx context.Context, partitionkey string, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) (*pkg.MaintenanceManifestDocument, error) {
return c.apply(ctx, partitionkey, maintenanceManifestDocument, options, false)
}
// List returns a MaintenanceManifestDocumentIterator to list all MaintenanceManifestDocuments in the database
func (c *FakeMaintenanceManifestDocumentClient) List(*Options) MaintenanceManifestDocumentIterator {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return NewFakeMaintenanceManifestDocumentErroringRawIterator(c.err)
}
maintenanceManifestDocuments := make([]*pkg.MaintenanceManifestDocument, 0, len(c.maintenanceManifestDocuments))
for _, maintenanceManifestDocument := range c.maintenanceManifestDocuments {
maintenanceManifestDocument, err := c.deepCopy(maintenanceManifestDocument)
if err != nil {
return NewFakeMaintenanceManifestDocumentErroringRawIterator(err)
}
maintenanceManifestDocuments = append(maintenanceManifestDocuments, maintenanceManifestDocument)
}
if c.sorter != nil {
c.sorter(maintenanceManifestDocuments)
}
return NewFakeMaintenanceManifestDocumentIterator(maintenanceManifestDocuments, 0)
}
// ListAll lists all MaintenanceManifestDocuments in the database
func (c *FakeMaintenanceManifestDocumentClient) ListAll(ctx context.Context, options *Options) (*pkg.MaintenanceManifestDocuments, error) {
iter := c.List(options)
return iter.Next(ctx, -1)
}
// Get gets a MaintenanceManifestDocument from the database
func (c *FakeMaintenanceManifestDocumentClient) Get(ctx context.Context, partitionkey string, id string, options *Options) (*pkg.MaintenanceManifestDocument, error) {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return nil, c.err
}
maintenanceManifestDocument, exists := c.maintenanceManifestDocuments[id]
if !exists {
return nil, &Error{StatusCode: http.StatusNotFound}
}
return c.deepCopy(maintenanceManifestDocument)
}
// Delete deletes a MaintenanceManifestDocument from the database
func (c *FakeMaintenanceManifestDocumentClient) Delete(ctx context.Context, partitionKey string, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) error {
c.lock.Lock()
defer c.lock.Unlock()
if c.err != nil {
return c.err
}
_, exists := c.maintenanceManifestDocuments[maintenanceManifestDocument.ID]
if !exists {
return &Error{StatusCode: http.StatusNotFound}
}
delete(c.maintenanceManifestDocuments, maintenanceManifestDocument.ID)
return nil
}
// ChangeFeed is unimplemented
func (c *FakeMaintenanceManifestDocumentClient) ChangeFeed(*Options) MaintenanceManifestDocumentIterator {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return NewFakeMaintenanceManifestDocumentErroringRawIterator(c.err)
}
return NewFakeMaintenanceManifestDocumentErroringRawIterator(ErrNotImplemented)
}
func (c *FakeMaintenanceManifestDocumentClient) processPreTriggers(ctx context.Context, maintenanceManifestDocument *pkg.MaintenanceManifestDocument, options *Options) error {
for _, triggerName := range options.PreTriggers {
if triggerHandler := c.triggerHandlers[triggerName]; triggerHandler != nil {
c.lock.Unlock()
err := triggerHandler(ctx, maintenanceManifestDocument)
c.lock.Lock()
if err != nil {
return err
}
} else {
return ErrNotImplemented
}
}
return nil
}
// Query calls a query handler to implement database querying
func (c *FakeMaintenanceManifestDocumentClient) Query(name string, query *Query, options *Options) MaintenanceManifestDocumentRawIterator {
c.lock.RLock()
defer c.lock.RUnlock()
if c.err != nil {
return NewFakeMaintenanceManifestDocumentErroringRawIterator(c.err)
}
if queryHandler := c.queryHandlers[query.Query]; queryHandler != nil {
c.lock.RUnlock()
i := queryHandler(c, query, options)
c.lock.RLock()
return i
}
return NewFakeMaintenanceManifestDocumentErroringRawIterator(ErrNotImplemented)
}
// QueryAll calls a query handler to implement database querying
func (c *FakeMaintenanceManifestDocumentClient) QueryAll(ctx context.Context, partitionkey string, query *Query, options *Options) (*pkg.MaintenanceManifestDocuments, error) {
iter := c.Query("", query, options)
return iter.Next(ctx, -1)
}
func NewFakeMaintenanceManifestDocumentIterator(maintenanceManifestDocuments []*pkg.MaintenanceManifestDocument, continuation int) MaintenanceManifestDocumentRawIterator {
return &fakeMaintenanceManifestDocumentIterator{maintenanceManifestDocuments: maintenanceManifestDocuments, continuation: continuation}
}
type fakeMaintenanceManifestDocumentIterator struct {
maintenanceManifestDocuments []*pkg.MaintenanceManifestDocument
continuation int
done bool
}
func (i *fakeMaintenanceManifestDocumentIterator) NextRaw(ctx context.Context, maxItemCount int, out interface{}) error {
return ErrNotImplemented
}
func (i *fakeMaintenanceManifestDocumentIterator) Next(ctx context.Context, maxItemCount int) (*pkg.MaintenanceManifestDocuments, error) {
if i.done {
return nil, nil
}
var maintenanceManifestDocuments []*pkg.MaintenanceManifestDocument
if maxItemCount == -1 {
maintenanceManifestDocuments = i.maintenanceManifestDocuments[i.continuation:]
i.continuation = len(i.maintenanceManifestDocuments)
i.done = true
} else {
max := i.continuation + maxItemCount
if max > len(i.maintenanceManifestDocuments) {
max = len(i.maintenanceManifestDocuments)
}
maintenanceManifestDocuments = i.maintenanceManifestDocuments[i.continuation:max]
i.continuation = max
i.done = i.Continuation() == ""
}
return &pkg.MaintenanceManifestDocuments{
MaintenanceManifestDocuments: maintenanceManifestDocuments,
Count: len(maintenanceManifestDocuments),
}, nil
}
func (i *fakeMaintenanceManifestDocumentIterator) Continuation() string {
if i.continuation >= len(i.maintenanceManifestDocuments) {
return ""
}
return fmt.Sprintf("%d", i.continuation)
}
// NewFakeMaintenanceManifestDocumentErroringRawIterator returns a MaintenanceManifestDocumentRawIterator
// whose methods return the given error
func NewFakeMaintenanceManifestDocumentErroringRawIterator(err error) MaintenanceManifestDocumentRawIterator {
return &fakeMaintenanceManifestDocumentErroringRawIterator{err: err}
}
type fakeMaintenanceManifestDocumentErroringRawIterator struct {
err error
}
func (i *fakeMaintenanceManifestDocumentErroringRawIterator) Next(ctx context.Context, maxItemCount int) (*pkg.MaintenanceManifestDocuments, error) {
return nil, i.err
}
func (i *fakeMaintenanceManifestDocumentErroringRawIterator) NextRaw(context.Context, int, interface{}) error {
return i.err
}
func (i *fakeMaintenanceManifestDocumentErroringRawIterator) Continuation() string {
return ""
}

Просмотреть файл

@ -35,6 +35,7 @@ const (
collPlatformWorkloadIdentityRoleSet = "PlatformWorkloadIdentityRoleSets"
collPortal = "Portal"
collSubscriptions = "Subscriptions"
collMaintenanceManifests = "MaintenanceManifests"
)
func NewDatabaseClient(log *logrus.Entry, _env env.Core, authorizer cosmosdb.Authorizer, m metrics.Emitter, aead encryption.AEAD, databaseAccountName string) (cosmosdb.DatabaseClient, error) {

Просмотреть файл

@ -37,6 +37,10 @@ type DatabaseGroupWithPortal interface {
Portal() (Portal, error)
}
type DatabaseGroupWithMaintenanceManifests interface {
MaintenanceManifests() (MaintenanceManifests, error)
}
type DatabaseGroup interface {
DatabaseGroupWithOpenShiftClusters
DatabaseGroupWithSubscriptions
@ -46,6 +50,7 @@ type DatabaseGroup interface {
DatabaseGroupWithAsyncOperations
DatabaseGroupWithBilling
DatabaseGroupWithPortal
DatabaseGroupWithMaintenanceManifests
WithOpenShiftClusters(db OpenShiftClusters) DatabaseGroup
WithSubscriptions(db Subscriptions) DatabaseGroup
@ -55,6 +60,7 @@ type DatabaseGroup interface {
WithAsyncOperations(db AsyncOperations) DatabaseGroup
WithBilling(db Billing) DatabaseGroup
WithPortal(db Portal) DatabaseGroup
WithMaintenanceManifests(db MaintenanceManifests) DatabaseGroup
}
type dbGroup struct {
@ -66,6 +72,7 @@ type dbGroup struct {
asyncOperations AsyncOperations
billing Billing
portal Portal
maintenanceManifests MaintenanceManifests
}
func (d *dbGroup) OpenShiftClusters() (OpenShiftClusters, error) {
@ -164,6 +171,18 @@ func (d *dbGroup) WithPortal(db Portal) DatabaseGroup {
return d
}
func (d *dbGroup) MaintenanceManifests() (MaintenanceManifests, error) {
if d.maintenanceManifests == nil {
return nil, errors.New("no MaintenanceManifests defined")
}
return d.maintenanceManifests, nil
}
func (d *dbGroup) WithMaintenanceManifests(db MaintenanceManifests) DatabaseGroup {
d.maintenanceManifests = db
return d
}
func NewDBGroup() DatabaseGroup {
return &dbGroup{}
}

Просмотреть файл

@ -13,7 +13,7 @@ import (
"github.com/Azure/ARO-RP/pkg/util/recover"
)
func EmitMetrics(ctx context.Context, log *logrus.Entry, dbOpenShiftClusters OpenShiftClusters, m metrics.Emitter) {
func EmitOpenShiftClustersMetrics(ctx context.Context, log *logrus.Entry, dbOpenShiftClusters OpenShiftClusters, m metrics.Emitter) {
defer recover.Panic(log)
t := time.NewTicker(time.Minute)
defer t.Stop()
@ -27,3 +27,18 @@ func EmitMetrics(ctx context.Context, log *logrus.Entry, dbOpenShiftClusters Ope
}
}
}
func EmitMIMOMetrics(ctx context.Context, log *logrus.Entry, dbMaintenanceManifests MaintenanceManifests, m metrics.Emitter) {
defer recover.Panic(log)
t := time.NewTicker(time.Minute)
defer t.Stop()
for range t.C {
i, err := dbMaintenanceManifests.QueueLength(ctx)
if err != nil {
log.Error(err)
} else {
m.EmitGauge("database.maintenancemanifests.queue.length", int64(i), nil)
}
}
}
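EmitMIMOMetrics blocks on a one-minute ticker, so it is meant to be started on its own goroutine, mirroring how EmitOpenShiftClustersMetrics is consumed. A minimal wiring sketch, assuming a cosmosdb client dbc, database name dbName, logger log and metrics emitter m are already in scope (the variable names are illustrative, not taken from this change):

dbMaintenanceManifests, err := database.NewMaintenanceManifests(ctx, dbc, dbName)
if err != nil {
	return err
}
// emit database.maintenancemanifests.queue.length once a minute for the process lifetime
go database.EmitMIMOMetrics(ctx, log, dbMaintenanceManifests, m)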

250
pkg/database/mimo.go Normal file
Просмотреть файл

@ -0,0 +1,250 @@
package database
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/util/uuid"
)
const (
MaintenanceManifestDequeueQueryForCluster = `SELECT * FROM MaintenanceManifests doc WHERE doc.maintenanceManifest.state IN ("Pending") AND doc.clusterResourceID = @clusterResourceID`
MaintenanceManifestQueryForCluster = `SELECT * FROM MaintenanceManifests doc WHERE doc.clusterResourceID = @clusterResourceID`
MaintenanceManifestQueueOverallQuery = `SELECT * FROM MaintenanceManifests doc WHERE doc.maintenanceManifest.state IN ("Pending") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
MaintenanceManifestQueueLengthQuery = `SELECT VALUE COUNT(1) FROM MaintenanceManifests doc WHERE doc.maintenanceManifest.state IN ("Pending") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
)
type MaintenanceManifestDocumentMutator func(*api.MaintenanceManifestDocument) error
type maintenanceManifests struct {
c cosmosdb.MaintenanceManifestDocumentClient
collc cosmosdb.CollectionClient
uuid string
uuidGenerator uuid.Generator
}
type MaintenanceManifests interface {
Create(context.Context, *api.MaintenanceManifestDocument) (*api.MaintenanceManifestDocument, error)
GetByClusterResourceID(ctx context.Context, clusterResourceID string, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error)
GetQueuedByClusterResourceID(ctx context.Context, clusterResourceID string, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error)
Patch(context.Context, string, string, MaintenanceManifestDocumentMutator) (*api.MaintenanceManifestDocument, error)
PatchWithLease(context.Context, string, string, MaintenanceManifestDocumentMutator) (*api.MaintenanceManifestDocument, error)
Lease(ctx context.Context, clusterResourceID string, id string) (*api.MaintenanceManifestDocument, error)
EndLease(context.Context, string, string, api.MaintenanceManifestState, *string) (*api.MaintenanceManifestDocument, error)
Get(context.Context, string, string) (*api.MaintenanceManifestDocument, error)
Delete(context.Context, string, string) error
QueueLength(context.Context) (int, error)
Queued(ctx context.Context, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error)
NewUUID() string
}
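A rough consumer-side sketch of the interface above, creating a manifest and reading it back. It assumes a cosmosdb client dbc, a database name dbName and an already lower-cased clusterResourceID are in scope; the task ID and run window values are placeholders in the style of the tests later in this change, not a prescribed configuration:

db, err := database.NewMaintenanceManifests(ctx, dbc, dbName)
if err != nil {
	return err
}
doc := &api.MaintenanceManifestDocument{
	ID:                db.NewUUID(),
	ClusterResourceID: strings.ToLower(clusterResourceID),
	MaintenanceManifest: api.MaintenanceManifest{
		MaintenanceTaskID: "exampletask",
		State:             api.MaintenanceManifestStatePending,
		RunAfter:          1,
		RunBefore:         1,
	},
}
doc, err = db.Create(ctx, doc) // Create maps Cosmos DB conflicts to 412 PreconditionFailed
if err != nil {
	return err
}
_, err = db.Get(ctx, doc.ClusterResourceID, doc.ID)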
func NewMaintenanceManifests(ctx context.Context, dbc cosmosdb.DatabaseClient, dbName string) (MaintenanceManifests, error) {
collc := cosmosdb.NewCollectionClient(dbc, dbName)
documentClient := cosmosdb.NewMaintenanceManifestDocumentClient(collc, collMaintenanceManifests)
return NewMaintenanceManifestsWithProvidedClient(documentClient, collc, uuid.DefaultGenerator.Generate(), uuid.DefaultGenerator), nil
}
func NewMaintenanceManifestsWithProvidedClient(client cosmosdb.MaintenanceManifestDocumentClient, collectionClient cosmosdb.CollectionClient, uuid string, uuidGenerator uuid.Generator) MaintenanceManifests {
return &maintenanceManifests{
c: client,
uuid: uuid,
collc: collectionClient,
uuidGenerator: uuidGenerator,
}
}
func (c *maintenanceManifests) NewUUID() string {
return c.uuidGenerator.Generate()
}
func (c *maintenanceManifests) Create(ctx context.Context, doc *api.MaintenanceManifestDocument) (*api.MaintenanceManifestDocument, error) {
if doc.ID != strings.ToLower(doc.ID) {
return nil, fmt.Errorf("id %q is not lower case", doc.ID)
}
doc, err := c.c.Create(ctx, doc.ClusterResourceID, doc, nil)
if err, ok := err.(*cosmosdb.Error); ok && err.StatusCode == http.StatusConflict {
err.StatusCode = http.StatusPreconditionFailed
}
return doc, err
}
func (c *maintenanceManifests) Get(ctx context.Context, clusterResourceID string, id string) (*api.MaintenanceManifestDocument, error) {
if id != strings.ToLower(id) {
return nil, fmt.Errorf("id %q is not lower case", id)
}
return c.c.Get(ctx, clusterResourceID, id, nil)
}
// QueueLength returns the number of MaintenanceManifests which are waiting to
// be unqueued. If an error occurs, 0 is returned along with the error.
func (c *maintenanceManifests) QueueLength(ctx context.Context) (int, error) {
partitions, err := c.collc.PartitionKeyRanges(ctx, "MaintenanceManifests")
if err != nil {
return 0, err
}
var countTotal int
for _, r := range partitions.PartitionKeyRanges {
result := c.c.Query("", &cosmosdb.Query{
Query: MaintenanceManifestQueueLengthQuery,
}, &cosmosdb.Options{
PartitionKeyRangeID: r.ID,
})
// because we aggregate the count we don't expect pagination in this query result,
// so we only call Next() once per partition.
var data struct {
api.MissingFields
Document []int `json:"Documents,omitempty"`
}
err := result.NextRaw(ctx, -1, &data)
if err != nil {
return 0, err
}
countTotal = countTotal + data.Document[0]
}
return countTotal, nil
}
// Queued returns an iterator over the MaintenanceManifests which are waiting to
// be unqueued, i.e. those still Pending with no unexpired lease.
func (c *maintenanceManifests) Queued(ctx context.Context, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error) {
return c.c.Query("", &cosmosdb.Query{
Query: MaintenanceManifestQueueOverallQuery,
Parameters: []cosmosdb.Parameter{},
}, &cosmosdb.Options{Continuation: continuation}), nil
}
func (c *maintenanceManifests) Patch(ctx context.Context, clusterResourceID string, id string, f MaintenanceManifestDocumentMutator) (*api.MaintenanceManifestDocument, error) {
return c.patch(ctx, clusterResourceID, id, f, nil)
}
func (c *maintenanceManifests) patch(ctx context.Context, clusterResourceID string, id string, f MaintenanceManifestDocumentMutator, options *cosmosdb.Options) (*api.MaintenanceManifestDocument, error) {
var doc *api.MaintenanceManifestDocument
err := cosmosdb.RetryOnPreconditionFailed(func() (err error) {
doc, err = c.Get(ctx, clusterResourceID, id)
if err != nil {
return
}
err = f(doc)
if err != nil {
return
}
doc, err = c.update(ctx, doc, options)
return
})
return doc, err
}
// PatchWithLease performs a patch on the cluster that verifies the lease is
// being held by this client before applying.
func (c *maintenanceManifests) PatchWithLease(ctx context.Context, clusterResourceID string, id string, f MaintenanceManifestDocumentMutator) (*api.MaintenanceManifestDocument, error) {
return c.patchWithLease(ctx, clusterResourceID, id, f, &cosmosdb.Options{PreTriggers: []string{"renewLease"}})
}
func (c *maintenanceManifests) patchWithLease(ctx context.Context, clusterResourceID string, id string, f MaintenanceManifestDocumentMutator, options *cosmosdb.Options) (*api.MaintenanceManifestDocument, error) {
return c.patch(ctx, clusterResourceID, id, func(doc *api.MaintenanceManifestDocument) error {
if doc.LeaseOwner != c.uuid {
return fmt.Errorf("lost lease")
}
return f(doc)
}, options)
}
func (c *maintenanceManifests) update(ctx context.Context, doc *api.MaintenanceManifestDocument, options *cosmosdb.Options) (*api.MaintenanceManifestDocument, error) {
if doc.ID != strings.ToLower(doc.ID) {
return nil, fmt.Errorf("id %q is not lower case", doc.ID)
}
return c.c.Replace(ctx, doc.ClusterResourceID, doc, options)
}
func (c *maintenanceManifests) ChangeFeed() cosmosdb.MaintenanceManifestDocumentIterator {
return c.c.ChangeFeed(nil)
}
func (c *maintenanceManifests) GetByClusterResourceID(ctx context.Context, clusterResourceID string, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error) {
if clusterResourceID != strings.ToLower(clusterResourceID) {
return nil, fmt.Errorf("clusterResourceID %q is not lower case", clusterResourceID)
}
return c.c.Query("", &cosmosdb.Query{
Query: MaintenanceManifestQueryForCluster,
Parameters: []cosmosdb.Parameter{
{
Name: "@clusterResourceID",
Value: clusterResourceID,
},
},
}, &cosmosdb.Options{Continuation: continuation}), nil
}
func (c *maintenanceManifests) GetQueuedByClusterResourceID(ctx context.Context, clusterResourceID string, continuation string) (cosmosdb.MaintenanceManifestDocumentIterator, error) {
if clusterResourceID != strings.ToLower(clusterResourceID) {
return nil, fmt.Errorf("clusterResourceID %q is not lower case", clusterResourceID)
}
return c.c.Query("", &cosmosdb.Query{
Query: MaintenanceManifestDequeueQueryForCluster,
Parameters: []cosmosdb.Parameter{
{
Name: "@clusterResourceID",
Value: clusterResourceID,
},
},
}, &cosmosdb.Options{Continuation: continuation}), nil
}
func (c *maintenanceManifests) EndLease(ctx context.Context, clusterResourceID string, id string, provisioningState api.MaintenanceManifestState, statusString *string) (*api.MaintenanceManifestDocument, error) {
return c.patchWithLease(ctx, clusterResourceID, id, func(doc *api.MaintenanceManifestDocument) error {
doc.MaintenanceManifest.State = provisioningState
if statusString != nil {
doc.MaintenanceManifest.StatusText = *statusString
}
doc.LeaseOwner = ""
doc.LeaseExpires = 0
return nil
}, nil)
}
// Lease performs the initial lease/dequeue on the document.
func (c *maintenanceManifests) Lease(ctx context.Context, clusterResourceID string, id string) (*api.MaintenanceManifestDocument, error) {
if clusterResourceID != strings.ToLower(clusterResourceID) {
return nil, fmt.Errorf("clusterID %q is not lower case", clusterResourceID)
}
return c.patch(ctx, clusterResourceID, id, func(doc *api.MaintenanceManifestDocument) error {
doc.LeaseOwner = c.uuid
doc.Dequeues++
doc.MaintenanceManifest.State = api.MaintenanceManifestStateInProgress
return nil
}, &cosmosdb.Options{PreTriggers: []string{"renewLease"}})
}
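Taken together, Queued/GetQueuedByClusterResourceID, Lease and EndLease give the actuator its dequeue cycle. A sketch of that flow, assuming db is a MaintenanceManifests instance and clusterResourceID is already lower cased; the real loop lives in pkg/mimo/actuator and will differ in detail (error handling, success-state constant, task dispatch):

iter, err := db.GetQueuedByClusterResourceID(ctx, clusterResourceID, "")
if err != nil {
	return err
}
for {
	docs, err := iter.Next(ctx, -1)
	if err != nil {
		return err
	}
	if docs == nil {
		break
	}
	for _, doc := range docs.MaintenanceManifestDocuments {
		doc, err = db.Lease(ctx, doc.ClusterResourceID, doc.ID) // renewLease pre-trigger bumps leaseExpires
		if err != nil {
			continue // another actuator may have taken the lease first
		}
		// run the task referenced by doc.MaintenanceManifest.MaintenanceTaskID here, then end
		// the lease with the resulting terminal state (Failed shown; the success-state
		// constant is defined alongside the others in pkg/api and is not part of this hunk)
		msg := "task did not complete"
		_, err = db.EndLease(ctx, doc.ClusterResourceID, doc.ID, api.MaintenanceManifestStateFailed, &msg)
		if err != nil {
			return err
		}
	}
}
return nil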
func (c *maintenanceManifests) Delete(ctx context.Context, clusterResourceID string, id string) error {
if clusterResourceID != strings.ToLower(clusterResourceID) {
return fmt.Errorf("clusterID %q is not lower case", clusterResourceID)
}
return c.c.Delete(ctx, clusterResourceID, &api.MaintenanceManifestDocument{ID: id}, nil)
}

Просмотреть файл

@ -17,12 +17,13 @@ import (
)
const (
OpenShiftClustersDequeueQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.openShiftCluster.properties.provisioningState IN ("Creating", "Deleting", "Updating", "AdminUpdating") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
OpenShiftClustersQueueLengthQuery = `SELECT VALUE COUNT(1) FROM OpenShiftClusters doc WHERE doc.openShiftCluster.properties.provisioningState IN ("Creating", "Deleting", "Updating", "AdminUpdating") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
OpenShiftClustersGetQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.key = @key`
OpenshiftClustersPrefixQuery = `SELECT * FROM OpenShiftClusters doc WHERE STARTSWITH(doc.key, @prefix)`
OpenshiftClustersClientIdQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.clientIdKey = @clientID`
OpenshiftClustersResourceGroupQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.clusterResourceGroupIdKey = @resourceGroupID`
OpenShiftClustersDequeueQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.openShiftCluster.properties.provisioningState IN ("Creating", "Deleting", "Updating", "AdminUpdating") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
OpenShiftClustersQueueLengthQuery = `SELECT VALUE COUNT(1) FROM OpenShiftClusters doc WHERE doc.openShiftCluster.properties.provisioningState IN ("Creating", "Deleting", "Updating", "AdminUpdating") AND (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000`
OpenShiftClustersGetQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.key = @key`
OpenshiftClustersPrefixQuery = `SELECT * FROM OpenShiftClusters doc WHERE STARTSWITH(doc.key, @prefix)`
OpenshiftClustersClientIdQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.clientIdKey = @clientID`
OpenshiftClustersResourceGroupQuery = `SELECT * FROM OpenShiftClusters doc WHERE doc.clusterResourceGroupIdKey = @resourceGroupID`
OpenshiftClustersClusterResourceIDOnlyQuery = `SELECT doc.id, doc.key FROM OpenShiftClusters doc WHERE doc.openShiftCluster.properties.provisioningState NOT IN ("Creating", "Deleting")`
)
type OpenShiftClusterDocumentMutator func(*api.OpenShiftClusterDocument) error
@ -52,6 +53,8 @@ type OpenShiftClusters interface {
EndLease(context.Context, string, api.ProvisioningState, api.ProvisioningState, *string) (*api.OpenShiftClusterDocument, error)
GetByClientID(ctx context.Context, partitionKey, clientID string) (*api.OpenShiftClusterDocuments, error)
GetByClusterResourceGroupID(ctx context.Context, partitionKey, resourceGroupID string) (*api.OpenShiftClusterDocuments, error)
GetAllResourceIDs(ctx context.Context, continuation string) (cosmosdb.OpenShiftClusterDocumentIterator, error)
DoDequeue(ctx context.Context, doc *api.OpenShiftClusterDocument) (*api.OpenShiftClusterDocument, error)
NewUUID() string
}
@ -266,9 +269,7 @@ func (c *openShiftClusters) Dequeue(ctx context.Context) (*api.OpenShiftClusterD
}
for _, doc := range docs.OpenShiftClusterDocuments {
doc.LeaseOwner = c.uuid
doc.Dequeues++
doc, err = c.update(ctx, doc, &cosmosdb.Options{PreTriggers: []string{"renewLease"}})
doc, err = c.DoDequeue(ctx, doc)
if cosmosdb.IsErrorStatusCode(err, http.StatusPreconditionFailed) { // someone else got there first
continue
}
@ -277,6 +278,12 @@ func (c *openShiftClusters) Dequeue(ctx context.Context) (*api.OpenShiftClusterD
}
}
func (c *openShiftClusters) DoDequeue(ctx context.Context, doc *api.OpenShiftClusterDocument) (*api.OpenShiftClusterDocument, error) {
doc.LeaseOwner = c.uuid
doc.Dequeues++
return c.update(ctx, doc, &cosmosdb.Options{PreTriggers: []string{"renewLease"}})
}
func (c *openShiftClusters) Lease(ctx context.Context, key string) (*api.OpenShiftClusterDocument, error) {
return c.patchWithLease(ctx, key, func(doc *api.OpenShiftClusterDocument) error {
return nil
@ -347,3 +354,13 @@ func (c *openShiftClusters) GetByClusterResourceGroupID(ctx context.Context, par
}
return docs, nil
}
func (c *openShiftClusters) GetAllResourceIDs(ctx context.Context, continuation string) (cosmosdb.OpenShiftClusterDocumentIterator, error) {
return c.c.Query(
"",
&cosmosdb.Query{
Query: OpenshiftClustersClusterResourceIDOnlyQuery,
},
&cosmosdb.Options{Continuation: continuation},
), nil
}
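GetAllResourceIDs issues a cross-partition query that projects only doc.id and doc.key for clusters that are not being created or deleted; the MIMO scheduler is presumably the intended consumer, enumerating clusters to schedule maintenance against. A paging sketch, assuming dbOpenShiftClusters and a logrus entry log are in scope:

iter, err := dbOpenShiftClusters.GetAllResourceIDs(ctx, "")
if err != nil {
	return err
}
for {
	docs, err := iter.Next(ctx, -1)
	if err != nil {
		return err
	}
	if docs == nil {
		break
	}
	for _, doc := range docs.OpenShiftClusterDocuments {
		log.Info(doc.Key) // only id and key are populated by this query
	}
}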

Просмотреть файл

@ -368,6 +368,46 @@
}
},
"type": "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/triggers"
},
{
"apiVersion": "2023-04-15",
"dependsOn": [
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), parameters('databaseName'))]"
],
"location": "[resourceGroup().location]",
"name": "[concat(parameters('databaseAccountName'), '/', parameters('databaseName'), '/MaintenanceManifests')]",
"properties": {
"options": {},
"resource": {
"defaultTtl": -1,
"id": "MaintenanceManifests",
"partitionKey": {
"kind": "Hash",
"paths": [
"/clusterResourceID"
]
}
}
},
"type": "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers"
},
{
"apiVersion": "2023-04-15",
"dependsOn": [
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), parameters('databaseName'))]",
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers', parameters('databaseAccountName'), parameters('databaseName'), 'MaintenanceManifests')]"
],
"location": "[resourceGroup().location]",
"name": "[concat(parameters('databaseAccountName'), '/', parameters('databaseName'), '/MaintenanceManifests/renewLease')]",
"properties": {
"resource": {
"body": "function trigger() {\n\t\t\t\tvar request = getContext().getRequest();\n\t\t\t\tvar body = request.getBody();\n\t\t\t\tvar date = new Date();\n\t\t\t\tbody[\"leaseExpires\"] = Math.floor(date.getTime() / 1000) + 60;\n\t\t\t\trequest.setBody(body);\n\t\t\t}",
"id": "renewLease",
"triggerOperation": "All",
"triggerType": "Pre"
}
},
"type": "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/triggers"
}
]
}
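The escaped "body" above is the same renewLease pre-trigger that the generator below wires up via renewLeaseTriggerFunction. Decoded for readability (rendered here as a Go raw-string constant; the exact constant definition and whitespace are assumptions, the JavaScript itself is copied from the template):

const renewLeaseTriggerFunction = `function trigger() {
	var request = getContext().getRequest();
	var body = request.getBody();
	var date = new Date();
	body["leaseExpires"] = Math.floor(date.getTime() / 1000) + 60;
	request.setBody(body);
}`

Each write that runs through this pre-trigger pushes leaseExpires 60 seconds into the future, which is what the (doc.leaseExpires ?? 0) < GetCurrentTimestamp() / 1000 filter in the dequeue and queue-length queries keys off.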

Просмотреть файл

@ -1026,6 +1026,34 @@ func (g *generator) database(databaseName string, addDependsOn bool) []*arm.Reso
Type: "Microsoft.DocumentDB/databaseAccounts/sqlDatabases",
}
mimo := &arm.Resource{
Resource: &sdkcosmos.SQLContainerCreateUpdateParameters{
Properties: &sdkcosmos.SQLContainerCreateUpdateProperties{
Resource: &sdkcosmos.SQLContainerResource{
ID: to.StringPtr("MaintenanceManifests"),
PartitionKey: &sdkcosmos.ContainerPartitionKey{
Paths: []*string{
to.StringPtr("/clusterResourceID"),
},
Kind: &hashPartitionKey,
},
DefaultTTL: to.Int32Ptr(-1),
},
Options: &sdkcosmos.CreateUpdateOptions{
Throughput: to.Int32Ptr(cosmosDbGatewayProvisionedThroughputHack),
},
},
Name: to.StringPtr("[concat(parameters('databaseAccountName'), '/', " + databaseName + ", '/MaintenanceManifests')]"),
Type: to.StringPtr("Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers"),
Location: to.StringPtr("[resourceGroup().location]"),
},
APIVersion: azureclient.APIVersion("Microsoft.DocumentDB"),
DependsOn: []string{
"[resourceId('Microsoft.DocumentDB/databaseAccounts/sqlDatabases', parameters('databaseAccountName'), " + databaseName + ")]",
},
Type: "Microsoft.DocumentDB/databaseAccounts/sqlDatabases",
}
if !g.production {
database.Resource.(*sdkcosmos.SQLDatabaseCreateUpdateParameters).Properties.Options = &sdkcosmos.CreateUpdateOptions{
AutoscaleSettings: &sdkcosmos.AutoscaleSettings{
@ -1034,6 +1062,7 @@ func (g *generator) database(databaseName string, addDependsOn bool) []*arm.Reso
}
portal.Resource.(*sdkcosmos.SQLContainerCreateUpdateParameters).Properties.Options = &sdkcosmos.CreateUpdateOptions{}
gateway.Resource.(*sdkcosmos.SQLContainerCreateUpdateParameters).Properties.Options = &sdkcosmos.CreateUpdateOptions{}
mimo.Resource.(*sdkcosmos.SQLContainerCreateUpdateParameters).Properties.Options = &sdkcosmos.CreateUpdateOptions{}
}
rs := []*arm.Resource{
@ -1271,6 +1300,15 @@ func (g *generator) database(databaseName string, addDependsOn bool) []*arm.Reso
g.rpCosmosDBTriggers(databaseName, "Monitors", "renewLease", renewLeaseTriggerFunction, sdkcosmos.TriggerTypePre, sdkcosmos.TriggerOperationAll),
)
// Don't deploy the MIMO container or its triggers in production yet
if !g.production {
rs = append(rs,
mimo,
// MIMO DB triggers
g.rpCosmosDBTriggers(databaseName, "MaintenanceManifests", "renewLease", renewLeaseTriggerFunction, sdkcosmos.TriggerTypePre, sdkcosmos.TriggerOperationAll),
)
}
if addDependsOn {
for i := range rs {
rs[i].DependsOn = append(rs[i].DependsOn,

2
pkg/env/core.go поставляемый
Просмотреть файл

@ -29,6 +29,8 @@ const (
COMPONENT_UPDATE_ROLE_SETS ServiceComponent = "UPDATE_ROLE_SETS"
COMPONENT_DEPLOY ServiceComponent = "DEPLOY"
COMPONENT_TOOLING ServiceComponent = "TOOLING"
COMPONENT_MIMO_SCHEDULER ServiceComponent = "MIMO_SCHEDULER"
COMPONENT_MIMO_ACTUATOR ServiceComponent = "MIMO_ACTUATOR"
)
// Core collects basic configuration information which is expected to be

3
pkg/env/dev.go поставляемый
Просмотреть файл

@ -77,6 +77,9 @@ func (d *dev) AROOperatorImage() string {
}
func (d *dev) Listen() (net.Listener, error) {
if d.Component() == string(COMPONENT_MIMO_ACTUATOR) {
return net.Listen("tcp", ":8445")
}
return net.Listen("tcp", ":8443")
}

Просмотреть файл

@ -0,0 +1,79 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/go-chi/chi/v5"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) postAdminMaintManifestCancel(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
resourceID := resourceIdFromURLParams(r)
b, err := f._postAdminMaintManifestCancel(ctx, r, resourceID)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
adminReply(log, w, nil, b, err)
}
func (f *frontend) _postAdminMaintManifestCancel(ctx context.Context, r *http.Request, resourceID string) ([]byte, error) {
manifestId := chi.URLParam(r, "manifestId")
converter := f.apis[admin.APIVersion].MaintenanceManifestConverter
dbOpenShiftClusters, err := f.dbGroup.OpenShiftClusters()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
doc, err := dbOpenShiftClusters.Get(ctx, resourceID)
if err != nil {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("cluster not found: %s", err.Error()))
}
if doc.OpenShiftCluster.Properties.ProvisioningState == api.ProvisioningStateDeleting {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster being deleted")
}
modifiedDoc, err := dbMaintenanceManifests.Patch(ctx, resourceID, manifestId, func(mmd *api.MaintenanceManifestDocument) error {
if mmd.MaintenanceManifest.State != api.MaintenanceManifestStatePending {
return api.NewCloudError(http.StatusNotAcceptable, api.CloudErrorCodePropertyChangeNotAllowed, "", fmt.Sprintf("cannot cancel task in state %s", mmd.MaintenanceManifest.State))
}
mmd.MaintenanceManifest.State = api.MaintenanceManifestStateCancelled
return nil
})
if err != nil {
cloudErr, ok := err.(*api.CloudError)
if ok {
return nil, cloudErr
} else if cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("manifest not found: %s", err.Error()))
} else {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
}
return json.MarshalIndent(converter.ToExternal(modifiedDoc, true), "", " ")
}

Просмотреть файл

@ -0,0 +1,205 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMOCancelManifest(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
wantStatusCode int
wantResponse *admin.MaintenanceManifest
wantResult func(f *testdatabase.Checker)
wantError string
}
for _, tt := range []*test{
{
name: "no cluster",
wantError: "404: NotFound: : cluster not found: 404 : ",
fixtures: func(f *testdatabase.Fixture) {},
wantStatusCode: http.StatusNotFound,
},
{
name: "cluster being deleted",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateDeleting,
},
},
})
},
wantError: "404: NotFound: : cluster being deleted",
wantStatusCode: http.StatusNotFound,
},
{
name: "no item",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
wantError: "404: NotFound: : manifest not found: 404 : ",
wantStatusCode: http.StatusNotFound,
},
{
name: "cancel pending",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResult: func(c *testdatabase.Checker) {
c.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStateCancelled,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifest{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStateCancelled,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
wantStatusCode: http.StatusOK,
},
{
name: "cannot cancel failed",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStateFailed,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResult: func(c *testdatabase.Checker) {
c.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStateFailed,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantError: "406: PropertyChangeNotAllowed: : cannot cancel task in state Failed",
wantStatusCode: http.StatusNotAcceptable,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
if tt.wantResult != nil {
tt.wantResult(ti.checker)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
resp, b, err := ti.request(http.MethodPost,
fmt.Sprintf("https://server/admin%s/maintenancemanifests/07070707-0707-0707-0707-070707070001/cancel", resourceID),
nil, nil)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
for _, err := range ti.checker.CheckMaintenanceManifests(ti.maintenanceManifestsClient) {
t.Error(err)
}
})
}
}

Просмотреть файл

@ -0,0 +1,98 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) putAdminMaintManifestCreate(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
resourceID := resourceIdFromURLParams(r)
b, err := f._putAdminMaintManifestCreate(ctx, r, resourceID)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
err = statusCodeError(http.StatusCreated)
adminReply(log, w, nil, b, err)
}
func (f *frontend) _putAdminMaintManifestCreate(ctx context.Context, r *http.Request, resourceID string) ([]byte, error) {
converter := f.apis[admin.APIVersion].MaintenanceManifestConverter
validator := f.apis[admin.APIVersion].MaintenanceManifestStaticValidator
body := r.Context().Value(middleware.ContextKeyBody).([]byte)
if len(body) == 0 || !json.Valid(body) {
return nil, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidRequestContent, "", "The request content was invalid and could not be deserialized.")
}
dbOpenShiftClusters, err := f.dbGroup.OpenShiftClusters()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
doc, err := dbOpenShiftClusters.Get(ctx, resourceID)
if err != nil {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("cluster not found: %s", err.Error()))
}
if doc.OpenShiftCluster.Properties.ProvisioningState == api.ProvisioningStateDeleting {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster being deleted")
}
var ext *admin.MaintenanceManifest
err = json.Unmarshal(body, &ext)
if err != nil {
return nil, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidRequestContent, "", "The request content could not be deserialized: "+err.Error())
}
// fill in some defaults
ext.ID = dbMaintenanceManifests.NewUUID()
ext.State = admin.MaintenanceManifestStatePending
if ext.RunAfter == 0 {
ext.RunAfter = int(f.now().Unix())
}
// add a 7d timeout by default
if ext.RunBefore == 0 {
ext.RunBefore = int(f.now().Add(time.Hour * 7 * 24).Unix())
}
err = validator.Static(ext, nil)
if err != nil {
return nil, err
}
manifestDoc := &api.MaintenanceManifestDocument{
ClusterResourceID: resourceID,
}
converter.ToInternal(ext, manifestDoc)
savedDoc, err := dbMaintenanceManifests.Create(ctx, manifestDoc)
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
return json.MarshalIndent(converter.ToExternal(savedDoc, true), "", " ")
}

Просмотреть файл

@ -0,0 +1,208 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMOCreateManifest(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
body *admin.MaintenanceManifest
wantStatusCode int
wantResponse *admin.MaintenanceManifest
wantResult func(f *testdatabase.Checker)
wantError string
}
for _, tt := range []*test{
{
name: "no cluster",
wantError: "404: NotFound: : cluster not found: 404 : ",
fixtures: func(f *testdatabase.Fixture) {},
wantStatusCode: http.StatusNotFound,
},
{
name: "cluster being deleted",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateDeleting,
},
},
})
},
wantError: "404: NotFound: : cluster being deleted",
wantStatusCode: http.StatusNotFound,
},
{
name: "invalid",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
body: &admin.MaintenanceManifest{},
wantError: "400: InvalidParameter: maintenanceTaskID: Must be provided",
wantStatusCode: http.StatusBadRequest,
},
{
name: "good",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
body: &admin.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
wantResult: func(c *testdatabase.Checker) {
c.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifest{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
wantStatusCode: http.StatusCreated,
},
{
name: "default set to pending",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
body: &admin.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
RunAfter: 1,
RunBefore: 1,
},
wantResult: func(c *testdatabase.Checker) {
c.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifest{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
wantStatusCode: http.StatusCreated,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
if tt.wantResult != nil {
tt.wantResult(ti.checker)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
resp, b, err := ti.request(http.MethodPut,
fmt.Sprintf("https://server/admin%s/maintenancemanifests", resourceID),
http.Header{
"Content-Type": []string{"application/json"},
}, tt.body)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
for _, err := range ti.checker.CheckMaintenanceManifests(ti.maintenanceManifestsClient) {
t.Error(err)
}
})
}
}

Просмотреть файл

@ -0,0 +1,58 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/go-chi/chi/v5"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) deleteAdminMaintManifest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
resourceID := resourceIdFromURLParams(r)
b, err := f._deleteAdminMaintManifest(ctx, r, resourceID)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
adminReply(log, w, nil, b, err)
}
func (f *frontend) _deleteAdminMaintManifest(ctx context.Context, r *http.Request, resourceID string) ([]byte, error) {
manifestId := chi.URLParam(r, "manifestId")
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
// Note: We do not check that the MaintenanceManifest belongs to an existing cluster,
// since a race condition could leave a deleting/deleted cluster with a queued
// manifest.
err = dbMaintenanceManifests.Delete(ctx, resourceID, manifestId)
if err != nil {
cloudErr, ok := err.(*api.CloudError)
if ok {
return nil, cloudErr
} else if cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("manifest not found: %s", err.Error()))
} else {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
}
return json.MarshalIndent(map[string]string{}, "", " ")
}

Просмотреть файл

@ -0,0 +1,127 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMODeleteManifest(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
wantStatusCode int
wantResponse []byte
wantResult func(f *testdatabase.Checker)
wantError string
}
for _, tt := range []*test{
{
name: "no item",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
wantError: "404: NotFound: : manifest not found: 404 : ",
wantStatusCode: http.StatusNotFound,
},
{
name: "delete successfully",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResult: func(c *testdatabase.Checker) {
},
wantResponse: []byte("{}\n"),
wantStatusCode: http.StatusOK,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
if tt.wantResult != nil {
tt.wantResult(ti.checker)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
resp, b, err := ti.request(http.MethodDelete,
fmt.Sprintf("https://server/admin%s/maintenancemanifests/07070707-0707-0707-0707-070707070001", resourceID),
nil, nil)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
for _, err := range ti.checker.CheckMaintenanceManifests(ti.maintenanceManifestsClient) {
t.Error(err)
}
})
}
}

Просмотреть файл

@ -0,0 +1,64 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/go-chi/chi/v5"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) getSingleAdminMaintManifest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
resourceID := resourceIdFromURLParams(r)
b, err := f._getSingleAdminMaintManifest(ctx, r, resourceID)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
adminReply(log, w, nil, b, err)
}
func (f *frontend) _getSingleAdminMaintManifest(ctx context.Context, r *http.Request, resourceID string) ([]byte, error) {
manifestId := chi.URLParam(r, "manifestId")
converter := f.apis[admin.APIVersion].MaintenanceManifestConverter
dbOpenShiftClusters, err := f.dbGroup.OpenShiftClusters()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
doc, err := dbOpenShiftClusters.Get(ctx, resourceID)
if err != nil {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("cluster not found: %s", err.Error()))
}
if doc.OpenShiftCluster.Properties.ProvisioningState == api.ProvisioningStateDeleting {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster being deleted")
}
manifest, err := dbMaintenanceManifests.Get(ctx, resourceID, manifestId)
if err != nil {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("manifest not found: %s", err.Error()))
}
return json.MarshalIndent(converter.ToExternal(manifest, true), "", " ")
}

Просмотреть файл

@ -0,0 +1,155 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMOGet(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
wantStatusCode int
wantResponse *admin.MaintenanceManifest
wantError string
}
for _, tt := range []*test{
{
name: "no cluster",
wantError: "404: NotFound: : cluster not found: 404 : ",
fixtures: func(f *testdatabase.Fixture) {},
wantStatusCode: http.StatusNotFound,
},
{
name: "cluster being deleted",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateDeleting,
},
},
})
},
wantError: "404: NotFound: : cluster being deleted",
wantStatusCode: http.StatusNotFound,
},
{
name: "no item",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
wantError: "404: NotFound: : manifest not found: 404 : ",
wantStatusCode: http.StatusNotFound,
},
{
name: "get entry",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifest{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
wantStatusCode: http.StatusOK,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
resp, b, err := ti.request(http.MethodGet,
fmt.Sprintf("https://server/admin%s/maintenancemanifests/07070707-0707-0707-0707-070707070001", resourceID),
nil, nil)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
})
}
}

Просмотреть файл

@ -0,0 +1,96 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"strconv"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) getAdminMaintManifests(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
resourceID := resourceIdFromURLParams(r)
b, err := f._getAdminMaintManifests(ctx, r, resourceID)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
adminReply(log, w, nil, b, err)
}
func (f *frontend) _getAdminMaintManifests(ctx context.Context, r *http.Request, resourceID string) ([]byte, error) {
limitstr := r.URL.Query().Get("limit")
limit, err := strconv.Atoi(limitstr)
if err != nil {
limit = 100
}
converter := f.apis[admin.APIVersion].MaintenanceManifestConverter
dbOpenShiftClusters, err := f.dbGroup.OpenShiftClusters()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
doc, err := dbOpenShiftClusters.Get(ctx, resourceID)
if err != nil {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", fmt.Sprintf("cluster not found: %s", err.Error()))
}
if doc.OpenShiftCluster.Properties.ProvisioningState == api.ProvisioningStateDeleting {
return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster being deleted")
}
skipToken, err := f.parseSkipToken(r.URL.String())
if err != nil {
return nil, err
}
i, err := dbMaintenanceManifests.GetByClusterResourceID(ctx, resourceID, skipToken)
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
docList := make([]*api.MaintenanceManifestDocument, 0)
for {
docs, err := i.Next(ctx, int(math.Min(float64(limit), 10)))
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", fmt.Errorf("failed reading next manifest document: %w", err).Error())
}
if docs == nil {
break
}
docList = append(docList, docs.MaintenanceManifestDocuments...)
if len(docList) >= limit {
break
}
}
nextLink, err := f.buildNextLink(r.Header.Get("Referer"), i.Continuation())
if err != nil {
return nil, err
}
return json.MarshalIndent(converter.ToExternalList(docList, nextLink, true), "", " ")
}

Просмотреть файл

@ -0,0 +1,210 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMOListManifests(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
limit int
wantStatusCode int
wantResponse *admin.MaintenanceManifestList
wantError string
}
for _, tt := range []*test{
{
name: "no entries",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
wantResponse: &admin.MaintenanceManifestList{
MaintenanceManifests: []*admin.MaintenanceManifest{},
},
wantStatusCode: http.StatusOK,
},
{
name: "single entry",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifestList{
MaintenanceManifests: []*admin.MaintenanceManifest{
{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
},
},
wantStatusCode: http.StatusOK,
},
{
name: "limit over",
limit: 1,
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampleset2",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifestList{
NextLink: "https://mockrefererhost/?%24skipToken=" + url.QueryEscape(base64.StdEncoding.EncodeToString([]byte("FAKE1"))),
MaintenanceManifests: []*admin.MaintenanceManifest{
{
ID: "07070707-0707-0707-0707-070707070001",
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
},
},
wantStatusCode: http.StatusOK,
},
{
name: "cluster being deleted",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateDeleting,
},
},
})
},
wantError: "404: NotFound: : cluster being deleted",
wantStatusCode: http.StatusNotFound,
},
{
name: "missing cluster",
wantError: "404: NotFound: : cluster not found: 404 : ",
wantStatusCode: http.StatusNotFound,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
if tt.limit == 0 {
tt.limit = 100
}
t.Logf("limit: %d", tt.limit)
resp, b, err := ti.request(http.MethodGet,
fmt.Sprintf("https://server/admin%s/maintenancemanifests?limit=%d", resourceID, tt.limit),
http.Header{
"Referer": []string{"https://mockrefererhost/"},
}, nil)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
})
}
}

View file

@ -0,0 +1,81 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"strconv"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
func (f *frontend) getAdminQueuedMaintManifests(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
b, err := f._getAdminQueuedMaintManifests(ctx, r)
if cloudErr, ok := err.(*api.CloudError); ok {
api.WriteCloudError(w, cloudErr)
return
}
adminReply(log, w, nil, b, err)
}
func (f *frontend) _getAdminQueuedMaintManifests(ctx context.Context, r *http.Request) ([]byte, error) {
limitstr := r.URL.Query().Get("limit")
limit, err := strconv.Atoi(limitstr)
if err != nil {
limit = 100
}
converter := f.apis[admin.APIVersion].MaintenanceManifestConverter
dbMaintenanceManifests, err := f.dbGroup.MaintenanceManifests()
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
skipToken, err := f.parseSkipToken(r.URL.String())
if err != nil {
return nil, err
}
i, err := dbMaintenanceManifests.Queued(ctx, skipToken)
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
}
docList := make([]*api.MaintenanceManifestDocument, 0)
for {
docs, err := i.Next(ctx, int(math.Min(float64(limit), 10)))
if err != nil {
return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", fmt.Errorf("failed reading next manifest document: %w", err).Error())
}
if docs == nil {
break
}
docList = append(docList, docs.MaintenanceManifestDocuments...)
if len(docList) >= limit {
break
}
}
nextLink, err := f.buildNextLink(r.Header.Get("Referer"), i.Continuation())
if err != nil {
return nil, err
}
return json.MarshalIndent(converter.ToExternalList(docList, nextLink, false), "", " ")
}
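
Both list handlers page through Cosmos DB in chunks of at most 10 documents and hand the continuation token back to the caller through nextLink. The snippet below is a minimal, hypothetical admin-client loop that follows nextLink until the listing is exhausted; the https://localhost:8443 endpoint, the relaxed TLS setup and the JSON field names are assumptions for illustration, not part of this change (the admin API additionally requires client certificate authentication).

package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed local RP admin endpoint; adjust to your environment.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	url := "https://localhost:8443/admin/maintenancemanifests/queued?limit=100"
	for url != "" {
		resp, err := client.Get(url)
		if err != nil {
			panic(err)
		}

		var page struct {
			// Field names are assumptions; see admin.MaintenanceManifestList
			// for the authoritative wire format.
			MaintenanceManifests []json.RawMessage `json:"maintenanceManifests"`
			NextLink             string            `json:"nextLink,omitempty"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			panic(err)
		}
		resp.Body.Close()

		fmt.Printf("got %d queued manifests\n", len(page.MaintenanceManifests))

		// Follow the continuation link produced by buildNextLink, if any.
		url = page.NextLink
	}
}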

View file

@ -0,0 +1,189 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/metrics/noop"
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestMIMOQueuedManifests(t *testing.T) {
mockSubID := "00000000-0000-0000-0000-000000000000"
mockTenantID := "00000000-0000-0000-0000-000000000000"
resourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
ctx := context.Background()
type test struct {
name string
fixtures func(f *testdatabase.Fixture)
limit int
wantStatusCode int
wantResponse *admin.MaintenanceManifestList
wantError string
}
for _, tt := range []*test{
{
name: "no entries",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
},
wantResponse: &admin.MaintenanceManifestList{
MaintenanceManifests: []*admin.MaintenanceManifest{},
},
wantStatusCode: http.StatusOK,
},
{
name: "single entry",
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifestList{
MaintenanceManifests: []*admin.MaintenanceManifest{
{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
},
},
wantStatusCode: http.StatusOK,
},
{
name: "limit over",
limit: 1,
fixtures: func(f *testdatabase.Fixture) {
f.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(resourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: resourceID,
Name: "resourceName",
Type: "Microsoft.RedHatOpenShift/openshiftClusters",
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampletask",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
f.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceManifest: api.MaintenanceManifest{
MaintenanceTaskID: "exampleset2",
State: api.MaintenanceManifestStatePending,
RunAfter: 1,
RunBefore: 1,
},
})
},
wantResponse: &admin.MaintenanceManifestList{
NextLink: "https://mockrefererhost/?%24skipToken=" + url.QueryEscape(base64.StdEncoding.EncodeToString([]byte("FAKE1"))),
MaintenanceManifests: []*admin.MaintenanceManifest{
{
ID: "07070707-0707-0707-0707-070707070001",
ClusterResourceID: strings.ToLower(resourceID),
MaintenanceTaskID: "exampletask",
State: admin.MaintenanceManifestStatePending,
Priority: 0,
RunAfter: 1,
RunBefore: 1,
},
},
},
wantStatusCode: http.StatusOK,
},
} {
t.Run(tt.name, func(t *testing.T) {
now := func() time.Time { return time.Unix(1000, 0) }
ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions().WithMaintenanceManifests(now)
defer ti.done()
ti.fixture.AddSubscriptionDocuments(&api.SubscriptionDocument{
ID: mockSubID,
Subscription: &api.Subscription{
State: api.SubscriptionStateRegistered,
Properties: &api.SubscriptionProperties{
TenantID: mockTenantID,
},
},
})
err := ti.buildFixtures(tt.fixtures)
if err != nil {
t.Fatal(err)
}
f, err := NewFrontend(ctx, ti.audit, ti.log, ti.env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, testdatabase.NewFakeAEAD(), nil, nil, nil, nil, nil)
if err != nil {
t.Fatal(err)
}
go f.Run(ctx, nil, nil)
if tt.limit == 0 {
tt.limit = 100
}
t.Logf("limit: %d", tt.limit)
resp, b, err := ti.request(http.MethodGet,
fmt.Sprintf("https://server/admin/maintenancemanifests/queued?limit=%d", tt.limit),
http.Header{
"Referer": []string{"https://mockrefererhost/"},
}, nil)
if err != nil {
t.Fatal(err)
}
err = validateResponse(resp, b, tt.wantStatusCode, tt.wantError, tt.wantResponse)
if err != nil {
t.Error(err)
}
})
}
}

View file

@ -10,10 +10,12 @@ import (
"log"
"net"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/go-chi/chi/v5"
chiMiddlewares "github.com/go-chi/chi/v5/middleware"
"github.com/sirupsen/logrus"
@ -48,6 +50,7 @@ type frontendDBs interface {
database.DatabaseGroupWithAsyncOperations
database.DatabaseGroupWithSubscriptions
database.DatabaseGroupWithPlatformWorkloadIdentityRoleSets
database.DatabaseGroupWithMaintenanceManifests
}
type kubeActionsFactory func(*logrus.Entry, env.Interface, *api.OpenShiftCluster) (adminactions.KubeActions, error)
@ -286,6 +289,10 @@ func (f *frontend) chiAuthenticatedRoutes(router chi.Router) {
})
r.Get("/supportedvmsizes", f.supportedvmsizes)
r.Route("/maintenancemanifests", func(r chi.Router) {
r.Get("/queued", f.getAdminQueuedMaintManifests)
})
r.Route("/subscriptions/{subscriptionId}", func(r chi.Router) {
r.Route("/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}", func(r chi.Router) {
// Etcd recovery
@ -326,6 +333,17 @@ func (f *frontend) chiAuthenticatedRoutes(router chi.Router) {
r.With(f.maintenanceMiddleware.UnplannedMaintenanceSignal).Post("/etcdcertificaterenew", f.postAdminOpenShiftClusterEtcdCertificateRenew)
r.With(f.maintenanceMiddleware.UnplannedMaintenanceSignal).Post("/deletemanagedresource", f.postAdminOpenShiftDeleteManagedResource)
// MIMO
r.Route("/maintenancemanifests", func(r chi.Router) {
r.Get("/", f.getAdminMaintManifests)
r.Put("/", f.putAdminMaintManifestCreate)
r.Route("/{manifestId}", func(r chi.Router) {
r.Get("/", f.getSingleAdminMaintManifest)
r.Delete("/", f.deleteAdminMaintManifest)
r.Post("/cancel", f.postAdminMaintManifestCancel)
})
})
})
})
@ -493,3 +511,21 @@ func frontendOperationResultLog(log *logrus.Entry, method string, err error) {
log = log.WithField("errorDetails", err.Error())
log.Info("front end operation failed")
}
// resourceIdFromURLParams returns an Azure Resource ID built out of the
// individual parameters of the URL.
func resourceIdFromURLParams(r *http.Request) string {
subID, resType, resProvider, resName, resGroupName := chi.URLParam(r, "subscriptionId"),
chi.URLParam(r, "resourceType"),
chi.URLParam(r, "resourceProviderNamespace"),
chi.URLParam(r, "resourceName"),
chi.URLParam(r, "resourceGroupName")
return strings.ToLower(azure.Resource{
SubscriptionID: subID,
ResourceGroup: resGroupName,
ResourceType: resType,
ResourceName: resName,
Provider: resProvider,
}.String())
}
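
As a rough sketch of what resourceIdFromURLParams produces: the chi URL parameters are recombined through go-autorest's azure.Resource and lower-cased, so the result lines up with the lower-cased Key under which OpenShiftClusterDocuments are stored. The values below are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical URL parameters, as chi would extract them from an admin route.
	id := strings.ToLower(azure.Resource{
		SubscriptionID: "00000000-0000-0000-0000-000000000000",
		ResourceGroup:  "myResourceGroup",
		Provider:       "Microsoft.RedHatOpenShift",
		ResourceType:   "openShiftClusters",
		ResourceName:   "myCluster",
	}.String())

	// Prints (roughly):
	// /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myresourcegroup/providers/microsoft.redhatopenshift/openshiftclusters/mycluster
	fmt.Println(id)
}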

View file

@ -15,6 +15,7 @@ import (
"net/http"
"reflect"
"testing"
"time"
"github.com/go-test/deep"
"github.com/sirupsen/logrus"
@ -83,6 +84,8 @@ type testInfra struct {
openShiftVersionsDatabase database.OpenShiftVersions
platformWorkloadIdentityRoleSetsClient *cosmosdb.FakePlatformWorkloadIdentityRoleSetDocumentClient
platformWorkloadIdentityRoleSetsDatabase database.PlatformWorkloadIdentityRoleSets
maintenanceManifestsClient *cosmosdb.FakeMaintenanceManifestDocumentClient
maintenanceManifestsDatabase database.MaintenanceManifests
}
func newTestInfra(t *testing.T) *testInfra {
@ -204,6 +207,13 @@ func (ti *testInfra) WithClusterManagerConfigurations() *testInfra {
return ti
}
func (ti *testInfra) WithMaintenanceManifests(now func() time.Time) *testInfra {
ti.maintenanceManifestsDatabase, ti.maintenanceManifestsClient = testdatabase.NewFakeMaintenanceManifests(now)
ti.fixture.WithMaintenanceManifests(ti.maintenanceManifestsDatabase)
ti.dbGroup.WithMaintenanceManifests(ti.maintenanceManifestsDatabase)
return ti
}
func (ti *testInfra) done() {
ti.controller.Finish()
ti.cli.CloseIdleConnections()
@ -254,7 +264,7 @@ func (ti *testInfra) request(method, url string, header http.Header, in interfac
func validateResponse(resp *http.Response, b []byte, wantStatusCode int, wantError string, wantResponse interface{}) error {
if resp.StatusCode != wantStatusCode {
-return fmt.Errorf("unexpected status code %d, wanted %d", resp.StatusCode, wantStatusCode)
+return fmt.Errorf("unexpected status code %d, wanted %d: %s", resp.StatusCode, wantStatusCode, string(b))
}
if wantError != "" {
@ -264,8 +274,8 @@ func validateResponse(resp *http.Response, b []byte, wantStatusCode int, wantErr
return err
}
-if cloudErr.Error() != wantError {
-return fmt.Errorf("unexpected error %s, wanted %s", cloudErr.Error(), wantError)
+if diff := deep.Equal(cloudErr.Error(), wantError); diff != nil {
+return fmt.Errorf("unexpected error %s, wanted %s (%s)", cloudErr.Error(), wantError, diff)
}
return nil

View file

@ -0,0 +1,422 @@
package actuator
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"go.uber.org/mock/gomock"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/mimo/tasks"
"github.com/Azure/ARO-RP/pkg/util/mimo"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testdatabase "github.com/Azure/ARO-RP/test/database"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
var _ = Describe("MIMO Actuator", Ordered, func() {
var fixtures *testdatabase.Fixture
var checker *testdatabase.Checker
var manifests database.MaintenanceManifests
var manifestsClient *cosmosdb.FakeMaintenanceManifestDocumentClient
var clusters database.OpenShiftClusters
var clustersClient *cosmosdb.FakeOpenShiftClusterDocumentClient
var a Actuator
var ctx context.Context
var cancel context.CancelFunc
//var hook *test.Hook
var log *logrus.Entry
var _env env.Interface
var controller *gomock.Controller
mockSubID := "00000000-0000-0000-0000-000000000000"
clusterResourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
AfterAll(func() {
if cancel != nil {
cancel()
}
if controller != nil {
controller.Finish()
}
})
BeforeAll(func() {
controller = gomock.NewController(nil)
_env = mock_env.NewMockInterface(controller)
ctx, cancel = context.WithCancel(context.Background())
_, log = testlog.New()
fixtures = testdatabase.NewFixture()
checker = testdatabase.NewChecker()
})
BeforeEach(func() {
now := func() time.Time { return time.Unix(120, 0) }
manifests, manifestsClient = testdatabase.NewFakeMaintenanceManifests(now)
clusters, clustersClient = testdatabase.NewFakeOpenShiftClusters()
a = &actuator{
log: log,
env: _env,
clusterResourceID: strings.ToLower(clusterResourceID),
mmf: manifests,
oc: clusters,
tasks: map[string]tasks.MaintenanceTask{},
now: now,
}
})
JustBeforeEach(func() {
err := fixtures.WithOpenShiftClusters(clusters).WithMaintenanceManifests(manifests).Create()
Expect(err).ToNot(HaveOccurred())
})
When("old manifest", func() {
var manifestID string
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateSucceeded,
},
},
})
manifestID = manifests.NewUUID()
fixtures.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
RunBefore: 60,
RunAfter: 0,
},
})
checker.Clear()
checker.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateTimedOut,
StatusText: "timed out at 1970-01-01 00:02:00 +0000 UTC",
RunBefore: 60,
RunAfter: 0,
},
})
checker.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateSucceeded,
},
},
})
})
It("expires them", func() {
didWork, err := a.Process(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(didWork).To(BeFalse())
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
errs = checker.CheckOpenShiftClusters(clustersClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
})
When("new manifest", func() {
var manifestID string
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateSucceeded,
MaintenanceState: api.MaintenanceStateNone,
},
},
})
manifestID = manifests.NewUUID()
fixtures.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
MaintenanceTaskID: "0",
RunBefore: 600,
RunAfter: 0,
},
})
checker.Clear()
checker.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
Dequeues: 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateCompleted,
MaintenanceTaskID: "0",
StatusText: "done",
RunBefore: 600,
RunAfter: 0,
},
})
checker.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateSucceeded,
MaintenanceState: api.MaintenanceStateNone,
},
},
})
})
It("runs them", func() {
a.AddMaintenanceTasks(map[string]tasks.MaintenanceTask{
"0": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
// check that we are in progress during this
Expect(mmd.MaintenanceManifest.State).To(Equal(api.MaintenanceManifestStateInProgress))
th.SetResultMessage("done")
return nil
},
})
didWork, err := a.Process(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(didWork).To(BeTrue())
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
errs = checker.CheckOpenShiftClusters(clustersClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
})
When("new manifest for a task which repeatedly fails", func() {
var manifestID string
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
},
})
manifestID = manifests.NewUUID()
fixtures.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
// Set the dequeue count to right before it would fail
Dequeues: maxDequeueCount - 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
MaintenanceTaskID: "0",
RunBefore: 600,
RunAfter: 0,
},
})
checker.Clear()
checker.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestID,
Dequeues: maxDequeueCount,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateRetriesExceeded,
MaintenanceTaskID: "0",
StatusText: "did not succeed after 5 times, failing -- TransientError: oh no",
RunBefore: 600,
RunAfter: 0,
},
})
})
It("stops after retries exceeded", func() {
a.AddMaintenanceTasks(map[string]tasks.MaintenanceTask{
"0": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
return mimo.TransientError(errors.New("oh no"))
},
})
didWork, err := a.Process(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(didWork).To(BeTrue())
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
})
When("new manifests", func() {
var manifestIDs []string
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
},
})
manifestIDs = []string{manifests.NewUUID(), manifests.NewUUID(), manifests.NewUUID()}
fixtures.AddMaintenanceManifestDocuments(&api.MaintenanceManifestDocument{
ID: manifestIDs[0],
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
MaintenanceTaskID: "0",
RunBefore: 600,
RunAfter: 0,
Priority: 2,
},
},
&api.MaintenanceManifestDocument{
ID: manifestIDs[1],
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
MaintenanceTaskID: "1",
RunBefore: 600,
RunAfter: 0,
Priority: 1,
},
},
&api.MaintenanceManifestDocument{
ID: manifestIDs[2],
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
MaintenanceTaskID: "2",
RunBefore: 600,
RunAfter: 1,
Priority: 0,
},
})
checker.Clear()
checker.AddMaintenanceManifestDocuments(
&api.MaintenanceManifestDocument{
ID: manifestIDs[0],
Dequeues: 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateCompleted,
MaintenanceTaskID: "0",
StatusText: "done",
RunBefore: 600,
RunAfter: 0,
Priority: 2,
},
},
&api.MaintenanceManifestDocument{
ID: manifestIDs[1],
Dequeues: 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateCompleted,
MaintenanceTaskID: "1",
StatusText: "done",
RunBefore: 600,
RunAfter: 0,
Priority: 1,
},
},
&api.MaintenanceManifestDocument{
ID: manifestIDs[2],
Dequeues: 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateCompleted,
MaintenanceTaskID: "2",
StatusText: "done",
RunBefore: 600,
RunAfter: 1,
Priority: 0,
},
})
})
It("runs them in priority order", func() {
ordering := []string{}
a.AddMaintenanceTasks(map[string]tasks.MaintenanceTask{
"0": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
ordering = append(ordering, "0")
th.SetResultMessage("done")
return nil
},
"1": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
ordering = append(ordering, "1")
th.SetResultMessage("done")
return nil
},
"2": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
ordering = append(ordering, "2")
th.SetResultMessage("done")
return nil
},
})
didWork, err := a.Process(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(didWork).To(BeTrue())
// We expect 1 first (start time of 0, higher priority), then 0 (start
// time of 0, lower priority), then 2 last (start time of 1, so it runs
// after the others despite having the highest priority)
Expect(ordering).To(BeEquivalentTo([]string{"1", "0", "2"}))
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
})
})
func TestActuator(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Actuator Suite")
}

View file

@ -0,0 +1,258 @@
package actuator
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"sort"
"strings"
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/mimo/tasks"
utilmimo "github.com/Azure/ARO-RP/pkg/util/mimo"
)
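// maxDequeueCount is the maximum number of times a manifest is leased for
// execution; once Dequeues reaches this value a still-failing manifest is
// marked RetriesExceeded instead of being returned to Pending.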
const maxDequeueCount = 5
type Actuator interface {
Process(context.Context) (bool, error)
AddMaintenanceTasks(map[string]tasks.MaintenanceTask)
}
type actuator struct {
env env.Interface
log *logrus.Entry
now func() time.Time
clusterResourceID string
oc database.OpenShiftClusters
mmf database.MaintenanceManifests
tasks map[string]tasks.MaintenanceTask
}
func NewActuator(
ctx context.Context,
_env env.Interface,
log *logrus.Entry,
clusterResourceID string,
oc database.OpenShiftClusters,
mmf database.MaintenanceManifests,
now func() time.Time) (Actuator, error) {
a := &actuator{
env: _env,
log: log,
clusterResourceID: strings.ToLower(clusterResourceID),
oc: oc,
mmf: mmf,
tasks: make(map[string]tasks.MaintenanceTask),
now: now,
}
return a, nil
}
func (a *actuator) AddMaintenanceTasks(tasks map[string]tasks.MaintenanceTask) {
maps.Copy(a.tasks, tasks)
}
func (a *actuator) Process(ctx context.Context) (bool, error) {
// Get the manifests for this cluster which need to be worked
i, err := a.mmf.GetQueuedByClusterResourceID(ctx, a.clusterResourceID, "")
if err != nil {
err = fmt.Errorf("failed getting manifests: %w", err)
a.log.Error(err)
return false, err
}
docList := make([]*api.MaintenanceManifestDocument, 0)
for {
docs, err := i.Next(ctx, -1)
if err != nil {
err = fmt.Errorf("failed reading next manifest document: %w", err)
a.log.Error(err)
return false, err
}
if docs == nil {
break
}
docList = append(docList, docs.MaintenanceManifestDocuments...)
}
manifestsToAction := make([]*api.MaintenanceManifestDocument, 0)
// Order manifests in order of RunAfter, and then Priority for ones with the
// same RunAfter.
sort.SliceStable(docList, func(i, j int) bool {
if docList[i].MaintenanceManifest.RunAfter == docList[j].MaintenanceManifest.RunAfter {
return docList[i].MaintenanceManifest.Priority < docList[j].MaintenanceManifest.Priority
}
return docList[i].MaintenanceManifest.RunAfter < docList[j].MaintenanceManifest.RunAfter
})
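// Worked example (the same values the actuator unit tests use): given three
// pending manifests with (RunAfter=0, Priority=2), (RunAfter=0, Priority=1)
// and (RunAfter=1, Priority=0), the slice sorts to the Priority=1 manifest,
// then the Priority=2 manifest, then the RunAfter=1 manifest: RunAfter wins
// over Priority, and a lower Priority value sorts first.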
evaluationTime := a.now()
// Check for manifests that have timed out first
for _, doc := range docList {
if evaluationTime.After(time.Unix(int64(doc.MaintenanceManifest.RunBefore), 0)) {
// timed out, mark as such
a.log.Infof("marking %v as outdated: %v older than %v", doc.ID, doc.MaintenanceManifest.RunBefore, evaluationTime.UTC())
_, err := a.mmf.Patch(ctx, a.clusterResourceID, doc.ID, func(d *api.MaintenanceManifestDocument) error {
d.MaintenanceManifest.State = api.MaintenanceManifestStateTimedOut
d.MaintenanceManifest.StatusText = fmt.Sprintf("timed out at %s", evaluationTime.UTC())
return nil
})
if err != nil {
a.log.Error(fmt.Errorf("failed to patch manifest %s with state TimedOut; will still attempt to process other manifests: %w", doc.ID, err))
}
} else {
// not timed out, do something about it
manifestsToAction = append(manifestsToAction, doc)
}
}
// Nothing to do, don't dequeue
if len(manifestsToAction) == 0 {
return false, nil
}
a.log.Infof("Processing %d manifests", len(manifestsToAction))
// Dequeue the document
oc, err := a.oc.Get(ctx, a.clusterResourceID)
if err != nil {
return false, fmt.Errorf("failed getting cluster document: %w", err)
}
oc, err = a.oc.DoDequeue(ctx, oc)
if err != nil {
return false, fmt.Errorf("failed dequeuing cluster document: %w", err) // This will include StatusPreconditionFailed errors
}
// Mark the maintenance state as unplanned and put it in AdminUpdating
a.log.Infof("Marking cluster as in AdminUpdating")
oc, err = a.oc.PatchWithLease(ctx, a.clusterResourceID, func(oscd *api.OpenShiftClusterDocument) error {
oscd.OpenShiftCluster.Properties.LastProvisioningState = oscd.OpenShiftCluster.Properties.ProvisioningState
oscd.OpenShiftCluster.Properties.ProvisioningState = api.ProvisioningStateAdminUpdating
oscd.OpenShiftCluster.Properties.MaintenanceState = api.MaintenanceStateUnplanned
return nil
})
if err != nil {
err = fmt.Errorf("failed setting provisioning state on cluster document: %w", err)
a.log.Error(err)
// attempt to end the lease on the cluster document, for what it's worth
_, leaseErr := a.oc.EndLease(ctx, a.clusterResourceID, oc.OpenShiftCluster.Properties.LastProvisioningState, oc.OpenShiftCluster.Properties.FailedProvisioningState, nil)
if leaseErr != nil {
return false, fmt.Errorf("failed ending lease early on cluster document: %w", leaseErr)
}
return false, err
}
taskContext := newTaskContext(ctx, a.env, a.log, oc)
// Execute on the manifests we want to action
for _, doc := range manifestsToAction {
taskLog := a.log.WithFields(logrus.Fields{
"manifestID": doc.ID,
"taskID": doc.MaintenanceManifest.MaintenanceTaskID,
})
taskLog.Info("begin processing manifest")
// Attempt a dequeue
doc, err = a.mmf.Lease(ctx, a.clusterResourceID, doc.ID)
if err != nil {
// log and continue to the next task if it doesn't work
a.log.Error(err)
continue
}
// if we don't know what this task is, mark the manifest as failed and continue
f, ok := a.tasks[doc.MaintenanceManifest.MaintenanceTaskID]
if !ok {
a.log.Errorf("task ID %v is not registered", doc.MaintenanceManifest.MaintenanceTaskID)
msg := "task ID not registered"
_, err = a.mmf.EndLease(ctx, doc.ClusterResourceID, doc.ID, api.MaintenanceManifestStateFailed, &msg)
if err != nil {
a.log.Error(fmt.Errorf("failed ending lease early on manifest: %w", err))
}
continue
}
var state api.MaintenanceManifestState
var msg string
taskLog.Info("executing manifest")
// Perform the task with a timeout
err = taskContext.RunInTimeout(time.Minute*60, func() error {
innerErr := f(taskContext, doc, oc)
if innerErr != nil {
return innerErr
}
return taskContext.Err()
})
// Pull the result message out of the task context to save, if it is set
msg = taskContext.GetResultMessage()
if err != nil {
if doc.Dequeues >= maxDequeueCount {
msg = fmt.Sprintf("did not succeed after %d times, failing -- %s", doc.Dequeues, err.Error())
state = api.MaintenanceManifestStateRetriesExceeded
taskLog.Error(msg)
} else if utilmimo.IsRetryableError(err) {
// If an error is retryable (i.e explicitly marked as a transient error
// by wrapping it in utilmimo.TransientError), then mark it back as
// Pending so that it will get picked up and retried.
state = api.MaintenanceManifestStatePending
taskLog.Error(fmt.Errorf("task returned a retryable error: %w", err))
} else {
// Terminal errors (explicitly marked or unwrapped) cause task failure
state = api.MaintenanceManifestStateFailed
taskLog.Error(fmt.Errorf("task returned a terminal error: %w", err))
}
} else {
// Mark tasks that don't have an error as succeeded implicitly
state = api.MaintenanceManifestStateCompleted
taskLog.Info("manifest executed successfully")
}
_, err = a.mmf.EndLease(ctx, doc.ClusterResourceID, doc.ID, state, &msg)
if err != nil {
taskLog.Error(fmt.Errorf("failed ending lease on manifest: %w", err))
}
taskLog.Info("manifest processing complete")
}
// Remove any set maintenance state
a.log.Info("removing maintenance state on cluster")
oc, err = a.oc.PatchWithLease(ctx, a.clusterResourceID, func(oscd *api.OpenShiftClusterDocument) error {
oscd.OpenShiftCluster.Properties.MaintenanceState = api.MaintenanceStateNone
return nil
})
if err != nil {
a.log.Error(fmt.Errorf("failed removing maintenance state on cluster document, but continuing: %w", err))
}
// release the OpenShiftCluster
a.log.Info("ending lease on cluster")
_, err = a.oc.EndLease(ctx, a.clusterResourceID, oc.OpenShiftCluster.Properties.LastProvisioningState, oc.OpenShiftCluster.Properties.FailedProvisioningState, nil)
if err != nil {
return false, fmt.Errorf("failed ending lease on cluster document: %w", err)
}
return true, nil
}
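
The error handling above defines the contract a task has to follow: return nil on success (optionally recording a result message), wrap recoverable failures in mimo.TransientError so the manifest goes back to Pending, and wrap unrecoverable ones in mimo.TerminalError so it is marked Failed. A minimal illustrative task is sketched below; the task body and package are hypothetical, only the signature and the error helpers come from this change.

package example

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

import (
	"errors"

	"github.com/Azure/ARO-RP/pkg/api"
	"github.com/Azure/ARO-RP/pkg/util/mimo"
)

// exampleTask is a hypothetical maintenance task used only to illustrate the
// success/transient/terminal contract enforced by the actuator.
func exampleTask(tc mimo.TaskContext, doc *api.MaintenanceManifestDocument, oc *api.OpenShiftClusterDocument) error {
	if oc.OpenShiftCluster.Properties.ProvisioningState == api.ProvisioningStateDeleting {
		// A condition that retrying will never fix is wrapped as a terminal
		// error, which marks the manifest Failed immediately.
		return mimo.TerminalError(errors.New("cluster is being deleted"))
	}

	// Building a client against the cluster can fail for reasons outside the
	// task's control; wrapping in TransientError sends the manifest back to
	// Pending until maxDequeueCount is reached.
	ch, err := tc.ClientHelper()
	if err != nil {
		return mimo.TransientError(err)
	}
	_ = ch // a real task would act on the cluster through the client helper

	// On success the result message is persisted as the manifest's StatusText
	// when the actuator ends the lease.
	tc.SetResultMessage("example task completed")
	return nil
}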

View file

@ -0,0 +1,316 @@
package actuator
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"errors"
"log"
"net"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/metrics"
"github.com/Azure/ARO-RP/pkg/mimo/tasks"
"github.com/Azure/ARO-RP/pkg/proxy"
"github.com/Azure/ARO-RP/pkg/util/buckets"
"github.com/Azure/ARO-RP/pkg/util/heartbeat"
utillog "github.com/Azure/ARO-RP/pkg/util/log"
"github.com/Azure/ARO-RP/pkg/util/recover"
)
type Runnable interface {
Run(context.Context, <-chan struct{}, chan<- struct{}) error
}
type service struct {
dialer proxy.Dialer
baseLog *logrus.Entry
env env.Interface
dbGroup actuatorDBs
m metrics.Emitter
mu sync.RWMutex
cond *sync.Cond
stopping *atomic.Bool
workers *atomic.Int32
b buckets.BucketWorker
lastChangefeed atomic.Value //time.Time
startTime time.Time
pollTime time.Duration
now func() time.Time
tasks map[string]tasks.MaintenanceTask
serveHealthz bool
}
type actuatorDBs interface {
database.DatabaseGroupWithOpenShiftClusters
database.DatabaseGroupWithMaintenanceManifests
}
func NewService(env env.Interface, log *logrus.Entry, dialer proxy.Dialer, dbg actuatorDBs, m metrics.Emitter) *service {
s := &service{
env: env,
baseLog: log,
dialer: dialer,
dbGroup: dbg,
m: m,
stopping: &atomic.Bool{},
workers: &atomic.Int32{},
startTime: time.Now(),
now: time.Now,
pollTime: time.Minute,
serveHealthz: true,
}
s.b = buckets.NewBucketWorker(log, s.worker, &s.mu)
s.cond = sync.NewCond(&s.mu)
return s
}
func (s *service) SetMaintenanceTasks(tasks map[string]tasks.MaintenanceTask) {
s.tasks = tasks
}
func (s *service) Run(ctx context.Context, stop <-chan struct{}, done chan<- struct{}) error {
defer recover.Panic(s.baseLog)
// Only enable the healthz endpoint if configured (disabled in unit tests)
if s.serveHealthz {
c := &healthz.Handler{
Checks: map[string]healthz.Checker{
"ready": func(h *http.Request) error {
if !s.checkReady() {
return errors.New("not ready")
}
return nil
},
},
}
m := http.NewServeMux()
m.Handle("/healthz", http.StripPrefix("/healthz", c))
// Handle healthz subpaths
m.Handle("/healthz/", http.StripPrefix("/healthz", c))
h := &http.Server{
Handler: m,
ErrorLog: log.New(s.baseLog.Writer(), "", 0),
BaseContext: func(net.Listener) context.Context { return ctx },
}
listener, err := s.env.Listen()
if err != nil {
return err
}
go func() {
err := h.Serve(listener)
if err != http.ErrServerClosed {
s.baseLog.Error(err)
}
}()
}
t := time.NewTicker(10 * time.Second)
defer t.Stop()
if stop != nil {
go func() {
defer recover.Panic(s.baseLog)
<-stop
s.baseLog.Print("stopping")
s.stopping.Store(true)
s.cond.Signal()
}()
}
go heartbeat.EmitHeartbeat(s.baseLog, s.m, "actuator.heartbeat", nil, s.checkReady)
lastGotDocs := make(map[string]*api.OpenShiftClusterDocument)
for {
if s.stopping.Load() {
break
}
old, err := s.poll(ctx, lastGotDocs)
if err != nil {
s.baseLog.Error(err)
} else {
lastGotDocs = old
}
<-t.C
}
if !s.env.FeatureIsSet(env.FeatureDisableReadinessDelay) {
s.waitForWorkerCompletion()
}
s.baseLog.Print("exiting")
close(done)
return nil
}
// Temporary method of updating without the changefeed -- the reason why is
// complicated
func (s *service) poll(ctx context.Context, oldDocs map[string]*api.OpenShiftClusterDocument) (map[string]*api.OpenShiftClusterDocument, error) {
dbOpenShiftClusters, err := s.dbGroup.OpenShiftClusters()
if err != nil {
return nil, err
}
// Fetch all of the cluster UUIDs
i, err := dbOpenShiftClusters.GetAllResourceIDs(ctx, "")
if err != nil {
return nil, err
}
docs := make([]*api.OpenShiftClusterDocument, 0)
for {
d, err := i.Next(ctx, -1)
if err != nil {
return nil, err
}
if d == nil {
break
}
docs = append(docs, d.OpenShiftClusterDocuments...)
}
s.baseLog.Debugf("fetched %d clusters from CosmosDB", len(docs))
docMap := make(map[string]*api.OpenShiftClusterDocument)
for _, d := range docs {
docMap[strings.ToLower(d.Key)] = d
}
// remove docs that don't exist in the new set (removed clusters)
for _, oldCluster := range maps.Keys(oldDocs) {
_, ok := docMap[strings.ToLower(oldCluster)]
if !ok {
s.b.DeleteDoc(oldDocs[oldCluster])
s.baseLog.Debugf("removed %s from buckets", oldCluster)
}
}
s.baseLog.Debugf("updating %d clusters", len(docMap))
for _, cluster := range maps.Values(docMap) {
s.b.UpsertDoc(cluster)
}
// Store when we last fetched the clusters
s.lastChangefeed.Store(s.now())
return docMap, nil
}
func (s *service) waitForWorkerCompletion() {
s.mu.Lock()
for s.workers.Load() > 0 {
s.cond.Wait()
}
s.mu.Unlock()
}
func (s *service) checkReady() bool {
lastChangefeedTime, ok := s.lastChangefeed.Load().(time.Time)
if !ok {
return false
}
if s.env.IsLocalDevelopmentMode() {
return (time.Since(lastChangefeedTime) < time.Minute) // did we update our list of clusters recently?
} else {
return (time.Since(lastChangefeedTime) < time.Minute) && // did we update our list of clusters recently?
(time.Since(s.startTime) > 2*time.Minute) // are we running for at least 2 minutes?
}
}
func (s *service) worker(stop <-chan struct{}, delay time.Duration, id string) {
defer recover.Panic(s.baseLog)
log := utillog.EnrichWithResourceID(s.baseLog, id)
log.Debugf("starting worker for %s in %s...", id, delay.String())
// Wait for a randomised delay before starting
time.Sleep(delay)
dbOpenShiftClusters, err := s.dbGroup.OpenShiftClusters()
if err != nil {
log.Error(err)
return
}
dbMaintenanceManifests, err := s.dbGroup.MaintenanceManifests()
if err != nil {
log.Error(err)
return
}
a, err := NewActuator(context.Background(), s.env, log, id, dbOpenShiftClusters, dbMaintenanceManifests, s.now)
if err != nil {
log.Error(err)
return
}
// load in the tasks for the Actuator from the controller
a.AddMaintenanceTasks(s.tasks)
t := time.NewTicker(s.pollTime)
defer func() {
log.Debugf("stopping worker for %s...", id)
t.Stop()
}()
out:
for {
if s.stopping.Load() {
break
}
func() {
s.workers.Add(1)
s.m.EmitGauge("mimo.actuator.workers.active.count", int64(s.workers.Load()), nil)
defer func() {
s.workers.Add(-1)
s.m.EmitGauge("mimo.actuator.workers.active.count", int64(s.workers.Load()), nil)
}()
_, err := a.Process(context.Background())
if err != nil {
log.Error(err)
}
}()
select {
case <-t.C:
case <-stop:
break out
}
}
}

View file

@ -0,0 +1,258 @@
package actuator
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"go.uber.org/mock/gomock"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/metrics"
"github.com/Azure/ARO-RP/pkg/mimo/tasks"
"github.com/Azure/ARO-RP/pkg/util/mimo"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testdatabase "github.com/Azure/ARO-RP/test/database"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
type fakeMetricsEmitter struct {
Metrics map[string]int64
}
func newfakeMetricsEmitter() *fakeMetricsEmitter {
m := make(map[string]int64)
return &fakeMetricsEmitter{
Metrics: m,
}
}
func (e *fakeMetricsEmitter) EmitGauge(metricName string, metricValue int64, dimensions map[string]string) {
e.Metrics[metricName] = metricValue
}
func (e *fakeMetricsEmitter) EmitFloat(metricName string, metricValue float64, dimensions map[string]string) {
}
var _ = Describe("MIMO Actuator Service", Ordered, func() {
var fixtures *testdatabase.Fixture
var checker *testdatabase.Checker
var manifests database.MaintenanceManifests
var manifestsClient *cosmosdb.FakeMaintenanceManifestDocumentClient
var clusters database.OpenShiftClusters
//var clustersClient cosmosdb.OpenShiftClusterDocumentClient
var m metrics.Emitter
var svc *service
var ctx context.Context
var cancel context.CancelFunc
//var hook *test.Hook
var log *logrus.Entry
var _env env.Interface
var controller *gomock.Controller
mockSubID := "00000000-0000-0000-0000-000000000000"
clusterResourceID := fmt.Sprintf("/subscriptions/%s/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName", mockSubID)
AfterAll(func() {
if cancel != nil {
cancel()
}
if controller != nil {
controller.Finish()
}
})
BeforeAll(func() {
controller = gomock.NewController(nil)
_env = mock_env.NewMockInterface(controller)
ctx, cancel = context.WithCancel(context.Background())
_, log = testlog.New()
m = newfakeMetricsEmitter()
fixtures = testdatabase.NewFixture()
checker = testdatabase.NewChecker()
})
BeforeEach(func() {
now := func() time.Time { return time.Unix(120, 0) }
manifests, manifestsClient = testdatabase.NewFakeMaintenanceManifests(now)
clusters, _ = testdatabase.NewFakeOpenShiftClusters()
dbg := database.NewDBGroup().WithMaintenanceManifests(manifests).WithOpenShiftClusters(clusters)
svc = NewService(_env, log, nil, dbg, m)
svc.now = now
svc.serveHealthz = false
})
JustBeforeEach(func() {
err := fixtures.WithOpenShiftClusters(clusters).WithMaintenanceManifests(manifests).Create()
Expect(err).ToNot(HaveOccurred())
})
When("clusters are polled", func() {
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
},
})
})
AfterAll(func() {
svc.b.Stop()
})
It("updates the available clusters", func() {
lastGotDocs := make(map[string]*api.OpenShiftClusterDocument)
newOld, err := svc.poll(ctx, lastGotDocs)
Expect(err).ToNot(HaveOccurred())
Expect(newOld).To(HaveLen(1))
})
It("removes clusters if they are not in the doc", func() {
svc.b.UpsertDoc(&api.OpenShiftClusterDocument{Key: clusterResourceID + "2"})
lastGotDocs := make(map[string]*api.OpenShiftClusterDocument)
lastGotDocs[clusterResourceID+"2"] = &api.OpenShiftClusterDocument{Key: clusterResourceID + "2"}
newOld, err := svc.poll(ctx, lastGotDocs)
Expect(err).ToNot(HaveOccurred())
Expect(newOld).To(HaveLen(1))
})
})
When("maintenance needs to occur", func() {
var manifestID string
BeforeEach(func() {
fixtures.Clear()
fixtures.AddOpenShiftClusterDocuments(&api.OpenShiftClusterDocument{
Key: strings.ToLower(clusterResourceID),
OpenShiftCluster: &api.OpenShiftCluster{
ID: clusterResourceID,
Properties: api.OpenShiftClusterProperties{
ProvisioningState: api.ProvisioningStateSucceeded,
NetworkProfile: api.NetworkProfile{
PodCIDR: "0.0.0.0/32",
},
},
},
})
manifestID = manifests.NewUUID()
manifestID2 := manifests.NewUUID()
fixtures.AddMaintenanceManifestDocuments(
&api.MaintenanceManifestDocument{
ID: manifestID,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
RunBefore: 60,
RunAfter: 0,
},
},
&api.MaintenanceManifestDocument{
ID: manifestID2,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStatePending,
RunBefore: 300,
RunAfter: 0,
MaintenanceTaskID: "0000-0000-0001",
},
})
checker.Clear()
checker.AddMaintenanceManifestDocuments(
&api.MaintenanceManifestDocument{
ID: manifestID,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateTimedOut,
StatusText: "timed out at 1970-01-01 00:02:00 +0000 UTC",
RunBefore: 60,
RunAfter: 0,
},
},
&api.MaintenanceManifestDocument{
ID: manifestID2,
Dequeues: 1,
ClusterResourceID: strings.ToLower(clusterResourceID),
MaintenanceManifest: api.MaintenanceManifest{
State: api.MaintenanceManifestStateCompleted,
StatusText: "ok",
RunBefore: 300,
RunAfter: 0,
MaintenanceTaskID: "0000-0000-0001",
},
},
)
})
It("expires them", func() {
// run once
done := make(chan struct{})
svc.pollTime = time.Second
svc.SetMaintenanceTasks(map[string]tasks.MaintenanceTask{
"0000-0000-0001": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
svc.stopping.Store(true)
th.SetResultMessage("ok")
return nil
},
})
svc.worker(done, 0*time.Second, clusterResourceID)
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
It("loads the full cluster document", func() {
// run once
done := make(chan struct{})
svc.pollTime = time.Second
svc.SetMaintenanceTasks(map[string]tasks.MaintenanceTask{
"0000-0000-0001": func(th mimo.TaskContext, mmd *api.MaintenanceManifestDocument, oscd *api.OpenShiftClusterDocument) error {
// Only the ClusterResourceID is available to the bucket
// worker, so make sure this is the full document
Expect(oscd.OpenShiftCluster.Properties.NetworkProfile.PodCIDR).To(Equal("0.0.0.0/32"))
svc.stopping.Store(true)
th.SetResultMessage("ok")
return nil
},
})
svc.worker(done, 0*time.Second, clusterResourceID)
errs := checker.CheckMaintenanceManifests(manifestsClient)
Expect(errs).To(BeNil(), fmt.Sprintf("%v", errs))
})
})
})

pkg/mimo/actuator/task.go (new file, 143 lines)
View file

@ -0,0 +1,143 @@
package actuator
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/sirupsen/logrus"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
"github.com/Azure/ARO-RP/pkg/util/mimo"
"github.com/Azure/ARO-RP/pkg/util/restconfig"
)
type th struct {
originalCtx context.Context
ctx context.Context
env env.Interface
log *logrus.Entry
resultMessage string
oc *api.OpenShiftClusterDocument
_ch clienthelper.Interface
}
// force interface checking
var _ mimo.TaskContext = &th{}
func newTaskContext(ctx context.Context, env env.Interface, log *logrus.Entry, oc *api.OpenShiftClusterDocument) *th {
return &th{
originalCtx: ctx,
ctx: ctx,
env: env,
log: log,
oc: oc,
_ch: nil,
}
}
func (t *th) RunInTimeout(timeout time.Duration, f func() error) error {
newctx, cancel := context.WithTimeout(t.originalCtx, timeout)
t.ctx = newctx
defer func() {
cancel()
t.ctx = t.originalCtx
}()
return f()
}
// context stuff
func (t *th) Deadline() (time.Time, bool) {
return t.ctx.Deadline()
}
func (t *th) Done() <-chan struct{} {
return t.ctx.Done()
}
func (t *th) Err() error {
return t.ctx.Err()
}
func (t *th) Value(key any) any {
return t.ctx.Value(key)
}
func (t *th) Environment() env.Interface {
return t.env
}
func (t *th) ClientHelper() (clienthelper.Interface, error) {
if t._ch != nil {
return t._ch, nil
}
restConfig, err := restconfig.RestConfig(t.env, t.oc.OpenShiftCluster)
if err != nil {
return nil, err
}
mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
if err != nil {
return nil, err
}
client, err := client.New(restConfig, client.Options{
Mapper: mapper,
})
if err != nil {
return nil, err
}
t._ch = clienthelper.NewWithClient(t.log, client)
return t._ch, nil
}
func (t *th) Log() *logrus.Entry {
return t.log
}
func (t *th) Now() time.Time {
return time.Now()
}
func (t *th) SetResultMessage(msg string) {
t.resultMessage = msg
}
func (t *th) GetResultMessage() string {
return t.resultMessage
}
func (t *th) GetClusterUUID() string {
return t.oc.ID
}
func (t *th) GetOpenShiftClusterProperties() api.OpenShiftClusterProperties {
return t.oc.OpenShiftCluster.Properties
}
// localFpAuthorizer implements mimo.TaskContext.
func (t *th) LocalFpAuthorizer() (autorest.Authorizer, error) {
localFPAuthorizer, err := t.env.FPAuthorizer(t.env.TenantID(), nil, t.env.Environment().ResourceManagerScope)
if err != nil {
return nil, err
}
return localFPAuthorizer, nil
}
// GetOpenshiftClusterDocument implements mimo.TaskContext.
func (t *th) GetOpenshiftClusterDocument() *api.OpenShiftClusterDocument {
return t.oc
}

pkg/mimo/const.go (new file, 10 lines)
View file

@ -0,0 +1,10 @@
package mimo
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
const (
TLS_CERT_ROTATION_ID = "9b741734-6505-447f-8510-85eb0ae561a2"
OPERATOR_FLAGS_UPDATE_ID = "b41749fc-af26-4ab7-b5a1-e03f3ee4cba6"
ACR_TOKEN_CHECKER_ID = "082978ce-3700-4972-835f-53d48658d291"
)
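
These UUIDs are the stable identifiers a MaintenanceManifest refers to via MaintenanceTaskID. A rough wiring sketch is below; the no-op task body is hypothetical and stands in for the real implementations, whose adapters are not part of this excerpt. The same map would be handed to the actuator service via SetMaintenanceTasks (or to a single actuator via AddMaintenanceTasks).

package main

import (
	"fmt"

	"github.com/Azure/ARO-RP/pkg/api"
	"github.com/Azure/ARO-RP/pkg/mimo"
	"github.com/Azure/ARO-RP/pkg/mimo/tasks"
	utilmimo "github.com/Azure/ARO-RP/pkg/util/mimo"
)

func main() {
	// Hypothetical registration keyed by the stable task IDs above.
	registered := map[string]tasks.MaintenanceTask{
		mimo.OPERATOR_FLAGS_UPDATE_ID: func(tc utilmimo.TaskContext, _ *api.MaintenanceManifestDocument, _ *api.OpenShiftClusterDocument) error {
			tc.SetResultMessage("operator flags updated")
			return nil
		},
	}

	fmt.Printf("%d maintenance task(s) registered\n", len(registered))
}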

View file

@ -0,0 +1,63 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"errors"
"fmt"
"time"
"github.com/Azure/ARO-RP/pkg/util/acrtoken"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
const (
daysValid = 90
daysShouldRotate = 45
)
// EnsureACRTokenIsValid checks the issue date of the Azure Container Registry (ACR) token in the cluster's RegistryProfile.
// It returns an error if the token has expired, is due for rotation, has no recorded issue date, or if no registry profile is found.
func EnsureACRTokenIsValid(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return mimo.TerminalError(err)
}
env := th.Environment()
localFpAuthorizer, err := th.LocalFpAuthorizer()
if err != nil {
return mimo.TerminalError(err)
}
manager, err := acrtoken.NewManager(env, localFpAuthorizer)
if err != nil {
return err
}
registryProfiles := th.GetOpenShiftClusterProperties().RegistryProfiles
rp := manager.GetRegistryProfileFromSlice(registryProfiles)
if rp != nil {
var now = time.Now().UTC()
issueDate := rp.IssueDate
if issueDate == nil {
return mimo.TerminalError(errors.New("no issue date detected, please rotate token"))
}
daysInterval := int32(now.Sub(issueDate.Time).Hours() / 24)
switch {
case daysInterval > daysValid:
return mimo.TerminalError(fmt.Errorf("azure container registry (acr) token is not valid, %d days have passed", daysInterval))
case daysInterval >= daysShouldRotate:
return mimo.TerminalError(fmt.Errorf("%d days have passed since azure container registry (acr) token was issued, please rotate the token now", daysInterval))
default:
th.SetResultMessage("azure container registry (acr) token is valid")
}
return nil
}
return mimo.TerminalError(errors.New("no registry profile detected"))
}
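
To make the thresholds concrete: with daysValid=90 and daysShouldRotate=45, a token issued 30 days ago passes, one issued 50 days ago triggers the rotate warning, and one issued 100 days ago is reported as invalid (the 50 and 100 day cases match the unit tests that follow). A self-contained sketch of the same arithmetic, with made-up issue dates:

package main

import (
	"fmt"
	"time"
)

const (
	daysValid        = 90
	daysShouldRotate = 45
)

func main() {
	now := time.Now().UTC()
	// Hypothetical issue dates chosen to hit each branch of the check.
	for _, daysAgo := range []int{30, 50, 100} {
		issued := now.AddDate(0, 0, -daysAgo)
		daysInterval := int32(now.Sub(issued).Hours() / 24)
		switch {
		case daysInterval > daysValid:
			fmt.Printf("%3d days: token is no longer valid\n", daysInterval)
		case daysInterval >= daysShouldRotate:
			fmt.Printf("%3d days: rotate the token now\n", daysInterval)
		default:
			fmt.Printf("%3d days: token is valid\n", daysInterval)
		}
	}
}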

View file

@ -0,0 +1,146 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
"github.com/Azure/go-autorest/autorest/date"
"go.uber.org/mock/gomock"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/azureclient"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testtasks "github.com/Azure/ARO-RP/test/mimo/tasks"
testclienthelper "github.com/Azure/ARO-RP/test/util/clienthelper"
utilerror "github.com/Azure/ARO-RP/test/util/error"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
const (
registryResourceID = "/subscriptions/93aeba23-2f76-4307-be82-02921df010cf/resourceGroups/global/providers/Microsoft.ContainerRegistry/registries/arointsvc"
clusterUUID = "512a50c8-2a43-4c2a-8fd9-a5539475df2a"
publicACR = "arosvc.azurecr.io"
intACR = "arointsvc.azurecr.io"
user = "testuser"
)
func TestEnsureACRToken(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
name string
azureEnv azureclient.AROEnvironment
oc func() *api.OpenShiftCluster
wantErr string
}{
{
name: "not found",
azureEnv: azureclient.PublicCloud,
oc: func() *api.OpenShiftCluster {
return &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{},
}
},
wantErr: "TerminalError: no registry profile detected",
},
{
name: "No issue date",
azureEnv: azureclient.PublicCloud,
oc: func() *api.OpenShiftCluster {
return &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: intACR,
Username: user,
IssueDate: nil,
},
},
},
}
},
wantErr: "TerminalError: no issue date detected, please rotate token",
},
{
name: "Expired",
azureEnv: azureclient.PublicCloud,
oc: func() *api.OpenShiftCluster {
return &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: publicACR,
Username: user,
IssueDate: &date.Time{Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)},
},
{
Name: intACR,
Username: user,
IssueDate: &date.Time{Time: time.Now().UTC().AddDate(0, 0, -100)},
},
},
},
}
},
wantErr: "TerminalError: azure container registry (acr) token is not valid, 100 days have passed",
},
{
name: "Should rotate token",
azureEnv: azureclient.PublicCloud,
oc: func() *api.OpenShiftCluster {
return &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: publicACR,
Username: user,
IssueDate: &date.Time{Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)},
},
{
Name: intACR,
Username: user,
IssueDate: &date.Time{Time: time.Now().UTC().AddDate(0, 0, -50)},
},
},
},
}
},
wantErr: "TerminalError: 50 days have passed since azure container registry (acr) token was issued, please rotate the token now",
},
} {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
controller := gomock.NewController(t)
_env := mock_env.NewMockInterface(controller)
_env.EXPECT().ACRResourceID().AnyTimes().Return(registryResourceID)
_env.EXPECT().ACRDomain().AnyTimes().Return(intACR)
_env.EXPECT().Environment().AnyTimes().Return(&tt.azureEnv)
_, log := testlog.New()
builder := fake.NewClientBuilder()
ch := clienthelper.NewWithClient(log, testclienthelper.NewHookingClient(builder.Build()))
tc := testtasks.NewFakeTestContext(
ctx, _env, log, func() time.Time { return time.Unix(100, 0) },
testtasks.WithClientHelper(ch),
testtasks.WithOpenShiftClusterProperties(clusterUUID, tt.oc().Properties),
)
err := EnsureACRTokenIsValid(tc)
if tt.wantErr != "" && err != nil {
utilerror.AssertErrorMessage(t, err, tt.wantErr)
} else if tt.wantErr != "" && err == nil {
t.Errorf("wanted error %s", tt.wantErr)
} else if tt.wantErr == "" {
g.Expect(err).ToNot(HaveOccurred())
}
})
}
}

View file

@ -0,0 +1,46 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"errors"
configv1 "github.com/openshift/api/config/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"github.com/Azure/ARO-RP/pkg/util/clusteroperators"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
func EnsureAPIServerIsUp(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return err
}
ch, err := th.ClientHelper()
if err != nil {
return mimo.TerminalError(err)
}
co := &configv1.ClusterOperator{}
err = ch.GetOne(ctx, types.NamespacedName{Name: "kube-apiserver"}, co)
if err != nil {
// 404 on kube-apiserver is likely terminal
if kerrors.IsNotFound(err) {
return mimo.TerminalError(err)
}
return mimo.TransientError(err)
}
available := clusteroperators.IsOperatorAvailable(co)
if !available {
return mimo.TransientError(errors.New(clusteroperators.OperatorStatusText(co)))
}
return nil
}

View file

@ -0,0 +1,108 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
"go.uber.org/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testtasks "github.com/Azure/ARO-RP/test/mimo/tasks"
testclienthelper "github.com/Azure/ARO-RP/test/util/clienthelper"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
func TestAPIServerIsUp(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
name string
objects []runtime.Object
wantErr string
}{
{
name: "not found",
objects: []runtime.Object{},
wantErr: `TerminalError: clusteroperators.config.openshift.io "kube-apiserver" not found`,
},
{
name: "not ready",
objects: []runtime.Object{
&configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-apiserver",
},
Status: configv1.ClusterOperatorStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorAvailable,
Status: configv1.ConditionFalse,
},
{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionTrue,
},
},
},
},
},
wantErr: `TransientError: kube-apiserver Available=False, Progressing=True`,
},
{
name: "ready",
objects: []runtime.Object{
&configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-apiserver",
},
Status: configv1.ClusterOperatorStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorAvailable,
Status: configv1.ConditionTrue,
},
{
Type: configv1.OperatorProgressing,
Status: configv1.ConditionFalse,
},
},
},
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
controller := gomock.NewController(t)
_env := mock_env.NewMockInterface(controller)
_, log := testlog.New()
builder := fake.NewClientBuilder().WithRuntimeObjects(tt.objects...)
ch := clienthelper.NewWithClient(log, testclienthelper.NewHookingClient(builder.Build()))
tc := testtasks.NewFakeTestContext(
ctx, _env, log, func() time.Time { return time.Unix(100, 0) },
testtasks.WithClientHelper(ch),
)
err := EnsureAPIServerIsUp(tc)
if tt.wantErr != "" && err != nil {
g.Expect(err).To(MatchError(tt.wantErr))
} else if tt.wantErr != "" && err == nil {
t.Errorf("wanted error %s", tt.wantErr)
} else if tt.wantErr == "" {
g.Expect(err).ToNot(HaveOccurred())
}
})
}
}

Просмотреть файл

@ -0,0 +1,56 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
// UpdateClusterOperatorFlags updates the OperatorFlags object in the ARO
// Cluster custom resource.
func UpdateClusterOperatorFlags(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return mimo.TerminalError(err)
}
props := th.GetOpenShiftClusterProperties()
ch, err := th.ClientHelper()
if err != nil {
return mimo.TerminalError(err)
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
clusterObj := &arov1alpha1.Cluster{}
err = ch.GetOne(ctx, types.NamespacedName{Name: arov1alpha1.SingletonClusterName}, clusterObj)
if err != nil {
if kerrors.IsNotFound(err) {
// the in-cluster ARO Cluster resource being gone is unrecoverable
return mimo.TerminalError(err)
}
return mimo.TransientError(err)
}
clusterObj.Spec.OperatorFlags = arov1alpha1.OperatorFlags(props.OperatorFlags)
err = ch.Update(ctx, clusterObj)
if err != nil {
if kerrors.IsConflict(err) {
return err
} else {
return mimo.TransientError(err)
}
}
return nil
})
}

Просмотреть файл

@ -0,0 +1,127 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
"github.com/go-test/deep"
"go.uber.org/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/api"
arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testtasks "github.com/Azure/ARO-RP/test/mimo/tasks"
testclienthelper "github.com/Azure/ARO-RP/test/util/clienthelper"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
func TestOperatorFlags(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
name string
objects []runtime.Object
wantObjects []runtime.Object
wantErr string
}{
{
name: "not found",
objects: []runtime.Object{},
wantErr: `TerminalError: clusters.aro.openshift.io "cluster" not found`,
},
{
name:    "flags updated",
objects: []runtime.Object{
&arov1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: arov1alpha1.SingletonClusterName,
ResourceVersion: "1000",
},
Spec: arov1alpha1.ClusterSpec{
OperatorFlags: arov1alpha1.OperatorFlags{
"foo": "bar",
},
},
},
},
wantObjects: []runtime.Object{
&arov1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: arov1alpha1.SingletonClusterName,
ResourceVersion: "1001",
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: arov1alpha1.SchemeGroupVersion.String(),
},
Spec: arov1alpha1.ClusterSpec{
OperatorFlags: arov1alpha1.OperatorFlags{
"foo": "baz",
"gaz": "data",
},
},
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
controller := gomock.NewController(t)
_env := mock_env.NewMockInterface(controller)
_, log := testlog.New()
ocDoc := &api.OpenShiftClusterDocument{
ID: "0000",
OpenShiftCluster: &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
OperatorFlags: api.OperatorFlags{
"foo": "baz",
"gaz": "data",
},
},
},
}
builder := fake.NewClientBuilder().WithRuntimeObjects(tt.objects...)
ch := clienthelper.NewWithClient(log, testclienthelper.NewHookingClient(builder.Build()))
tc := testtasks.NewFakeTestContext(
ctx, _env, log, func() time.Time { return time.Unix(100, 0) },
testtasks.WithClientHelper(ch), testtasks.WithOpenShiftClusterDocument(ocDoc),
)
err := UpdateClusterOperatorFlags(tc)
if tt.wantErr != "" && err != nil {
g.Expect(err).To(MatchError(tt.wantErr))
} else if tt.wantErr != "" && err == nil {
t.Errorf("wanted error %s", tt.wantErr)
} else if tt.wantErr == "" {
g.Expect(err).ToNot(HaveOccurred())
}
if len(tt.wantObjects) > 0 {
for _, i := range tt.wantObjects {
o, err := scheme.Scheme.New(i.GetObjectKind().GroupVersionKind())
g.Expect(err).ToNot(HaveOccurred())
err = ch.GetOne(ctx, client.ObjectKeyFromObject(i.(client.Object)), o)
g.Expect(err).ToNot(HaveOccurred())
r := deep.Equal(i, o)
g.Expect(r).To(BeEmpty())
}
}
})
}
}

Просмотреть файл

@ -0,0 +1,103 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
configv1 "github.com/openshift/api/config/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"github.com/Azure/ARO-RP/pkg/cluster"
"github.com/Azure/ARO-RP/pkg/util/dns"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
func RotateAPIServerCertificate(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return mimo.TerminalError(err)
}
ch, err := th.ClientHelper()
if err != nil {
return mimo.TerminalError(err)
}
env := th.Environment()
secretName := th.GetClusterUUID() + "-apiserver"
for _, namespace := range []string{"openshift-config", "openshift-azure-operator"} {
err = cluster.EnsureTLSSecretFromKeyvault(
ctx, env.ClusterKeyvault(), ch, types.NamespacedName{Namespace: namespace, Name: secretName}, secretName,
)
if err != nil {
return err
}
}
return nil
}
func EnsureAPIServerServingCertificateConfiguration(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return mimo.TerminalError(err)
}
ch, err := th.ClientHelper()
if err != nil {
return mimo.TerminalError(err)
}
env := th.Environment()
clusterProperties := th.GetOpenShiftClusterProperties()
managedDomain, err := dns.ManagedDomain(env, clusterProperties.ClusterProfile.Domain)
if err != nil {
// if it fails the belt-and-braces check then there is not much we can do
return mimo.TerminalError(err)
}
if managedDomain == "" {
th.SetResultMessage("apiserver certificate is not managed")
return nil
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
apiserver := &configv1.APIServer{}
err := ch.GetOne(ctx, types.NamespacedName{Name: "cluster"}, apiserver)
if err != nil {
if kerrors.IsNotFound(err) {
// apiserver not being found is probably unrecoverable
return mimo.TerminalError(err)
}
return mimo.TransientError(err)
}
apiserver.Spec.ServingCerts.NamedCertificates = []configv1.APIServerNamedServingCert{
{
Names: []string{
"api." + managedDomain,
},
ServingCertificate: configv1.SecretNameReference{
Name: th.GetClusterUUID() + "-apiserver",
},
},
}
err = ch.Update(ctx, apiserver)
if err != nil {
if kerrors.IsConflict(err) {
return err
} else {
return mimo.TransientError(err)
}
}
return nil
})
}

Просмотреть файл

@ -0,0 +1,106 @@
package cluster
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
"go.uber.org/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testtasks "github.com/Azure/ARO-RP/test/mimo/tasks"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
func TestConfigureAPIServerCertificates(t *testing.T) {
ctx := context.Background()
clusterUUID := "512a50c8-2a43-4c2a-8fd9-a5539475df2a"
for _, tt := range []struct {
name string
objects []runtime.Object
check func(clienthelper.Interface, Gomega) error
wantErr string
}{
{
name: "not found",
objects: []runtime.Object{},
wantErr: `TerminalError: apiservers.config.openshift.io "cluster" not found`,
},
{
name: "secrets referenced",
objects: []runtime.Object{
&configv1.APIServer{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
},
Spec: configv1.APIServerSpec{},
},
},
check: func(i clienthelper.Interface, g Gomega) error {
apiserver := &configv1.APIServer{}
err := i.GetOne(ctx, types.NamespacedName{Name: "cluster"}, apiserver)
if err != nil {
return err
}
g.Expect(apiserver.Spec.ServingCerts.NamedCertificates).To(Equal([]configv1.APIServerNamedServingCert{
{
Names: []string{"api.something.example.com"},
ServingCertificate: configv1.SecretNameReference{
Name: "512a50c8-2a43-4c2a-8fd9-a5539475df2a-apiserver",
},
},
}))
return nil
},
},
} {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
controller := gomock.NewController(t)
_env := mock_env.NewMockInterface(controller)
_env.EXPECT().Domain().AnyTimes().Return("example.com")
_, log := testlog.New()
builder := fake.NewClientBuilder().WithRuntimeObjects(tt.objects...)
ch := clienthelper.NewWithClient(log, builder.Build())
tc := testtasks.NewFakeTestContext(
ctx, _env, log, func() time.Time { return time.Unix(100, 0) },
testtasks.WithClientHelper(ch),
testtasks.WithOpenShiftClusterProperties(clusterUUID, api.OpenShiftClusterProperties{
ClusterProfile: api.ClusterProfile{
Domain: "something",
},
}),
)
err := EnsureAPIServerServingCertificateConfiguration(tc)
if tt.wantErr != "" && err != nil {
g.Expect(err).To(MatchError(tt.wantErr))
} else if tt.wantErr != "" && err == nil {
t.Errorf("wanted error %s", tt.wantErr)
} else if tt.wantErr == "" {
g.Expect(err).ToNot(HaveOccurred())
}
if tt.check != nil {
g.Expect(tt.check(ch, g)).ToNot(HaveOccurred())
}
})
}
}

Просмотреть файл

@ -0,0 +1,37 @@
package example
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
configv1 "github.com/openshift/api/config/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
func ReportClusterVersion(ctx context.Context) error {
th, err := mimo.GetTaskContext(ctx)
if err != nil {
return err
}
ch, err := th.ClientHelper()
if err != nil {
return err
}
cv := &configv1.ClusterVersion{}
err = ch.GetOne(ctx, types.NamespacedName{Name: "version"}, cv)
if err != nil {
return fmt.Errorf("unable to get ClusterVersion: %w", err)
}
th.SetResultMessage(fmt.Sprintf("cluster version is: %s", cv.Status.History[0].Version))
return nil
}

Просмотреть файл

@ -0,0 +1,56 @@
package example
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
"go.uber.org/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
testtasks "github.com/Azure/ARO-RP/test/mimo/tasks"
testclienthelper "github.com/Azure/ARO-RP/test/util/clienthelper"
testlog "github.com/Azure/ARO-RP/test/util/log"
)
func TestTask(t *testing.T) {
RegisterTestingT(t)
ctx := context.Background()
controller := gomock.NewController(t)
_env := mock_env.NewMockInterface(controller)
_, log := testlog.New()
builder := fake.NewClientBuilder().WithRuntimeObjects(
&configv1.ClusterVersion{
ObjectMeta: metav1.ObjectMeta{
Name: "version",
},
Status: configv1.ClusterVersionStatus{
History: []configv1.UpdateHistory{
{
State: configv1.CompletedUpdate,
Version: "4.99.123",
},
},
},
},
)
ch := clienthelper.NewWithClient(log, testclienthelper.NewHookingClient(builder.Build()))
tc := testtasks.NewFakeTestContext(
ctx, _env, log, func() time.Time { return time.Unix(100, 0) },
testtasks.WithClientHelper(ch),
)
err := ReportClusterVersion(tc)
Expect(err).ToNot(HaveOccurred())
Expect(tc.GetResultMessage()).To(Equal("cluster version is: 4.99.123"))
}

Просмотреть файл

@ -0,0 +1,19 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/mimo/steps/cluster"
"github.com/Azure/ARO-RP/pkg/util/mimo"
"github.com/Azure/ARO-RP/pkg/util/steps"
)
func ACRTokenChecker(t mimo.TaskContext, doc *api.MaintenanceManifestDocument, oc *api.OpenShiftClusterDocument) error {
s := []steps.Step{
steps.Action(cluster.EnsureACRTokenIsValid),
}
return run(t, s)
}

Просмотреть файл

@ -0,0 +1,11 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/mimo"
)
type MaintenanceTask func(mimo.TaskContext, *api.MaintenanceManifestDocument, *api.OpenShiftClusterDocument) error

Просмотреть файл

@ -0,0 +1,21 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/mimo/steps/cluster"
"github.com/Azure/ARO-RP/pkg/util/mimo"
"github.com/Azure/ARO-RP/pkg/util/steps"
)
func UpdateOperatorFlags(t mimo.TaskContext, doc *api.MaintenanceManifestDocument, oc *api.OpenShiftClusterDocument) error {
s := []steps.Step{
steps.Action(cluster.EnsureAPIServerIsUp),
steps.Action(cluster.UpdateClusterOperatorFlags),
}
return run(t, s)
}

Просмотреть файл

@ -0,0 +1,26 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"time"
"github.com/Azure/ARO-RP/pkg/mimo"
utilmimo "github.com/Azure/ARO-RP/pkg/util/mimo"
"github.com/Azure/ARO-RP/pkg/util/steps"
)
const DEFAULT_POLL_TIME = time.Second * 10
const DEFAULT_TIMEOUT_DURATION = time.Minute * 20
var DEFAULT_MAINTENANCE_TASKS = map[string]MaintenanceTask{
mimo.TLS_CERT_ROTATION_ID: TLSCertRotation,
mimo.ACR_TOKEN_CHECKER_ID: ACRTokenChecker,
mimo.OPERATOR_FLAGS_UPDATE_ID: UpdateOperatorFlags,
}
func run(t utilmimo.TaskContext, s []steps.Step) error {
_, err := steps.Run(t, t.Log(), DEFAULT_POLL_TIME, s, t.Now)
return err
}
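For reference, wiring up an additional maintenance task follows the same pattern as the registrations above: a function matching the MaintenanceTask signature that pushes its steps through run, plus an entry in DEFAULT_MAINTENANCE_TASKS keyed by a task ID. A minimal sketch in the style of the other task files in this package, assuming a hypothetical mimo.CLUSTER_VERSION_REPORT_ID constant and reusing the example.ReportClusterVersion step added elsewhere in this change:

func ClusterVersionReporter(t mimo.TaskContext, doc *api.MaintenanceManifestDocument, oc *api.OpenShiftClusterDocument) error {
	s := []steps.Step{
		// assumed import: github.com/Azure/ARO-RP/pkg/mimo/steps/example
		steps.Action(example.ReportClusterVersion),
	}
	return run(t, s)
}

// ...and in DEFAULT_MAINTENANCE_TASKS (the ID constant here is hypothetical):
//	mimo.CLUSTER_VERSION_REPORT_ID: ClusterVersionReporter,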

Просмотреть файл

@ -0,0 +1,22 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/mimo/steps/cluster"
"github.com/Azure/ARO-RP/pkg/util/mimo"
"github.com/Azure/ARO-RP/pkg/util/steps"
)
func TLSCertRotation(t mimo.TaskContext, doc *api.MaintenanceManifestDocument, oc *api.OpenShiftClusterDocument) error {
s := []steps.Step{
steps.Action(cluster.EnsureAPIServerIsUp),
steps.Action(cluster.RotateAPIServerCertificate),
steps.Action(cluster.EnsureAPIServerServingCertificateConfiguration),
}
return run(t, s)
}

Просмотреть файл

@ -5,12 +5,13 @@ package acrtoken
import (
"context"
"fmt"
"net/http"
"time"
mgmtcontainerregistry "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/ARO-RP/pkg/api"
@ -21,7 +22,8 @@ import (
type Manager interface {
GetRegistryProfile(oc *api.OpenShiftCluster) *api.RegistryProfile
NewRegistryProfile(oc *api.OpenShiftCluster) *api.RegistryProfile
GetRegistryProfileFromSlice(oc []*api.RegistryProfile) *api.RegistryProfile
NewRegistryProfile() *api.RegistryProfile
PutRegistryProfile(oc *api.OpenShiftCluster, rp *api.RegistryProfile)
EnsureTokenAndPassword(ctx context.Context, rp *api.RegistryProfile) (string, error)
RotateTokenPassword(ctx context.Context, rp *api.RegistryProfile) error
@ -34,6 +36,9 @@ type manager struct {
tokens containerregistry.TokensClient
registries containerregistry.RegistriesClient
uuid uuid.Generator
now func() time.Time
}
func NewManager(env env.Interface, localFPAuthorizer autorest.Authorizer) (Manager, error) {
@ -48,6 +53,8 @@ func NewManager(env env.Interface, localFPAuthorizer autorest.Authorizer) (Manag
tokens: containerregistry.NewTokensClient(env.Environment(), r.SubscriptionID, localFPAuthorizer),
registries: containerregistry.NewRegistriesClient(env.Environment(), r.SubscriptionID, localFPAuthorizer),
uuid: uuid.DefaultGenerator,
now: time.Now,
}
return m, nil
@ -55,7 +62,7 @@ func NewManager(env env.Interface, localFPAuthorizer autorest.Authorizer) (Manag
func (m *manager) GetRegistryProfile(oc *api.OpenShiftCluster) *api.RegistryProfile {
for i, rp := range oc.Properties.RegistryProfiles {
if rp.Name == fmt.Sprintf("%s.%s", m.r.ResourceName, m.env.Environment().ContainerRegistryDNSSuffix) {
if rp.Name == m.env.ACRDomain() {
return oc.Properties.RegistryProfiles[i]
}
}
@ -63,10 +70,21 @@ func (m *manager) GetRegistryProfile(oc *api.OpenShiftCluster) *api.RegistryProf
return nil
}
func (m *manager) NewRegistryProfile(oc *api.OpenShiftCluster) *api.RegistryProfile {
func (m *manager) GetRegistryProfileFromSlice(registryProfiles []*api.RegistryProfile) *api.RegistryProfile {
for _, rp := range registryProfiles {
if rp.Name == m.env.ACRDomain() {
return rp
}
}
return nil
}
func (m *manager) NewRegistryProfile() *api.RegistryProfile {
return &api.RegistryProfile{
Name: fmt.Sprintf("%s.%s", m.r.ResourceName, m.env.Environment().ContainerRegistryDNSSuffix),
Username: "token-" + uuid.DefaultGenerator.Generate(),
Name: m.env.ACRDomain(),
Username: "token-" + m.uuid.Generate(),
IssueDate: &date.Time{Time: m.now().UTC()},
}
}

Просмотреть файл

@ -12,16 +12,20 @@ import (
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/go-test/deep"
"github.com/stretchr/testify/assert"
"go.uber.org/mock/gomock"
"github.com/Azure/ARO-RP/pkg/api"
mock_containerregistry "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/containerregistry"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
"github.com/Azure/ARO-RP/test/util/deterministicuuid"
)
const (
tokenName = "token-12345"
registryResourceID = "/subscriptions/93aeba23-2f76-4307-be82-02921df010cf/resourceGroups/global/providers/Microsoft.ContainerRegistry/registries/arointsvc"
registryDomain = "arointsvc.example.com"
)
func TestEnsureTokenAndPassword(t *testing.T) {
@ -67,7 +71,7 @@ func TestEnsureTokenAndPassword(t *testing.T) {
tokens: tokens,
}
password, err := m.EnsureTokenAndPassword(ctx, &api.RegistryProfile{Username: tokenName})
password, err := m.EnsureTokenAndPassword(ctx, &api.RegistryProfile{Username: tokenName, IssueDate: &date.Time{Time: time.Now().AddDate(0, 0, -50)}})
if err != nil {
t.Fatal(err)
}
@ -178,12 +182,17 @@ func toDate(t time.Time) *date.Time {
func setupManager(controller *gomock.Controller, tc *mock_containerregistry.MockTokensClient, rc *mock_containerregistry.MockRegistriesClient) *manager {
env := mock_env.NewMockInterface(controller)
env.EXPECT().ACRResourceID().AnyTimes().Return(registryResourceID)
env.EXPECT().ACRDomain().AnyTimes().Return(registryDomain)
r, _ := azure.ParseResourceID(registryResourceID)
u := deterministicuuid.NewTestUUIDGenerator(0x22)
now := func() time.Time { return time.UnixMilli(1000) }
return &manager{
env: env,
r: r,
tokens: tc,
registries: rc,
uuid: u,
now: now,
}
}
@ -216,3 +225,115 @@ func generateCredentialsParameters(tpn mgmtcontainerregistry.TokenPasswordName)
Name: tpn,
}
}
func TestGetRegistryProfiles(t *testing.T) {
a := assert.New(t)
controller := gomock.NewController(t)
mgr := setupManager(controller, nil, nil)
ocWithProfile := &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: "notwanted.example.com",
Username: "other",
},
{
Name: "arointsvc.example.com",
Username: "foo",
},
},
},
}
ocWithoutProfile := &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: "notwanted.example.com",
Username: "other",
},
},
},
}
// GetRegistryProfile finds it successfully
r := mgr.GetRegistryProfile(ocWithProfile)
a.NotNil(r)
a.Equal("arointsvc.example.com", r.Name)
a.Equal("foo", r.Username)
// GetRegistryProfile can't find it as it doesn't exist
r = mgr.GetRegistryProfile(ocWithoutProfile)
a.Nil(r)
// GetRegistryProfileFromSlice finds it successfully
r = mgr.GetRegistryProfileFromSlice(ocWithProfile.Properties.RegistryProfiles)
a.NotNil(r)
a.Equal("arointsvc.example.com", r.Name)
a.Equal("foo", r.Username)
// GetRegistryProfileFromSlice can't find it as it doesn't exist
r = mgr.GetRegistryProfileFromSlice(ocWithoutProfile.Properties.RegistryProfiles)
a.Nil(r)
}
func TestNewAndPutRegistryProfile(t *testing.T) {
a := assert.New(t)
controller := gomock.NewController(t)
mgr := setupManager(controller, nil, nil)
newProfile := mgr.NewRegistryProfile()
a.NotNil(newProfile)
a.Equal("token-22222222-2222-2222-2222-222222220001", newProfile.Username)
a.Equal("1970-01-01T00:00:01Z", newProfile.IssueDate.Format(time.RFC3339))
ocWithProfile := &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: "arointsvc.example.com",
Username: "foo",
},
{
Name: "notwanted.example.com",
Username: "other",
},
},
},
}
ocWithoutProfile := &api.OpenShiftCluster{
Properties: api.OpenShiftClusterProperties{
RegistryProfiles: []*api.RegistryProfile{
{
Name: "notwanted.example.com",
Username: "other",
},
},
},
}
// If it doesn't exist, it appends it
mgr.PutRegistryProfile(ocWithoutProfile, newProfile)
a.Len(ocWithoutProfile.Properties.RegistryProfiles, 2)
// If it does exist, it replaces it
mgr.PutRegistryProfile(ocWithProfile, newProfile)
a.Len(ocWithProfile.Properties.RegistryProfiles, 2)
// Check that it has been replaced
for _, err := range deep.Equal(
ocWithProfile.Properties.RegistryProfiles,
[]*api.RegistryProfile{
{
Name: "arointsvc.example.com",
Username: "token-22222222-2222-2222-2222-222222220001",
IssueDate: &date.Time{Time: time.UnixMilli(1000)},
},
{
Name: "notwanted.example.com",
Username: "other",
},
}) {
t.Error(err)
}
}

Просмотреть файл

@ -0,0 +1,60 @@
package buckets
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/bucket"
)
type WorkerFunc func(<-chan struct{}, time.Duration, string)
type monitor struct {
baseLog *logrus.Entry
bucketCount int
buckets map[int]struct{}
mu *sync.RWMutex
docs map[string]*cacheDoc
worker WorkerFunc
}
type BucketWorker interface {
Stop()
Doc(string) *api.OpenShiftClusterDocument
DeleteDoc(*api.OpenShiftClusterDocument)
UpsertDoc(*api.OpenShiftClusterDocument)
}
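// NewBucketWorker returns a monitor that caches cluster documents and runs the
// supplied worker for each document it owns. Callers must hold mu for writing
// when calling UpsertDoc or DeleteDoc.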
func NewBucketWorker(log *logrus.Entry, worker WorkerFunc, mu *sync.RWMutex) *monitor {
return &monitor{
baseLog: log,
worker: worker,
docs: map[string]*cacheDoc{},
buckets: map[int]struct{}{},
bucketCount: bucket.Buckets,
mu: mu,
}
}
func (mon *monitor) Doc(id string) *api.OpenShiftClusterDocument {
id = strings.ToLower(id)
v := mon.docs[id]
if v == nil {
return nil
}
return v.doc
}

86
pkg/util/buckets/cache.go Normal file
Просмотреть файл

@ -0,0 +1,86 @@
package buckets
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"math/rand"
"strings"
"time"
"github.com/Azure/ARO-RP/pkg/api"
)
type cacheDoc struct {
doc *api.OpenShiftClusterDocument
stop chan<- struct{}
}
// deleteDoc deletes the given document from mon.docs, signalling the associated
// monitoring goroutine to stop if it exists. Caller must hold mon.mu.Lock.
func (mon *monitor) DeleteDoc(doc *api.OpenShiftClusterDocument) {
id := strings.ToLower(doc.ID)
v := mon.docs[id]
if v != nil {
if v.stop != nil {
close(mon.docs[id].stop)
}
delete(mon.docs, id)
}
}
// upsertDoc inserts or updates the given document into mon.docs, starting an
// associated monitoring goroutine if the document is in a bucket owned by us.
// Caller must hold mon.mu.Lock.
func (mon *monitor) UpsertDoc(doc *api.OpenShiftClusterDocument) {
id := strings.ToLower(doc.ID)
v := mon.docs[id]
if v == nil {
v = &cacheDoc{}
mon.docs[id] = v
}
v.doc = doc
mon.FixDoc(doc)
}
// fixDoc ensures that there is a monitoring goroutine for the given document
// iff it is in a bucket owned by us. Caller must hold mon.mu.Lock.
func (mon *monitor) FixDoc(doc *api.OpenShiftClusterDocument) {
id := strings.ToLower(doc.ID)
v := mon.docs[id]
mon.baseLog.Debugf("fixing doc %s (%s)", doc.ID, doc.Key)
// TODO: bucketing logic
//_, ours := mon.buckets[v.doc.Bucket]
ours := true
if !ours && v.stop != nil {
mon.baseLog.Debugf("stopping channel for %s", doc.ID)
close(v.stop)
v.stop = nil
} else if ours && v.stop == nil {
ch := make(chan struct{})
v.stop = ch
delay := time.Duration(rand.Intn(60)) * time.Second
go mon.worker(ch, delay, doc.Key)
}
}
// Stop stops all workers.
func (mon *monitor) Stop() {
mon.mu.Lock()
defer mon.mu.Unlock()
for _, v := range mon.docs {
if v.stop != nil {
close(v.stop)
v.stop = nil
}
}
}
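A rough usage sketch of the bucket worker (the logger, document and polling interval are illustrative; the bucket-ownership check is still a TODO above, so every upserted document currently gets a goroutine):

worker := func(stop <-chan struct{}, delay time.Duration, key string) {
	// stagger start-up, then poll the cluster identified by key until stop is closed
	select {
	case <-time.After(delay):
	case <-stop:
		return
	}
	t := time.NewTicker(time.Minute)
	defer t.Stop()
	for {
		// ...per-cluster work keyed by key...
		select {
		case <-t.C:
		case <-stop:
			return
		}
	}
}

mu := &sync.RWMutex{}
b := NewBucketWorker(log, worker, mu)

mu.Lock()
b.UpsertDoc(doc) // starts (or keeps) a goroutine for doc.Key
mu.Unlock()

// on shutdown
b.Stop()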

Просмотреть файл

@ -0,0 +1,28 @@
package clusteroperators
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"fmt"
configv1 "github.com/openshift/api/config/v1"
)
func IsOperatorAvailable(operator *configv1.ClusterOperator) bool {
m := make(map[configv1.ClusterStatusConditionType]configv1.ConditionStatus, len(operator.Status.Conditions))
for _, cond := range operator.Status.Conditions {
m[cond.Type] = cond.Status
}
return m[configv1.OperatorAvailable] == configv1.ConditionTrue && m[configv1.OperatorProgressing] == configv1.ConditionFalse
}
func OperatorStatusText(operator *configv1.ClusterOperator) string {
m := make(map[configv1.ClusterStatusConditionType]configv1.ConditionStatus, len(operator.Status.Conditions))
for _, cond := range operator.Status.Conditions {
m[cond.Type] = cond.Status
}
return fmt.Sprintf("%s %s=%s, %s=%s", operator.Name,
configv1.OperatorAvailable, m[configv1.OperatorAvailable], configv1.OperatorProgressing, m[configv1.OperatorProgressing],
)
}

Просмотреть файл

@ -0,0 +1,120 @@
package clusteroperators
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"testing"
configv1 "github.com/openshift/api/config/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestIsOperatorAvailable(t *testing.T) {
for _, tt := range []struct {
name string
availableCondition configv1.ConditionStatus
progressingCondition configv1.ConditionStatus
want bool
}{
{
name: "Available && Progressing; not available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionTrue,
},
{
name: "Available && !Progressing; available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionFalse,
want: true,
},
{
name: "!Available && Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionTrue,
},
{
name: "!Available && !Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionFalse,
},
} {
operator := &configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
},
Status: configv1.ClusterOperatorStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorAvailable,
Status: tt.availableCondition,
},
{
Type: configv1.OperatorProgressing,
Status: tt.progressingCondition,
},
},
},
}
available := IsOperatorAvailable(operator)
if available != tt.want {
t.Error(available)
}
}
}
func TestOperatorStatusText(t *testing.T) {
for _, tt := range []struct {
name string
availableCondition configv1.ConditionStatus
progressingCondition configv1.ConditionStatus
want string
}{
{
name: "Available && Progressing; not available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionTrue,
want: "server Available=True, Progressing=True",
},
{
name: "Available && !Progressing; available",
availableCondition: configv1.ConditionTrue,
progressingCondition: configv1.ConditionFalse,
want: "server Available=True, Progressing=False",
},
{
name: "!Available && Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionTrue,
want: "server Available=False, Progressing=True",
},
{
name: "!Available && !Progressing; not available",
availableCondition: configv1.ConditionFalse,
progressingCondition: configv1.ConditionFalse,
want: "server Available=False, Progressing=False",
},
} {
operator := &configv1.ClusterOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "server",
},
Status: configv1.ClusterOperatorStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorAvailable,
Status: tt.availableCondition,
},
{
Type: configv1.OperatorProgressing,
Status: tt.progressingCondition,
},
},
},
}
available := OperatorStatusText(operator)
if available != tt.want {
t.Error(available)
}
}
}

57
pkg/util/mimo/errors.go Normal file
Просмотреть файл

@ -0,0 +1,57 @@
package mimo
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import "fmt"
type MIMOErrorVariety string
const (
MIMOErrorTypeTransientError MIMOErrorVariety = "TransientError"
MIMOErrorTypeTerminalError MIMOErrorVariety = "TerminalError"
)
type MIMOError interface {
error
MIMOErrorVariety() MIMOErrorVariety
}
type wrappedMIMOError struct {
error
variety MIMOErrorVariety
}
func (f wrappedMIMOError) MIMOErrorVariety() MIMOErrorVariety {
return f.variety
}
func (f wrappedMIMOError) Error() string {
return fmt.Sprintf("%s: %s", f.variety, f.error.Error())
}
func NewMIMOError(err error, variety MIMOErrorVariety) MIMOError {
return wrappedMIMOError{
error: err,
variety: variety,
}
}
func TerminalError(err error) MIMOError {
return NewMIMOError(err, MIMOErrorTypeTerminalError)
}
func TransientError(err error) MIMOError {
return NewMIMOError(err, MIMOErrorTypeTransientError)
}
func IsRetryableError(err error) bool {
e, ok := err.(wrappedMIMOError)
if !ok {
return false
}
if e.MIMOErrorVariety() == MIMOErrorTypeTransientError {
return true
}
return false
}
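A short sketch of how these wrappers are meant to compose: steps classify their failures, and a caller (for example the actuator's dequeue loop) can use IsRetryableError to decide whether a manifest stays pending or is marked failed. The package and function names below are illustrative, not part of this change:

package classifyexample

import (
	kerrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/Azure/ARO-RP/pkg/util/mimo"
)

// classify wraps a step failure so the caller knows whether to retry it.
func classify(err error) error {
	if kerrors.IsNotFound(err) {
		return mimo.TerminalError(err) // permanent: do not retry
	}
	return mimo.TransientError(err) // eligible for retry
}

// shouldRequeue is the caller side: only transient failures are retried.
func shouldRequeue(err error) bool {
	return err != nil && mimo.IsRetryableError(err)
}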

Просмотреть файл

@ -0,0 +1,43 @@
package mimo
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
)
type TaskContext interface {
context.Context
Now() time.Time
Environment() env.Interface
ClientHelper() (clienthelper.Interface, error)
Log() *logrus.Entry
LocalFpAuthorizer() (autorest.Authorizer, error)
// OpenShiftCluster
GetClusterUUID() string
GetOpenShiftClusterProperties() api.OpenShiftClusterProperties
GetOpenshiftClusterDocument() *api.OpenShiftClusterDocument
SetResultMessage(string)
GetResultMessage() string
}
func GetTaskContext(c context.Context) (TaskContext, error) {
r, ok := c.(TaskContext)
if !ok {
return nil, fmt.Errorf("cannot convert %v to TaskContext", c)
}
return r, nil
}

Просмотреть файл

@ -27,12 +27,26 @@ type Checker struct {
openShiftVersionDocuments []*api.OpenShiftVersionDocument
platformWorkloadIdentityRoleSetDocuments []*api.PlatformWorkloadIdentityRoleSetDocument
validationResult []*api.ValidationResult
maintenanceManifestDocuments []*api.MaintenanceManifestDocument
}
func NewChecker() *Checker {
return &Checker{}
}
func (f *Checker) Clear() {
f.openshiftClusterDocuments = []*api.OpenShiftClusterDocument{}
f.subscriptionDocuments = []*api.SubscriptionDocument{}
f.billingDocuments = []*api.BillingDocument{}
f.asyncOperationDocuments = []*api.AsyncOperationDocument{}
f.portalDocuments = []*api.PortalDocument{}
f.gatewayDocuments = []*api.GatewayDocument{}
f.openShiftVersionDocuments = []*api.OpenShiftVersionDocument{}
f.platformWorkloadIdentityRoleSetDocuments = []*api.PlatformWorkloadIdentityRoleSetDocument{}
f.validationResult = []*api.ValidationResult{}
f.maintenanceManifestDocuments = []*api.MaintenanceManifestDocument{}
}
func (f *Checker) AddOpenShiftClusterDocuments(docs ...*api.OpenShiftClusterDocument) {
for _, doc := range docs {
docCopy, err := deepCopy(doc)
@ -132,6 +146,17 @@ func (f *Checker) AddValidationResult(docs ...*api.ValidationResult) {
}
}
func (f *Checker) AddMaintenanceManifestDocuments(docs ...*api.MaintenanceManifestDocument) {
for _, doc := range docs {
docCopy, err := deepCopy(doc)
if err != nil {
panic(err)
}
f.maintenanceManifestDocuments = append(f.maintenanceManifestDocuments, docCopy.(*api.MaintenanceManifestDocument))
}
}
func (f *Checker) CheckOpenShiftClusters(openShiftClusters *cosmosdb.FakeOpenShiftClusterDocumentClient) (errs []error) {
ctx := context.Background()
@ -309,3 +334,27 @@ func (f *Checker) CheckPlatformWorkloadIdentityRoleSets(roleSets *cosmosdb.FakeP
return errs
}
func (f *Checker) CheckMaintenanceManifests(client *cosmosdb.FakeMaintenanceManifestDocumentClient) (errs []error) {
ctx := context.Background()
all, err := client.ListAll(ctx, nil)
if err != nil {
return []error{err}
}
sort.Slice(all.MaintenanceManifestDocuments, func(i, j int) bool {
return all.MaintenanceManifestDocuments[i].ID < all.MaintenanceManifestDocuments[j].ID
})
if len(f.maintenanceManifestDocuments) != 0 && len(all.MaintenanceManifestDocuments) == len(f.maintenanceManifestDocuments) {
diff := deep.Equal(all.MaintenanceManifestDocuments, f.maintenanceManifestDocuments)
for _, i := range diff {
errs = append(errs, errors.New(i))
}
} else if len(all.MaintenanceManifestDocuments) != 0 || len(f.maintenanceManifestDocuments) != 0 {
errs = append(errs, fmt.Errorf("document length different, %d vs %d", len(all.MaintenanceManifestDocuments), len(f.maintenanceManifestDocuments)))
}
return errs
}

Просмотреть файл

@ -21,6 +21,7 @@ type Fixture struct {
openShiftVersionDocuments []*api.OpenShiftVersionDocument
platformWorkloadIdentityRoleSetDocuments []*api.PlatformWorkloadIdentityRoleSetDocument
clusterManagerConfigurationDocuments []*api.ClusterManagerConfigurationDocument
maintenanceManifestDocuments []*api.MaintenanceManifestDocument
openShiftClustersDatabase database.OpenShiftClusters
billingDatabase database.Billing
@ -31,6 +32,7 @@ type Fixture struct {
openShiftVersionsDatabase database.OpenShiftVersions
platformWorkloadIdentityRoleSetsDatabase database.PlatformWorkloadIdentityRoleSets
clusterManagerConfigurationsDatabase database.ClusterManagerConfigurations
maintenanceManifestsDatabase database.MaintenanceManifests
openShiftVersionsUUID uuid.Generator
platformWorkloadIdentityRoleSetsUUID uuid.Generator
@ -40,6 +42,19 @@ func NewFixture() *Fixture {
return &Fixture{}
}
func (f *Fixture) Clear() {
f.openshiftClusterDocuments = []*api.OpenShiftClusterDocument{}
f.subscriptionDocuments = []*api.SubscriptionDocument{}
f.billingDocuments = []*api.BillingDocument{}
f.asyncOperationDocuments = []*api.AsyncOperationDocument{}
f.portalDocuments = []*api.PortalDocument{}
f.gatewayDocuments = []*api.GatewayDocument{}
f.openShiftVersionDocuments = []*api.OpenShiftVersionDocument{}
f.clusterManagerConfigurationDocuments = []*api.ClusterManagerConfigurationDocument{}
f.platformWorkloadIdentityRoleSetDocuments = []*api.PlatformWorkloadIdentityRoleSetDocument{}
f.maintenanceManifestDocuments = []*api.MaintenanceManifestDocument{}
}
func (f *Fixture) WithClusterManagerConfigurations(db database.ClusterManagerConfigurations) *Fixture {
f.clusterManagerConfigurationsDatabase = db
return f
@ -87,6 +102,11 @@ func (f *Fixture) WithPlatformWorkloadIdentityRoleSets(db database.PlatformWorkl
return f
}
func (f *Fixture) WithMaintenanceManifests(db database.MaintenanceManifests) *Fixture {
f.maintenanceManifestsDatabase = db
return f
}
func (f *Fixture) AddOpenShiftClusterDocuments(docs ...*api.OpenShiftClusterDocument) {
for _, doc := range docs {
docCopy, err := deepCopy(doc)
@ -186,6 +206,17 @@ func (f *Fixture) AddClusterManagerConfigurationDocuments(docs ...*api.ClusterMa
}
}
func (f *Fixture) AddMaintenanceManifestDocuments(docs ...*api.MaintenanceManifestDocument) {
for _, doc := range docs {
docCopy, err := deepCopy(doc)
if err != nil {
panic(err)
}
f.maintenanceManifestDocuments = append(f.maintenanceManifestDocuments, docCopy.(*api.MaintenanceManifestDocument))
}
}
func (f *Fixture) Create() error {
ctx := context.Background()
@ -264,5 +295,15 @@ func (f *Fixture) Create() error {
}
}
for _, i := range f.maintenanceManifestDocuments {
if i.ID == "" {
i.ID = f.maintenanceManifestsDatabase.NewUUID()
}
_, err := f.maintenanceManifestsDatabase.Create(ctx, i)
if err != nil {
return err
}
}
return nil
}

Просмотреть файл

@ -4,6 +4,8 @@ package database
// Licensed under the Apache License 2.0.
import (
"time"
"github.com/ugorji/go/codec"
"github.com/Azure/ARO-RP/pkg/database"
@ -86,3 +88,12 @@ func NewFakeClusterManager() (db database.ClusterManagerConfigurations, client *
db = database.NewClusterManagerConfigurationsWithProvidedClient(client, coll, "", uuid)
return db, client
}
func NewFakeMaintenanceManifests(now func() time.Time) (db database.MaintenanceManifests, client *cosmosdb.FakeMaintenanceManifestDocumentClient) {
uuid := deterministicuuid.NewTestUUIDGenerator(deterministicuuid.MAINTENANCE_MANIFESTS)
coll := &fakeCollectionClient{}
client = cosmosdb.NewFakeMaintenanceManifestDocumentClient(jsonHandle)
injectMaintenanceManifests(client, now)
db = database.NewMaintenanceManifestsWithProvidedClient(client, coll, "", uuid)
return db, client
}
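A sketch of how a unit test might wire the fake maintenance-manifest database together with the fixture and checker added above. The import alias, the fixed ID and the document contents are illustrative, and whether the checker comparison needs extra fields (such as ETags set by the fake client) depends on the code under test:

package database_test

import (
	"testing"
	"time"

	"github.com/Azure/ARO-RP/pkg/api"
	testdatabase "github.com/Azure/ARO-RP/test/util/database"
)

func TestMaintenanceManifestsSketch(t *testing.T) {
	now := func() time.Time { return time.Unix(120, 0) }
	db, client := testdatabase.NewFakeMaintenanceManifests(now)

	doc := &api.MaintenanceManifestDocument{
		ID:                "11111111-1111-1111-1111-111111111111",
		ClusterResourceID: "fake-cluster-resource-id",
		MaintenanceManifest: api.MaintenanceManifest{
			State: api.MaintenanceManifestStatePending,
		},
	}

	fixture := testdatabase.NewFixture().WithMaintenanceManifests(db)
	fixture.AddMaintenanceManifestDocuments(doc)
	if err := fixture.Create(); err != nil {
		t.Fatal(err)
	}

	// ...exercise the code under test against db...

	checker := testdatabase.NewChecker()
	checker.AddMaintenanceManifestDocuments(doc) // the expected end state
	for _, err := range checker.CheckMaintenanceManifests(client) {
		t.Error(err)
	}
}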

Просмотреть файл

@ -0,0 +1,136 @@
package database
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"cmp"
"context"
"slices"
"strconv"
"time"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
)
func injectMaintenanceManifests(c *cosmosdb.FakeMaintenanceManifestDocumentClient, now func() time.Time) {
c.SetQueryHandler(database.MaintenanceManifestQueryForCluster, func(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options) cosmosdb.MaintenanceManifestDocumentRawIterator {
return fakeMaintenanceManifestsForCluster(client, query, options, now)
})
c.SetQueryHandler(database.MaintenanceManifestDequeueQueryForCluster, func(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options) cosmosdb.MaintenanceManifestDocumentRawIterator {
return fakeMaintenanceManifestsDequeueForCluster(client, query, options, now)
})
c.SetQueryHandler(database.MaintenanceManifestQueueOverallQuery, func(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options) cosmosdb.MaintenanceManifestDocumentRawIterator {
return fakeMaintenanceManifestsQueuedAll(client, query, options, now)
})
c.SetTriggerHandler("renewLease", func(ctx context.Context, doc *api.MaintenanceManifestDocument) error {
return fakeMaintenanceManifestsRenewLeaseTrigger(ctx, doc, now)
})
}
func fakeMaintenanceManifestsDequeueForCluster(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options, now func() time.Time) cosmosdb.MaintenanceManifestDocumentRawIterator {
startingIndex, err := fakeMaintenanceManifestsGetContinuation(options)
if err != nil {
return cosmosdb.NewFakeMaintenanceManifestDocumentErroringRawIterator(err)
}
input, err := client.ListAll(context.Background(), nil)
if err != nil {
// TODO: should this never happen?
panic(err)
}
clusterResourceID := query.Parameters[0].Value
var results []*api.MaintenanceManifestDocument
for _, r := range input.MaintenanceManifestDocuments {
if r.ClusterResourceID != clusterResourceID {
continue
}
if r.MaintenanceManifest.State != api.MaintenanceManifestStatePending {
continue
}
if r.LeaseExpires > 0 && int64(r.LeaseExpires) < now().Unix() {
continue
}
results = append(results, r)
}
return cosmosdb.NewFakeMaintenanceManifestDocumentIterator(results, startingIndex)
}
func fakeMaintenanceManifestsForCluster(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options, now func() time.Time) cosmosdb.MaintenanceManifestDocumentRawIterator {
startingIndex, err := fakeMaintenanceManifestsGetContinuation(options)
if err != nil {
return cosmosdb.NewFakeMaintenanceManifestDocumentErroringRawIterator(err)
}
input, err := client.ListAll(context.Background(), nil)
if err != nil {
// TODO: should this never happen?
panic(err)
}
clusterResourceID := query.Parameters[0].Value
var results []*api.MaintenanceManifestDocument
for _, r := range input.MaintenanceManifestDocuments {
if r.ClusterResourceID != clusterResourceID {
continue
}
results = append(results, r)
}
slices.SortFunc(results, func(a, b *api.MaintenanceManifestDocument) int {
return cmp.Compare(a.ID, b.ID)
})
return cosmosdb.NewFakeMaintenanceManifestDocumentIterator(results, startingIndex)
}
func fakeMaintenanceManifestsQueuedAll(client cosmosdb.MaintenanceManifestDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options, now func() time.Time) cosmosdb.MaintenanceManifestDocumentRawIterator {
startingIndex, err := fakeMaintenanceManifestsGetContinuation(options)
if err != nil {
return cosmosdb.NewFakeMaintenanceManifestDocumentErroringRawIterator(err)
}
input, err := client.ListAll(context.Background(), nil)
if err != nil {
// TODO: should this never happen?
panic(err)
}
var results []*api.MaintenanceManifestDocument
for _, r := range input.MaintenanceManifestDocuments {
if r.MaintenanceManifest.State != api.MaintenanceManifestStatePending {
continue
}
if r.LeaseExpires > 0 && int64(r.LeaseExpires) < now().Unix() {
continue
}
results = append(results, r)
}
slices.SortFunc(results, func(a, b *api.MaintenanceManifestDocument) int {
return cmp.Compare(a.ID, b.ID)
})
return cosmosdb.NewFakeMaintenanceManifestDocumentIterator(results, startingIndex)
}
func fakeMaintenanceManifestsRenewLeaseTrigger(ctx context.Context, doc *api.MaintenanceManifestDocument, now func() time.Time) error {
doc.LeaseExpires = int(now().Unix()) + 60
return nil
}
func fakeMaintenanceManifestsGetContinuation(options *cosmosdb.Options) (startingIndex int, err error) {
if options != nil && options.Continuation != "" {
startingIndex, err = strconv.Atoi(options.Continuation)
}
return
}

Просмотреть файл

@ -151,6 +151,28 @@ func openShiftClusterConflictChecker(one *api.OpenShiftClusterDocument, two *api
return false
}
func fakeOpenShiftClustersOnlyResourceID(client cosmosdb.OpenShiftClusterDocumentClient, query *cosmosdb.Query, options *cosmosdb.Options) cosmosdb.OpenShiftClusterDocumentRawIterator {
startingIndex, err := fakeOpenShiftClustersGetContinuation(options)
if err != nil {
return cosmosdb.NewFakeOpenShiftClusterDocumentErroringRawIterator(err)
}
docs, err := fakeOpenShiftClustersGetAllDocuments(client)
if err != nil {
return cosmosdb.NewFakeOpenShiftClusterDocumentErroringRawIterator(err)
}
newDocs := make([]*api.OpenShiftClusterDocument, 0)
for _, d := range docs {
newDocs = append(newDocs, &api.OpenShiftClusterDocument{
Key: d.Key,
})
}
return cosmosdb.NewFakeOpenShiftClusterDocumentIterator(newDocs, startingIndex)
}
func injectOpenShiftClusters(c *cosmosdb.FakeOpenShiftClusterDocumentClient) {
c.SetQueryHandler(database.OpenShiftClustersDequeueQuery, fakeOpenShiftClustersDequeueQuery)
c.SetQueryHandler(database.OpenShiftClustersQueueLengthQuery, fakeOpenShiftClustersQueueLengthQuery)
@ -158,6 +180,7 @@ func injectOpenShiftClusters(c *cosmosdb.FakeOpenShiftClusterDocumentClient) {
c.SetQueryHandler(database.OpenshiftClustersClientIdQuery, fakeOpenshiftClustersMatchQuery)
c.SetQueryHandler(database.OpenshiftClustersResourceGroupQuery, fakeOpenshiftClustersMatchQuery)
c.SetQueryHandler(database.OpenshiftClustersPrefixQuery, fakeOpenshiftClustersPrefixQuery)
c.SetQueryHandler(database.OpenshiftClustersClusterResourceIDOnlyQuery, fakeOpenShiftClustersOnlyResourceID)
c.SetTriggerHandler("renewLease", fakeOpenShiftClustersRenewLeaseTrigger)

Просмотреть файл

@ -15,11 +15,40 @@ import (
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/env"
)
func adminRequest(ctx context.Context, method, path string, params url.Values, strict bool, in, out interface{}) (*http.Response, error) {
type adminReqOpts struct {
Option string
Value interface{}
}
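// logOnError returns request options that make adminRequest log the response
// body via the supplied logger when the RP returns an HTTP 500, which makes
// failures easier to diagnose.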
func logOnError(log *logrus.Entry) []adminReqOpts {
return []adminReqOpts{
{
Option: "log",
Value: log,
},
{
Option: "logOnError",
Value: true,
},
}
}
func _getAdminReqOpt(key string, opts []adminReqOpts) (interface{}, bool) {
for _, i := range opts {
if i.Option == key {
return i.Value, true
}
}
return nil, false
}
func adminRequest(ctx context.Context, method, path string, params url.Values, strict bool, in, out interface{}, opts ...adminReqOpts) (*http.Response, error) {
if !env.IsLocalDevelopmentMode() {
return nil, errors.New("only development RP mode is supported")
}
@ -69,7 +98,24 @@ func adminRequest(ctx context.Context, method, path string, params url.Values, s
}()
if out != nil && resp.Header.Get("Content-Type") == "application/json" {
decoder := json.NewDecoder(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return resp, err
}
if resp.StatusCode == http.StatusInternalServerError {
_, ok := _getAdminReqOpt("logOnError", opts)
if ok {
log, ok := _getAdminReqOpt("log", opts)
if ok {
logger := log.(*logrus.Entry)
logger.Errorf("Failed request, content: %s", string(body))
}
}
}
decoder := json.NewDecoder(bytes.NewBuffer(body))
// If strict is set, enable DisallowUnknownFields. This is used to
// verify that the response doesn't contain any fields that are not
// defined, namely systemData.

Просмотреть файл

@ -14,12 +14,18 @@ import (
"github.com/Azure/ARO-RP/pkg/api/admin"
)
var _ = Describe("[Admin API] Cluster admin update action", func() {
var _ = Describe("[Admin API] Cluster admin update action", Serial, func() {
BeforeEach(skipIfNotInDevelopmentEnv)
It("must run cluster update operation on a cluster", func(ctx context.Context) {
var oc = &admin.OpenShiftCluster{}
// Wait for the cluster to be in a succeeded state before continuing
Eventually(func(g Gomega, ctx context.Context) {
oc = adminGetCluster(g, ctx, clusterResourceID)
g.Expect(oc.Properties.ProvisioningState).To(Equal(admin.ProvisioningStateSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
By("triggering the update via RP admin API")
resp, err := adminRequest(ctx, http.MethodPatch, clusterResourceID, nil, true, json.RawMessage("{}"), oc)
Expect(err).NotTo(HaveOccurred())

121
test/e2e/mimo_actuator.go Normal file
Просмотреть файл

@ -0,0 +1,121 @@
package e2e
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"github.com/Azure/ARO-RP/pkg/api/admin"
"github.com/Azure/ARO-RP/pkg/mimo"
"github.com/Azure/ARO-RP/pkg/operator"
"github.com/Azure/ARO-RP/pkg/util/uuid"
)
var _ = Describe("MIMO Actuator E2E Testing", Serial, func() {
BeforeEach(func() {
skipIfNotInDevelopmentEnv()
skipIfMIMOActuatorNotEnabled()
DeferCleanup(func(ctx context.Context) {
// reset feature flags to their default values
var oc = &admin.OpenShiftCluster{}
resp, err := adminRequest(ctx,
http.MethodPatch, clusterResourceID, nil, true,
json.RawMessage("{\"operatorFlagsMergeStrategy\": \"reset\", \"properties\": {\"maintenanceTask\": \"SyncClusterObject\"}}"), oc)
Expect(err).NotTo(HaveOccurred())
Expect(resp.StatusCode).To(Equal(http.StatusOK))
// Wait for it to settle
time.Sleep(5 * time.Second)
// Wait for the flag reset to finish applying
Eventually(func(g Gomega, ctx context.Context) {
oc = adminGetCluster(g, ctx, clusterResourceID)
g.Expect(oc.Properties.ProvisioningState).To(Equal(admin.ProvisioningStateSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
})
})
It("Should be able to schedule and run a maintenance set via the admin API", func(ctx context.Context) {
var oc = &admin.OpenShiftCluster{}
testflag := "aro.e2e.testflag." + uuid.DefaultGenerator.Generate()
By("set a bogus flag on the cluster")
resp, err := adminRequest(ctx,
http.MethodPatch, clusterResourceID, nil, true,
json.RawMessage("{\"properties\": {\"maintenanceTask\": \"SyncClusterObject\", \"operatorFlags\": {\""+testflag+"\": \"true\"}}}"), oc)
Expect(err).NotTo(HaveOccurred())
Expect(resp.StatusCode).To(Equal(http.StatusOK))
By("waiting for the update to complete")
Eventually(func(g Gomega, ctx context.Context) {
oc = adminGetCluster(g, ctx, clusterResourceID)
g.Expect(oc.Properties.ProvisioningState).To(Equal(admin.ProvisioningStateSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
By("check the flag is set in the cluster")
co, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
flag, ok := co.Spec.OperatorFlags[testflag]
Expect(ok).To(BeTrue())
Expect(flag).To(Equal("true"))
By("change the flag in-cluster to a wrong value")
// get the flag we want to check for
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
co, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
if err != nil {
return err
}
co.Spec.OperatorFlags[testflag] = operator.FlagFalse
_, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, co, metav1.UpdateOptions{})
return err
})
Expect(err).NotTo(HaveOccurred())
By("creating the flag update manifest via the API")
out := &admin.MaintenanceManifest{}
resp, err = adminRequest(ctx,
http.MethodPut, "/admin"+clusterResourceID+"/maintenancemanifests",
url.Values{}, true, &admin.MaintenanceManifest{
MaintenanceTaskID: mimo.OPERATOR_FLAGS_UPDATE_ID,
}, &out, logOnError(log)...)
Expect(err).NotTo(HaveOccurred())
Expect(resp.StatusCode).To(Equal(http.StatusCreated))
manifestID := out.ID
By("waiting for the manifest run to complete")
Eventually(func(g Gomega, ctx context.Context) {
fetchedManifest := &admin.MaintenanceManifest{}
resp, err = adminRequest(ctx,
http.MethodGet, "/admin"+clusterResourceID+"/maintenancemanifests/"+manifestID,
url.Values{}, true, nil, &fetchedManifest)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(resp.StatusCode).To(Equal(http.StatusOK))
g.Expect(fetchedManifest.State).To(Equal(admin.MaintenanceManifestStateCompleted))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
By("checking the flag has been set back in the cluster")
co, err = clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
flag, ok = co.Spec.OperatorFlags[testflag]
Expect(ok).To(BeTrue())
Expect(flag).To(Equal("true"), "MIMO manifest has not run")
})
})

Просмотреть файл

@ -131,6 +131,12 @@ func skipIfSeleniumNotEnabled() {
}
}
func skipIfMIMOActuatorNotEnabled() {
if os.Getenv("ARO_E2E_MIMO") == "" {
Skip("ARO_E2E_MIMO not set, skipping MIMO e2e")
}
}
func skipIfNotHiveManagedCluster(adminAPICluster *admin.OpenShiftCluster) {
if adminAPICluster.Properties.HiveProfile == (admin.HiveProfile{}) {
Skip("skipping tests because this ARO cluster has not been created/adopted by Hive")

Просмотреть файл

@ -0,0 +1,120 @@
package tasks
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/env"
"github.com/Azure/ARO-RP/pkg/util/clienthelper"
)
type fakeTestContext struct {
context.Context
now func() time.Time
env env.Interface
ch clienthelper.Interface
log *logrus.Entry
clusterUUID string
clusterResourceID string
properties api.OpenShiftClusterProperties
resultMessage string
}
type Option func(*fakeTestContext)
func WithClientHelper(ch clienthelper.Interface) Option {
return func(ftc *fakeTestContext) {
ftc.ch = ch
}
}
func WithOpenShiftClusterDocument(oc *api.OpenShiftClusterDocument) Option {
return func(ftc *fakeTestContext) {
ftc.clusterUUID = oc.ID
ftc.clusterResourceID = oc.OpenShiftCluster.ID
ftc.properties = oc.OpenShiftCluster.Properties
}
}
func WithOpenShiftClusterProperties(uuid string, oc api.OpenShiftClusterProperties) Option {
return func(ftc *fakeTestContext) {
ftc.clusterUUID = uuid
ftc.properties = oc
}
}
func NewFakeTestContext(ctx context.Context, env env.Interface, log *logrus.Entry, now func() time.Time, o ...Option) *fakeTestContext {
ftc := &fakeTestContext{
Context: ctx,
env: env,
log: log,
now: now,
}
for _, i := range o {
i(ftc)
}
return ftc
}
func (t *fakeTestContext) LocalFpAuthorizer() (autorest.Authorizer, error) {
myAuthorizer := autorest.NullAuthorizer{}
return myAuthorizer, nil
}
func (t *fakeTestContext) GetOpenshiftClusterDocument() *api.OpenShiftClusterDocument {
myCD := &api.OpenShiftClusterDocument{}
return myCD
}
// handle
func (t *fakeTestContext) Environment() env.Interface {
return t.env
}
func (t *fakeTestContext) ClientHelper() (clienthelper.Interface, error) {
if t.ch == nil {
return nil, fmt.Errorf("missing clienthelper")
}
return t.ch, nil
}
func (t *fakeTestContext) Log() *logrus.Entry {
return t.log
}
func (t *fakeTestContext) Now() time.Time {
return t.now()
}
// OpenShiftCluster
func (t *fakeTestContext) GetClusterUUID() string {
if t.clusterUUID == "" {
panic("didn't set up openshiftcluster in test")
}
return t.clusterUUID
}
func (t *fakeTestContext) GetOpenShiftClusterProperties() api.OpenShiftClusterProperties {
if t.clusterUUID == "" {
panic("didn't set up openshiftcluster in test")
}
return t.properties
}
func (t *fakeTestContext) SetResultMessage(s string) {
t.resultMessage = s
}
func (t *fakeTestContext) GetResultMessage() string {
return t.resultMessage
}

Просмотреть файл

@ -19,6 +19,7 @@ const (
GATEWAY
OPENSHIFT_VERSIONS
CLUSTERMANAGER
MAINTENANCE_MANIFESTS
)
type gen struct {