Merge branch 'master' into add-az-list-install-versions

This commit is contained in:
Carlo Wisse 2022-08-23 17:34:49 +10:00
Родитель 1efe7bf690 cae9508907
Коммит 807b3ac878
29 изменённых файлов: 519 добавлений и 248 удалений

25
.github/workflows/ci-go.yml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
---
# CI workflow: builds the repo inside a container using Dockerfile.ci
# (which runs hack/ci-utils/build.sh: generate, build, unit tests).
name: ci-go

on:
  push:
    tags:
      - "v*"
    branches:
      - master
  pull_request:

# Least-privilege token: the job only needs to read the repository.
permissions:
  contents: read

jobs:
  ci-from-docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: build from buildah
        uses: redhat-actions/buildah-build@v2
        with:
          image: test-image
          # Tag the image with the commit SHA so each build is traceable.
          tags: ${{ github.sha }}
          context: .
          containerfiles: ./Dockerfile.ci

Просмотреть файл

@ -1,23 +1,19 @@
# Azure DevOps Pipeline building rp images and pushing to int acr
# Azure DevOps Pipeline for generating release notes
trigger: none
pr: none
variables:
- template: vars.yml
- name: TAG
- group: PROD CI Credentials
jobs:
- job: Build_and_push_images
- job: Generate_release_notes
condition: startsWith(variables['build.sourceBranch'], 'refs/tags/v2')
displayName: Build release
displayName: Generate release notes
pool:
name: ARO-CI
demands: go-1.17
steps:
- template: ./templates/template-checkout.yml
- template: ./templates/template-az-cli-login.yml
parameters:
azureDevOpsJSONSPN: $(aro-v4-ci-devops-spn)
@ -31,19 +27,6 @@ jobs:
## set the variable
echo "##vso[task.setvariable variable=TAG]${TAG}"
- template: ./templates/template-push-images-to-acr-tagged.yml
parameters:
rpImageACR: $(RP_IMAGE_ACR)
imageTag: $(TAG)
- template: ./templates/template-az-cli-logout.yml
- script: |
cp -a --parents aro "$(Build.ArtifactStagingDirectory)"
displayName: Copy artifacts
- task: PublishBuildArtifacts@1
displayName: Publish Artifacts
name: aro_deployer
- script: |
set -xe
MESSAGE="$(git for-each-ref refs/tags/${TAG} --format='%(contents)')"

Просмотреть файл

@ -13,7 +13,8 @@ pr: none
variables:
Cdp_Definition_Build_Count: $[counter('', 0)] # needed for onebranch.pipeline.version task https://aka.ms/obpipelines/versioning
LinuxContainerImage: cdpxlinux.azurecr.io/user/aro/ubi8-gotoolset-1.17.7-13:20220526 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
ONEBRANCH_AME_ACR_LOGIN: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.17.7-13 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
Debian_Frontend: noninteractive
resources:

Просмотреть файл

@ -13,7 +13,8 @@ pr: none
variables:
Cdp_Definition_Build_Count: $[counter('', 0)] # needed for onebranch.pipeline.version task https://aka.ms/obpipelines/versioning
LinuxContainerImage: cdpxlinux.azurecr.io/user/aro/ubi8-gotoolset-1.17.7-13:20220526 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
ONEBRANCH_AME_ACR_LOGIN: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
LinuxContainerImage: $(ONEBRANCH_AME_ACR_LOGIN)/b8e9ef87-cd63-4085-ab14-1c637806568c/official/ubi8/go-toolset:1.17.7-13 # Docker image which is used to build the project https://aka.ms/obpipelines/containers
Debian_Frontend: noninteractive
resources:

Просмотреть файл

@ -3,9 +3,10 @@ steps:
displayName: Build Multi Stage Dockerfile
inputs:
repositoryName: aro-rp
dockerFileRelPath: ./Dockerfile.aro-multistage-onebranch
dockerFileRelPath: ./Dockerfile.aro-multistage
dockerFileContextPath: ./
registry: cdpxlinux.azurecr.io
registry: cdpxb8e9ef87cd634085ab141c637806568c00.azurecr.io
arguments: --build-arg REGISTRY=registry.access.redhat.com
saveImageToPath: aro-rp.tar
buildkit: 1
enable_network: true

Просмотреть файл

@ -1,22 +0,0 @@
# Uses a multi-stage container build to build the RP in OneBranch.
#
# TODO:
# OneBranch pipelines currently pull from CDPx which is deprecated. As a temporary fix before
# we migrate to a new solution, this multistage dockerfile uses the same image in both steps to avoid
# needing to pull images that aren't hosted in CDPx.
FROM cdpxlinux.azurecr.io/user/aro/ubi8-gotoolset-1.17.7-13:20220526 AS builder
ENV GOOS=linux \
GOPATH=/go/
WORKDIR ${GOPATH}/src/github.com/Azure/ARO-RP
USER root
RUN yum update -y
COPY . ${GOPATH}/src/github.com/Azure/ARO-RP/
RUN make aro && make e2e.test
FROM cdpxlinux.azurecr.io/user/aro/ubi8-gotoolset-1.17.7-13:20220526
USER root
RUN yum -y update && yum -y clean all
COPY --from=builder /go/src/github.com/Azure/ARO-RP/aro /go/src/github.com/Azure/ARO-RP/e2e.test /usr/local/bin/
ENTRYPOINT ["aro"]
EXPOSE 2222/tcp 8080/tcp 8443/tcp 8444/tcp 8445/tcp
USER 1000

36
Dockerfile.ci Normal file
Просмотреть файл

@ -0,0 +1,36 @@
# CI builder image: copies the repository in cache-friendly layers and runs
# hack/ci-utils/build.sh (generate, build-all, unit tests) at image build time.
FROM registry.access.redhat.com/ubi8/go-toolset:1.17.7
USER root
RUN mkdir -p /root/go/src/github.com/Azure/ARO-RP
WORKDIR /root/go/src/github.com/Azure/ARO-RP
ENV GOPATH=/root/go
# we have multiple steps for copy so we can make use of caching
COPY vendor/ vendor
COPY docs/ docs
COPY hack/ hack
COPY swagger/ swagger
COPY test/ test
COPY python/ python
COPY portal/ portal
COPY cmd/ cmd
COPY pkg/ pkg
# COPY all files with an extension (directories not copied)
COPY ?*.* .
COPY Makefile LICENSE .
# Dotfiles are not matched by the glob above, so copy them explicitly.
# .git is needed so build.sh can run `git status` to detect dirty output.
COPY .git .git
COPY .gitignore .gitignore
COPY .pipelines .pipelines
COPY .gdn .gdn
COPY .github .github
COPY .env .env
COPY .sha256sum .sha256sum
COPY .config .config
RUN hack/ci-utils/build.sh

Просмотреть файл

@ -23,15 +23,16 @@ you are tagging the right commit with the changes you want to release.
## Release pipeline
Currently the release is done manually via the [pipeline](https://github.com/Azure/ARO-RP/blob/master/.pipelines/build-and-push-images-tagged.yml).
The pipeline does not have any parameter, instead the pipeline is started on the `tag` as illustrated on the image.
![Start pipelines with tag](img/pipelines.png "Aro Monitor Architecture")
Currently the release is done manually via the [EV2 pipelines](https://msazure.visualstudio.com/AzureRedHatOpenShift/_wiki/wikis/ARO.wiki/233405/Performing-Release).
### Release page
Once the release is built, new item is added to the [GitHub release page](https://github.com/Azure/ARO-RP/releases) with
The generate release notes pipeline does not take any parameters; instead, the pipeline is started from the `tag`, as illustrated in the image below.
![Start pipelines with tag](img/pipelines.png "Start pipeline from a tag")
Once the release notes pipeline is finished, a new item is added to the [GitHub release page](https://github.com/Azure/ARO-RP/releases) with
the following format:
```
@ -44,11 +45,6 @@ ${{tag annotation}}
- hash commit message
- hash commit message
## Assets:
- aro binary
- code zipped
```
The title of the release is the used `git tag`. The description is extracted

20
hack/ci-utils/build.sh Executable file
Просмотреть файл

@ -0,0 +1,20 @@
#!/bin/bash
# CI build script: regenerates code, builds all targets, and runs the Go unit
# tests, failing the build if any step leaves the working tree dirty (i.e.
# generated or committed content is out of date in the PR).

# isClean exits non-zero when `git status -s` reports any modified or
# untracked files, printing the full status for diagnostics.
isClean() {
	if [[ -n "$(git status -s)" ]]
	then
		echo "there are some modified files"
		git status
		exit 1
	fi
}

# -x: trace commands for CI logs; -e: abort on the first failing command.
set -xe

make generate
isClean

make build-all
isClean

make unit-test-go
isClean

Просмотреть файл

@ -295,6 +295,7 @@ func (ocb *openShiftClusterBackend) endLease(ctx context.Context, log *logrus.En
func (ocb *openShiftClusterBackend) asyncOperationResultLog(log *logrus.Entry, initialProvisioningState api.ProvisioningState, backendErr error) {
log = log.WithFields(logrus.Fields{
"LOGKIND": "asyncqos",
"resultType": utillog.SuccessResultType,
"operationType": initialProvisioningState.String(),
})

Просмотреть файл

@ -12,7 +12,6 @@ import (
"strings"
mgmtnetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-08-01/network"
mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
@ -45,16 +44,19 @@ func (m *manager) ensureInfraID(ctx context.Context) (err error) {
func (m *manager) ensureResourceGroup(ctx context.Context) error {
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
group := mgmtfeatures.ResourceGroup{
Location: &m.doc.OpenShiftCluster.Location,
ManagedBy: &m.doc.OpenShiftCluster.ID,
}
if m.env.IsLocalDevelopmentMode() {
// grab tags so we do not accidentally remove them on createOrUpdate, set purge tag to true for dev clusters
rg, err := m.resourceGroups.Get(ctx, resourceGroup)
if err == nil {
group.Tags = rg.Tags
// Retain the existing resource group configuration (such as tags) if it exists
group, err := m.resourceGroups.Get(ctx, resourceGroup)
if err != nil {
if detailedErr, ok := err.(autorest.DetailedError); !ok || detailedErr.StatusCode != http.StatusNotFound {
return err
}
}
group.Location = &m.doc.OpenShiftCluster.Location
group.ManagedBy = &m.doc.OpenShiftCluster.ID
// HACK: set purge=true on dev clusters so our purger wipes them out since there is no deny assignment in place
if m.env.IsLocalDevelopmentMode() {
if group.Tags == nil {
group.Tags = map[string]*string{}
}
@ -64,7 +66,7 @@ func (m *manager) ensureResourceGroup(ctx context.Context) error {
// According to https://stackoverflow.microsoft.com/a/245391/62320,
// re-PUTting our RG should re-create RP RBAC after a customer subscription
// migrates between tenants.
_, err := m.resourceGroups.CreateOrUpdate(ctx, resourceGroup, group)
_, err = m.resourceGroups.CreateOrUpdate(ctx, resourceGroup, group)
var serviceError *azure.ServiceError
// CreateOrUpdate wraps DetailedError wrapping a *RequestError (if error generated in ResourceGroup CreateOrUpdateResponder at least)

Просмотреть файл

@ -5,7 +5,9 @@ package cluster
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"testing"
@ -25,7 +27,7 @@ import (
testdatabase "github.com/Azure/ARO-RP/test/database"
)
func TestCreateAndUpdateErrors(t *testing.T) {
func TestEnsureResourceGroup(t *testing.T) {
ctx := context.Background()
clusterID := "test-cluster"
resourceGroupName := "fakeResourceGroup"
@ -37,41 +39,126 @@ func TestCreateAndUpdateErrors(t *testing.T) {
ManagedBy: &clusterID,
}
groupWithTags := group
groupWithTags.Tags = map[string]*string{
"yeet": to.StringPtr("yote"),
}
disallowedByPolicy := autorest.NewErrorWithError(&azure.RequestError{
ServiceError: &azure.ServiceError{Code: "RequestDisallowedByPolicy"},
}, "", "", nil, "")
for _, tt := range []struct {
name string
result mgmtfeatures.ResourceGroup
mocks func(*mock_features.MockResourceGroupsClient, interface{})
mocks func(*mock_features.MockResourceGroupsClient, *mock_env.MockInterface)
wantErr string
}{
{
name: "ResourceGroup creation was fine",
mocks: func(rg *mock_features.MockResourceGroupsClient, result interface{}) {
name: "success - rg doesn't exist",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
rg.EXPECT().
CreateOrUpdate(ctx, resourceGroupName, group).
Return(result, nil)
Get(gomock.Any(), resourceGroupName).
Return(group, autorest.DetailedError{StatusCode: http.StatusNotFound})
rg.EXPECT().
CreateOrUpdate(gomock.Any(), resourceGroupName, group).
Return(group, nil)
env.EXPECT().
IsLocalDevelopmentMode().
Return(false)
env.EXPECT().
EnsureARMResourceGroupRoleAssignment(gomock.Any(), gomock.Any(), resourceGroupName).
Return(nil)
},
},
{
name: "ResourceGroup creation failed with RequestDisallowedByPolicy",
mocks: func(rg *mock_features.MockResourceGroupsClient, result interface{}) {
name: "success - rg doesn't exist and localdev mode tags set",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
groupWithLocalDevTags := group
groupWithLocalDevTags.Tags = map[string]*string{
"purge": to.StringPtr("true"),
}
rg.EXPECT().
CreateOrUpdate(ctx, resourceGroupName, group).
Return(result, disallowedByPolicy)
Get(gomock.Any(), resourceGroupName).
Return(group, autorest.DetailedError{StatusCode: http.StatusNotFound})
rg.EXPECT().
CreateOrUpdate(gomock.Any(), resourceGroupName, groupWithLocalDevTags).
Return(groupWithLocalDevTags, nil)
env.EXPECT().
IsLocalDevelopmentMode().
Return(true)
env.EXPECT().
EnsureARMResourceGroupRoleAssignment(gomock.Any(), gomock.Any(), resourceGroupName).
Return(nil)
},
},
{
name: "success - rg exists and maintain tags",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
rg.EXPECT().
Get(gomock.Any(), resourceGroupName).
Return(groupWithTags, nil)
rg.EXPECT().
CreateOrUpdate(gomock.Any(), resourceGroupName, groupWithTags).
Return(groupWithTags, nil)
env.EXPECT().
IsLocalDevelopmentMode().
Return(false)
env.EXPECT().
EnsureARMResourceGroupRoleAssignment(gomock.Any(), gomock.Any(), resourceGroupName).
Return(nil)
},
},
{
name: "fail - get rg returns generic error",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
rg.EXPECT().
Get(gomock.Any(), resourceGroupName).
Return(group, errors.New("generic error"))
},
wantErr: "generic error",
},
{
name: "fail - CreateOrUpdate returns requestdisallowedbypolicy",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
rg.EXPECT().
Get(gomock.Any(), resourceGroupName).
Return(group, autorest.DetailedError{StatusCode: http.StatusNotFound})
rg.EXPECT().
CreateOrUpdate(gomock.Any(), resourceGroupName, group).
Return(group, disallowedByPolicy)
env.EXPECT().
IsLocalDevelopmentMode().
Return(false)
},
wantErr: `400: DeploymentFailed: : Deployment failed. Details: : : {"code":"RequestDisallowedByPolicy","message":"","target":null,"details":null,"innererror":null,"additionalInfo":null}`,
},
{
name: "ResourceGroup creation failed with other error",
mocks: func(rg *mock_features.MockResourceGroupsClient, result interface{}) {
name: "fail - CreateOrUpdate returns generic error",
mocks: func(rg *mock_features.MockResourceGroupsClient, env *mock_env.MockInterface) {
rg.EXPECT().
CreateOrUpdate(ctx, resourceGroupName, group).
Return(result, fmt.Errorf("Any other error"))
Get(gomock.Any(), resourceGroupName).
Return(group, autorest.DetailedError{StatusCode: http.StatusNotFound})
rg.EXPECT().
CreateOrUpdate(gomock.Any(), resourceGroupName, group).
Return(group, errors.New("generic error"))
env.EXPECT().
IsLocalDevelopmentMode().
Return(false)
},
wantErr: "Any other error",
wantErr: "generic error",
},
} {
t.Run(tt.name, func(t *testing.T) {
@ -79,12 +166,10 @@ func TestCreateAndUpdateErrors(t *testing.T) {
defer controller.Finish()
resourceGroupsClient := mock_features.NewMockResourceGroupsClient(controller)
tt.mocks(resourceGroupsClient, tt.result)
env := mock_env.NewMockInterface(controller)
tt.mocks(resourceGroupsClient, env)
env.EXPECT().Location().AnyTimes().Return(location)
env.EXPECT().EnsureARMResourceGroupRoleAssignment(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
env.EXPECT().IsLocalDevelopmentMode().Return(false)
m := &manager{
log: logrus.NewEntry(logrus.StandardLogger()),

Просмотреть файл

@ -272,3 +272,109 @@ func createManagerForTests(t *testing.T, existingNamespaceName string) *manager
}
return m
}
func TestHiveEnsureResources(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
testName string
clusterManagerMock func(mockCtrl *gomock.Controller, m *manager) *mock_hive.MockClusterManager
wantErr string
}{
{
testName: "returns error if cluster manager returns error",
clusterManagerMock: func(mockCtrl *gomock.Controller, m *manager) *mock_hive.MockClusterManager {
mockClusterManager := mock_hive.NewMockClusterManager(mockCtrl)
mockClusterManager.EXPECT().CreateOrUpdate(ctx, m.subscriptionDoc, m.doc).Return(fmt.Errorf("cluster manager error"))
return mockClusterManager
},
wantErr: "cluster manager error",
},
{
testName: "does not return error if cluster manager is nil",
},
{
testName: "calls cluster manager CreateOrUpdate with correct parameters",
clusterManagerMock: func(mockCtrl *gomock.Controller, m *manager) *mock_hive.MockClusterManager {
mockClusterManager := mock_hive.NewMockClusterManager(mockCtrl)
mockClusterManager.EXPECT().CreateOrUpdate(ctx, m.subscriptionDoc, m.doc).Return(nil)
return mockClusterManager
},
},
} {
t.Run(tt.testName, func(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
m := createManagerForTests(t, "")
if tt.clusterManagerMock != nil {
m.hiveClusterManager = tt.clusterManagerMock(controller, m)
}
err := m.hiveEnsureResources(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)
}
},
)
}
}
func TestHiveDeleteResources(t *testing.T) {
ctx := context.Background()
for _, tt := range []struct {
testName string
namespace string
clusterManagerMock func(mockCtrl *gomock.Controller, namespaceName string) *mock_hive.MockClusterManager
wantErr string
}{
{
testName: "doesn't return error if cluster manager is nil",
},
{
testName: "deletes namespace if it exists",
namespace: "existing-namespace",
clusterManagerMock: func(mockCtrl *gomock.Controller, namespaceName string) *mock_hive.MockClusterManager {
mockClusterManager := mock_hive.NewMockClusterManager(mockCtrl)
mockClusterManager.EXPECT().Delete(ctx, namespaceName).Return(nil)
return mockClusterManager
},
},
{
testName: "doesn't attempt to delete namespace if it doesn't exist",
clusterManagerMock: func(mockCtrl *gomock.Controller, namespaceName string) *mock_hive.MockClusterManager {
mockClusterManager := mock_hive.NewMockClusterManager(mockCtrl)
mockClusterManager.EXPECT().Delete(ctx, namespaceName).Times(0)
return mockClusterManager
},
},
{
testName: "returns error if cluster manager returns error",
namespace: "existing-namespace",
clusterManagerMock: func(mockCtrl *gomock.Controller, namespaceName string) *mock_hive.MockClusterManager {
mockClusterManager := mock_hive.NewMockClusterManager(mockCtrl)
mockClusterManager.EXPECT().Delete(ctx, namespaceName).Return(fmt.Errorf("cluster manager error"))
return mockClusterManager
},
wantErr: "cluster manager error",
},
} {
t.Run(tt.testName, func(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
m := createManagerForTests(t, tt.namespace)
if tt.clusterManagerMock != nil {
m.hiveClusterManager = tt.clusterManagerMock(controller, m.doc.OpenShiftCluster.Properties.HiveProfile.Namespace)
}
err := m.hiveDeleteResources(ctx)
if err != nil && err.Error() != tt.wantErr ||
err == nil && tt.wantErr != "" {
t.Error(err)
}
})
}
}

Просмотреть файл

@ -9,6 +9,7 @@ import (
"github.com/Azure/go-autorest/autorest/to"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
)
@ -42,7 +43,7 @@ func (m *manager) configureDefaultStorageClass(ctx context.Context) error {
encryptedSC := newEncryptedStorageClass(m.doc.OpenShiftCluster.Properties.WorkerProfiles[0].DiskEncryptionSetID)
_, err = m.kubernetescli.StorageV1().StorageClasses().Create(ctx, encryptedSC, metav1.CreateOptions{})
if err != nil {
if err != nil && !kerrors.IsAlreadyExists(err) {
return err
}

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -6,13 +6,14 @@ package deploy
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"fmt"
"io/ioutil"
"reflect"
"strings"
"github.com/Azure/go-autorest/autorest/to"
"github.com/ghodss/yaml"
"golang.org/x/crypto/ssh"
)
// NOTICE: when modifying the config definition here, don't forget to update
@ -156,9 +157,12 @@ func (conf *RPConfig) validate() error {
if err != nil {
return err
}
pubKey := x509.MarshalPKCS1PublicKey(&key.PublicKey)
publicKey := string(pubKey)
conf.Configuration.SSHPublicKey = (&publicKey)
publicRsaKey, err := ssh.NewPublicKey(&key.PublicKey)
if err != nil {
return err
}
publicKeyBytes := ssh.MarshalAuthorizedKey(publicRsaKey)
conf.Configuration.SSHPublicKey = to.StringPtr(string(publicKeyBytes))
}
for i := 0; i < v.NumField(); i++ {

Просмотреть файл

@ -602,6 +602,18 @@ cat >/etc/fluentbit/fluentbit.conf <<'EOF'
Remove_wildcard _
Remove TIMESTAMP
[FILTER]
Name rewrite_tag
Match journald
Rule $LOGKIND asyncqos asyncqos true
[FILTER]
Name modify
Match asyncqos
Remove CLIENT_PRINCIPAL_NAME
Remove FILE
Remove COMPONENT
[FILTER]
Name rewrite_tag
Match journald

Просмотреть файл

@ -12,8 +12,6 @@ import (
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
@ -21,40 +19,16 @@ func (f *frontend) postAdminOpenShiftClusterRedeployVM(w http.ResponseWriter, r
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
r.URL.Path = filepath.Dir(r.URL.Path)
err := f._postAdminOpenShiftClusterRedeployVM(ctx, r, log)
err := f._postAdminOpenShiftClusterRedeployVM(log, ctx, r)
adminReply(log, w, nil, nil, err)
}
func (f *frontend) _postAdminOpenShiftClusterRedeployVM(ctx context.Context, r *http.Request, log *logrus.Entry) error {
func (f *frontend) _postAdminOpenShiftClusterRedeployVM(log *logrus.Entry, ctx context.Context, r *http.Request) error {
vars := mux.Vars(r)
vmName := r.URL.Query().Get("vmName")
err := validateAdminVMName(vmName)
azActionsWrapper, err := f.newAzureActionsWrapper(log, ctx, vmName, strings.TrimPrefix(r.URL.Path, "/admin"), vars)
if err != nil {
return err
}
resourceID := strings.TrimPrefix(r.URL.Path, "/admin")
doc, err := f.dbOpenShiftClusters.Get(ctx, resourceID)
switch {
case cosmosdb.IsErrorStatusCode(err, http.StatusNotFound):
return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "The Resource '%s/%s' under resource group '%s' was not found.", vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
case err != nil:
return err
}
subscriptionDoc, err := f.getSubscriptionDocument(ctx, doc.Key)
if err != nil {
return err
}
a, err := f.azureActionsFactory(log, f.env, doc.OpenShiftCluster, subscriptionDoc)
if err != nil {
return err
}
return a.VMRedeployAndWait(ctx, vmName)
return f.adminAction.VMRedeployAndWait(ctx, azActionsWrapper.vmName)
}

Просмотреть файл

@ -12,8 +12,6 @@ import (
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
@ -21,40 +19,17 @@ func (f *frontend) postAdminOpenShiftClusterStartVM(w http.ResponseWriter, r *ht
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
r.URL.Path = filepath.Dir(r.URL.Path)
err := f._postAdminOpenShiftClusterStartVM(ctx, r, log)
err := f._postAdminOpenShiftClusterStartVM(log, ctx, r)
adminReply(log, w, nil, nil, err)
}
func (f *frontend) _postAdminOpenShiftClusterStartVM(ctx context.Context, r *http.Request, log *logrus.Entry) error {
func (f *frontend) _postAdminOpenShiftClusterStartVM(log *logrus.Entry, ctx context.Context, r *http.Request) error {
vars := mux.Vars(r)
vmName := r.URL.Query().Get("vmName")
err := validateAdminVMName(vmName)
azActionsWrapper, err := f.newAzureActionsWrapper(log, ctx, vmName, strings.TrimPrefix(r.URL.Path, "/admin"), vars)
if err != nil {
return err
}
resourceID := strings.TrimPrefix(r.URL.Path, "/admin")
doc, err := f.dbOpenShiftClusters.Get(ctx, resourceID)
switch {
case cosmosdb.IsErrorStatusCode(err, http.StatusNotFound):
return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "The Resource '%s/%s' under resource group '%s' was not found.", vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
case err != nil:
return err
}
subscriptionDoc, err := f.getSubscriptionDocument(ctx, doc.Key)
if err != nil {
return err
}
a, err := f.azureActionsFactory(log, f.env, doc.OpenShiftCluster, subscriptionDoc)
if err != nil {
return err
}
return a.VMStartAndWait(ctx, vmName)
return f.adminAction.VMStartAndWait(ctx, azActionsWrapper.vmName)
}

Просмотреть файл

@ -12,8 +12,6 @@ import (
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
@ -21,40 +19,17 @@ func (f *frontend) postAdminOpenShiftClusterStopVM(w http.ResponseWriter, r *htt
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
r.URL.Path = filepath.Dir(r.URL.Path)
err := f._postAdminOpenShiftClusterStopVM(ctx, r, log)
err := f._postAdminOpenShiftClusterStopVM(log, ctx, r)
adminReply(log, w, nil, nil, err)
}
func (f *frontend) _postAdminOpenShiftClusterStopVM(ctx context.Context, r *http.Request, log *logrus.Entry) error {
func (f *frontend) _postAdminOpenShiftClusterStopVM(log *logrus.Entry, ctx context.Context, r *http.Request) error {
vars := mux.Vars(r)
vmName := r.URL.Query().Get("vmName")
err := validateAdminVMName(vmName)
azActionsWrapper, err := f.newAzureActionsWrapper(log, ctx, vmName, strings.TrimPrefix(r.URL.Path, "/admin"), vars)
if err != nil {
return err
}
resourceID := strings.TrimPrefix(r.URL.Path, "/admin")
doc, err := f.dbOpenShiftClusters.Get(ctx, resourceID)
switch {
case cosmosdb.IsErrorStatusCode(err, http.StatusNotFound):
return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "", "The Resource '%s/%s' under resource group '%s' was not found.", vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
case err != nil:
return err
}
subscriptionDoc, err := f.getSubscriptionDocument(ctx, doc.Key)
if err != nil {
return err
}
a, err := f.azureActionsFactory(log, f.env, doc.OpenShiftCluster, subscriptionDoc)
if err != nil {
return err
}
return a.VMStopAndWait(ctx, vmName)
return f.adminAction.VMStopAndWait(ctx, azActionsWrapper.vmName)
}

Просмотреть файл

@ -17,7 +17,6 @@ import (
kruntime "k8s.io/apimachinery/pkg/runtime"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
"github.com/Azure/ARO-RP/pkg/frontend/middleware"
)
@ -25,34 +24,14 @@ func (f *frontend) postAdminOpenShiftClusterVMResize(w http.ResponseWriter, r *h
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
r.URL.Path = filepath.Dir(r.URL.Path)
err := f._postAdminOpenShiftClusterVMResize(ctx, r, log)
err := f._postAdminOpenShiftClusterVMResize(log, ctx, r)
adminReply(log, w, nil, nil, err)
}
func (f *frontend) _postAdminOpenShiftClusterVMResize(ctx context.Context, r *http.Request, log *logrus.Entry) error {
func (f *frontend) _postAdminOpenShiftClusterVMResize(log *logrus.Entry, ctx context.Context, r *http.Request) error {
vars := mux.Vars(r)
vmName := r.URL.Query().Get("vmName")
err := validateAdminVMName(vmName)
if err != nil {
return err
}
resourceID := strings.TrimPrefix(r.URL.Path, "/admin")
doc, err := f.dbOpenShiftClusters.Get(ctx, resourceID)
switch {
case cosmosdb.IsErrorStatusCode(err, http.StatusNotFound):
return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "",
"The Resource '%s/%s' under resource group '%s' was not found.",
vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
case err != nil:
return err
}
subscriptionDoc, err := f.getSubscriptionDocument(ctx, doc.Key)
azActionsWrapper, err := f.newAzureActionsWrapper(log, ctx, vmName, strings.TrimPrefix(r.URL.Path, "/admin"), vars)
if err != nil {
return err
}
@ -63,12 +42,7 @@ func (f *frontend) _postAdminOpenShiftClusterVMResize(ctx context.Context, r *ht
return err
}
a, err := f.azureActionsFactory(log, f.env, doc.OpenShiftCluster, subscriptionDoc)
if err != nil {
return err
}
k, err := f.kubeActionsFactory(log, f.env, doc.OpenShiftCluster)
k, err := f.kubeActionsFactory(log, f.env, azActionsWrapper.doc.OpenShiftCluster)
if err != nil {
return err
}
@ -95,7 +69,7 @@ func (f *frontend) _postAdminOpenShiftClusterVMResize(ctx context.Context, r *ht
continue
}
if strings.EqualFold(vmName, node.ObjectMeta.Name) {
if strings.EqualFold(azActionsWrapper.vmName, node.ObjectMeta.Name) {
nodeExists = true
break
}
@ -104,8 +78,8 @@ func (f *frontend) _postAdminOpenShiftClusterVMResize(ctx context.Context, r *ht
if !nodeExists {
return api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "",
`"The master node '%s' under resource group '%s' was not found."`,
vmName, vars["resourceGroupName"])
azActionsWrapper.vmName, vars["resourceGroupName"])
}
return a.VMResize(ctx, vmName, vmSize)
return f.adminAction.VMResize(ctx, azActionsWrapper.vmName, vmSize)
}

Просмотреть файл

@ -24,9 +24,7 @@ func (f *frontend) getAdminOpenShiftClusterVMResizeOptions(w http.ResponseWriter
ctx := r.Context()
log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
r.URL.Path = filepath.Dir(r.URL.Path)
b, err := f._getAdminOpenShiftClusterVMResizeOptions(ctx, r, log)
adminReply(log, w, nil, b, err)
}

Просмотреть файл

@ -0,0 +1,53 @@
package frontend
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"net/http"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/database/cosmosdb"
)
// azVmActionsWrapper bundles the validated VM name with the cluster document
// fetched for an admin VM action (redeploy/start/stop/resize endpoints).
type azVmActionsWrapper struct {
vmName string
doc *api.OpenShiftClusterDocument
}
// newAzureActionsWrapper performs the boilerplate shared by the admin VM
// endpoints: it validates vmName, loads the cluster document for resourceID
// (returning a 404 CloudError built from vars when it does not exist), loads
// the owning subscription document, and initialises f.adminAction via the
// azureActionsFactory. On success it returns the validated VM name together
// with the cluster document.
func (f *frontend) newAzureActionsWrapper(log *logrus.Entry, ctx context.Context, vmName, resourceID string, vars map[string]string) (azVmActionsWrapper, error) {
	err := validateAdminVMName(vmName)
	if err != nil {
		return azVmActionsWrapper{}, err
	}
	// NOTE: the original had a second, identical `if err != nil` check here;
	// it was a dead duplicate and has been removed.

	doc, err := f.dbOpenShiftClusters.Get(ctx, resourceID)
	switch {
	case cosmosdb.IsErrorStatusCode(err, http.StatusNotFound):
		return azVmActionsWrapper{}, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeResourceNotFound, "",
			"The Resource '%s/%s' under resource group '%s' was not found.",
			vars["resourceType"], vars["resourceName"], vars["resourceGroupName"])
	case err != nil:
		return azVmActionsWrapper{}, err
	}

	subscriptionDoc, err := f.getSubscriptionDocument(ctx, doc.Key)
	if err != nil {
		return azVmActionsWrapper{}, err
	}

	f.adminAction, err = f.azureActionsFactory(log, f.env, doc.OpenShiftCluster, subscriptionDoc)
	if err != nil {
		return azVmActionsWrapper{}, err
	}

	return azVmActionsWrapper{
		vmName: vmName,
		doc:    doc,
	}, nil
}

Просмотреть файл

@ -60,6 +60,7 @@ type frontend struct {
kubeActionsFactory kubeActionsFactory
azureActionsFactory azureActionsFactory
ocEnricherFactory ocEnricherFactory
adminAction adminactions.AzureActions
l net.Listener
s *http.Server

Просмотреть файл

@ -73,7 +73,7 @@ const (
[FILTER]
Name grep
Match containers
Regex NAMESPACE ^(?:default|kube-.*|openshift|openshift-.*)$
Regex NAMESPACE ^(?:default|kube-.*|openshift|(?!openshift-(logging|gitops|user-workload-monitoring))(openshift-.*))$
[FILTER]
Name nest

Просмотреть файл

@ -89,9 +89,7 @@ func New(log *logrus.Entry, env env.Interface, oc *api.OpenShiftCluster, arocli
type deploymentData struct {
Image string
Version string
GitCommit string
IsLocalDevelopment bool
HasVersion bool
}
func templateManifests(data deploymentData) ([][]byte, error) {
@ -125,18 +123,24 @@ func templateManifests(data deploymentData) ([][]byte, error) {
func (o *operator) createDeploymentData() deploymentData {
image := o.env.AROOperatorImage()
hasVersion := false
// HACK: Override for ARO_IMAGE env variable setup in local-dev mode
version := "latest"
if strings.Contains(image, ":") {
str := strings.Split(image, ":")
version = str[len(str)-1]
}
// Set version correctly if it's overridden
if o.oc.Properties.OperatorVersion != "" {
image = fmt.Sprintf("%s/aro", o.env.ACRDomain())
hasVersion = true
version = o.oc.Properties.OperatorVersion
image = fmt.Sprintf("%s/aro:%s", o.env.ACRDomain(), version)
}
return deploymentData{
IsLocalDevelopment: o.env.IsLocalDevelopmentMode(),
Image: image,
Version: o.oc.Properties.OperatorVersion,
GitCommit: version.GitCommit,
HasVersion: hasVersion,
Version: version,
}
}

Просмотреть файл

@ -5,6 +5,7 @@ package deploy
import (
"errors"
"reflect"
"testing"
"github.com/golang/mock/gomock"
@ -13,7 +14,6 @@ import (
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/pkg/util/cmp"
mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
"github.com/Azure/ARO-RP/pkg/util/version"
)
func TestCheckIngressIP(t *testing.T) {
@ -130,6 +130,79 @@ func TestCheckIngressIP(t *testing.T) {
}
}
func TestCreateDeploymentData(t *testing.T) {
operatorImageTag := "v20071110"
operatorImageUntagged := "arosvc.azurecr.io/aro"
operatorImageWithTag := operatorImageUntagged + ":" + operatorImageTag
for _, tt := range []struct {
name string
mock func(*mock_env.MockInterface, *api.OpenShiftCluster)
operatorVersionOverride string
expected deploymentData
}{
{
name: "no image override, use default",
mock: func(env *mock_env.MockInterface, oc *api.OpenShiftCluster) {
env.EXPECT().
AROOperatorImage().
Return(operatorImageWithTag)
},
expected: deploymentData{
Image: operatorImageWithTag,
Version: operatorImageTag},
},
{
name: "no image tag, use latest version",
mock: func(env *mock_env.MockInterface, oc *api.OpenShiftCluster) {
env.EXPECT().
AROOperatorImage().
Return(operatorImageUntagged)
},
expected: deploymentData{
Image: operatorImageUntagged,
Version: "latest"},
},
{
name: "OperatorVersion override set",
mock: func(env *mock_env.MockInterface, oc *api.OpenShiftCluster) {
env.EXPECT().
AROOperatorImage().
Return(operatorImageUntagged)
env.EXPECT().
ACRDomain().
Return("docker.io")
oc.Properties.OperatorVersion = "override"
},
expected: deploymentData{
Image: "docker.io/aro:override",
Version: "override"},
},
} {
t.Run(tt.name, func(t *testing.T) {
controller := gomock.NewController(t)
defer controller.Finish()
env := mock_env.NewMockInterface(controller)
env.EXPECT().IsLocalDevelopmentMode().Return(tt.expected.IsLocalDevelopment).AnyTimes()
oc := &api.OpenShiftCluster{Properties: api.OpenShiftClusterProperties{}}
tt.mock(env, oc)
o := operator{
oc: oc,
env: env,
}
deploymentData := o.createDeploymentData()
if !reflect.DeepEqual(deploymentData, tt.expected) {
t.Errorf("actual deployment: %v, expected %v", deploymentData, tt.expected)
}
})
}
}
func TestOperatorVersion(t *testing.T) {
type test struct {
name string
@ -144,7 +217,7 @@ func TestOperatorVersion(t *testing.T) {
oc: func() *api.OpenShiftClusterProperties {
return &api.OpenShiftClusterProperties{}
},
wantVersion: version.GitCommit,
wantVersion: "latest",
wantPullspec: "defaultaroimagefromenv",
},
{

Просмотреть файл

@ -2,12 +2,8 @@ apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: aro-operator-master
{{ if .HasVersion}}
version: {{ .Version }}
{{ else }}
version: unknown
{{end}}
app: aro-operator-master
version: {{ .Version }}
name: aro-operator-master
namespace: openshift-azure-operator
spec:
@ -30,12 +26,12 @@ spec:
args:
- operator
- master
image: "{{ .Image }}{{ if .HasVersion}}:{{ if .IsLocalDevelopment }}{{ .GitCommit }}{{ else }}{{ .Version }}{{end}}{{end}}"
image: "{{ .Image }}"
name: aro-operator
{{ if .IsLocalDevelopment}}
env:
env:
- name: "RP_MODE"
value: "development"
value: "development"
{{ end }}
ports:
- containerPort: 8080

Просмотреть файл

@ -2,12 +2,8 @@ apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: aro-operator-worker
{{ if .HasVersion}}
version: {{ .Version }}
{{ else }}
version: unknown
{{end}}
app: aro-operator-worker
version: {{ .Version }}
name: aro-operator-worker
namespace: openshift-azure-operator
spec:
@ -30,13 +26,13 @@ spec:
args:
- operator
- worker
image: "{{ .Image }}{{ if .HasVersion}}:{{ if .IsLocalDevelopment }}{{ .GitCommit }}{{ else }}{{ .Version }}{{end}}{{end}}"
{{ if .IsLocalDevelopment}}
env:
- name: "RP_MODE"
value: "development"
{{ end }}
image: "{{ .Image }}"
name: aro-operator
{{ if .IsLocalDevelopment}}
env:
- name: "RP_MODE"
value: "development"
{{ end }}
livenessProbe:
httpGet:
path: /healthz/ready