This commit is contained in:
Jeff Yuan 2023-05-30 11:30:18 +12:00
Parent 5dc6dc1434
Commit 874262ad34
6 changed files with 341 additions and 257 deletions

View file

@@ -10,16 +10,13 @@ import (
"time"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"
"github.com/Azure/ARO-RP/pkg/operator/controllers/guardrails/config"
@@ -144,21 +141,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
// SetupWithManager setup our manager
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
pullSecretPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
return (o.GetName() == pullSecretName.Name && o.GetNamespace() == pullSecretName.Namespace)
})
aroClusterPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
return o.GetName() == arov1alpha1.SingletonClusterName
})
grBuilder := ctrl.NewControllerManagedBy(mgr).
For(&arov1alpha1.Cluster{}, builder.WithPredicates(aroClusterPredicate)).
Watches(
&source.Kind{Type: &corev1.Secret{}},
&handler.EnqueueRequestForObject{},
builder.WithPredicates(pullSecretPredicate),
)
For(&arov1alpha1.Cluster{}, builder.WithPredicates(aroClusterPredicate))
resources, err := r.deployer.Template(&config.GuardRailsDeploymentConfig{}, staticFiles)
if err != nil {

View file

@@ -24,19 +24,13 @@ import (
"github.com/Azure/ARO-RP/pkg/util/dynamichelper"
)
func (r *Reconciler) getPolicyConfig(ctx context.Context, na string) (string, string, error) {
func (r *Reconciler) getPolicyConfig(ctx context.Context, instance *arov1alpha1.Cluster, na string) (string, string, error) {
parts := strings.Split(na, ".")
if len(parts) < 1 {
return "", "", errors.New("unrecognised name: " + na)
}
name := parts[0]
instance := &arov1alpha1.Cluster{}
err := r.client.Get(ctx, types.NamespacedName{Name: arov1alpha1.SingletonClusterName}, instance)
if err != nil {
return "", "", err
}
managedPath := fmt.Sprintf(controllerPolicyManagedTemplate, name)
managed := instance.Spec.OperatorFlags.GetWithDefault(managedPath, "false")
@@ -52,10 +46,16 @@ func (r *Reconciler) ensurePolicy(ctx context.Context, fs embed.FS, path string)
return err
}
instance := &arov1alpha1.Cluster{}
err = r.client.Get(ctx, types.NamespacedName{Name: arov1alpha1.SingletonClusterName}, instance)
if err != nil {
return err
}
creates := make([]kruntime.Object, 0)
buffer := new(bytes.Buffer)
for _, templ := range template.Templates() {
managed, enforcement, err := r.getPolicyConfig(ctx, templ.Name())
managed, enforcement, err := r.getPolicyConfig(ctx, instance, templ.Name())
if err != nil {
return err
}

View file

@@ -115,38 +115,35 @@ kind: AROPrivilegedNamespace
metadata:
name: aro-privileged-namespace-deny
spec:
enforcementAction: {{.Enforcement}}
match:
kinds:
- apiGroups: [""]
kinds: ["Service",
kinds: [
"Pod",
"Deployment",
"Namespace",
"ReplicaSet",
"StatefulSets",
"DaemonSet",
"Jobs",
"CronJob",
"ReplicationController",
"Role",
"ClusterRole",
"roleBinding",
"ClusterRoleBinding",
"Secret",
"Service",
"ServiceAccount",
"CustomResourceDefinition",
"PodDisruptionBudget",
"ReplicationController",
"ResourceQuota",
"PodSecurityPolicy"]
]
- apiGroups: ["apps"]
kinds: ["Deployment", "ReplicaSet", "StatefulSet", "DaemonSet"]
- apiGroups: ["batch"]
kinds: ["Job", "CronJob"]
- apiGroups: ["rbac.authorization.k8s.io"]
kinds: ["Role", "RoleBinding"]
- apiGroups: ["policy"]
kinds: ["PodDisruptionBudget"]
```
## Test the rego
* install opa cli, refer https://www.openpolicyagent.org/docs/v0.11.0/get-started/
* install the opa CLI; see https://github.com/open-policy-agent/opa/releases/
* once the _test.rego file is written, run the tests and fix any problems
```sh
opa test *.rego [-v] #-v for verbose
opa test ../library/common.rego *.rego [-v] #-v for verbose
```
## Generate the Constraint Templates
@@ -228,3 +225,81 @@ or below cmd after test.sh has been executed:
```sh
gator verify . [-v] #-v for verbose
```
You are now ready to test your policy on a real cluster.
## Enable your policy on a dev cluster
Set up your local dev environment following the “Deploy development RP” section, if you haven't already: https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md
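The commands below assume the RP development environment variables are loaded in your shell. A minimal sketch, assuming you have already created and filled in an env file as that document describes (the file name is an assumption taken from that doc):
```sh
# assumes an env file prepared per deploy-development-rp.md
. ./env
```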
Deploy a dev cluster $CLUSTER in your preferred region, for example:
```sh
CLUSTER=jeff-test-aro go run ./hack/cluster create
```
Scale the standard ARO operator down to 0 replicas:
```sh
oc scale -n openshift-azure-operator deployment/aro-operator-master --replicas=0
```
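To confirm the scale-down took effect, you can check the deployment (namespace and name taken from the command above):
```sh
# READY should report 0/0 before you start the operator locally
oc get deployment aro-operator-master -n openshift-azure-operator
```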
Run the ARO operator from your local code, for example:
```sh
CLUSTER=jeff-test-aro go run -tags aro,containers_image_openpgp ./cmd/aro operator master
```
Wait a couple of minutes until the ARO operator has fully synchronized.
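One way to check, assuming the singleton resource is named `cluster` (as used by the patch commands below), is to inspect the resource the operator reconciles and look at its status:
```sh
# the locally running operator logs to your terminal; this inspects the cluster resource it reconciles
oc describe cluster.aro.openshift.io cluster
```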
Enable GuardRails and set it to managed:
```sh
oc patch cluster.aro.openshift.io cluster --type json -p '[{ "op": "replace", "path": "/spec/operatorflags/aro.guardrails.deploy.managed", "value":"true" }]'
oc patch cluster.aro.openshift.io cluster --type json -p '[{ "op": "replace", "path": "/spec/operatorflags/aro.guardrails.enabled", "value":"true" }]'
```
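To confirm the flags were set, you can read them back from the cluster resource; a sketch (the dots in the flag names must be escaped in jsonpath):
```sh
oc get cluster.aro.openshift.io cluster \
  -o jsonpath='{.spec.operatorflags.aro\.guardrails\.deploy\.managed}{"\n"}{.spec.operatorflags.aro\.guardrails\.enabled}{"\n"}'
```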
The order of the above commands matters; please don't change it!
Use the command below to verify that gatekeeper is deployed and ready:
```sh
$ oc get all -n openshift-azure-guardrails
NAME READY STATUS RESTARTS AGE
pod/gatekeeper-audit-67c4c669c7-mrr6w 1/1 Running 0 10h
pod/gatekeeper-controller-manager-b887b69bd-mzhsh 1/1 Running 0 10h
pod/gatekeeper-controller-manager-b887b69bd-tb8zc 1/1 Running 0 10h
pod/gatekeeper-controller-manager-b887b69bd-xnvv4 1/1 Running 0 10h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/gatekeeper-webhook-service ClusterIP 172.30.51.233 <none> 443/TCP 35h
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/gatekeeper-audit 1/1 1 1 10h
deployment.apps/gatekeeper-controller-manager 3/3 3 3 10h
NAME DESIRED CURRENT READY AGE
replicaset.apps/gatekeeper-audit-67c4c669c7 1 1 1 10h
replicaset.apps/gatekeeper-controller-manager-b887b69bd 3 3 3 10h
```
Verify that the ConstraintTemplate has been created:
```sh
$ oc get constrainttemplate
NAME AGE
arodenylabels 20h
$ oc get constraint
```
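The operator's readiness check (`IsConstraintTemplateReady` in the dynamichelper code further down) looks at the `created` field of the template's status, so you can also inspect that directly, using the template name from the listing above:
```sh
# should print "true" once gatekeeper has processed the template
oc get constrainttemplate arodenylabels -o jsonpath='{.status.created}{"\n"}'
```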
Enforce the machine rule:
```sh
oc patch cluster.aro.openshift.io cluster --type json -p '[{ "op": "replace", "path": "/spec/operatorflags/aro.guardrails.policies.aro-machines-deny.managed", "value":"true" }]'
oc patch cluster.aro.openshift.io cluster --type json -p '[{ "op": "replace", "path": "/spec/operatorflags/aro.guardrails.policies.aro-machines-deny.enforcement", "value":"deny" }]'
```
Note: the feature flag name is the corresponding Constraint file name, which can be found under pkg/operator/controllers/guardrails/policies/gkconstraints/, e.g. aro-machines-deny.yaml.
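To see which policy flag names are available, list the constraint files at the path from the note above:
```sh
ls pkg/operator/controllers/guardrails/policies/gkconstraints/
```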
Verify that the corresponding gatekeeper Constraint has been created:
```sh
$ oc get constraint
NAME ENFORCEMENT-ACTION TOTAL-VIOLATIONS
aro-machines-deny deny
```
Once the constraint is created, your policy is up and running!
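To dig into what the policy is doing, dump the constraints in full; violations found by the gatekeeper audit pod appear under each constraint's status:
```sh
oc get constraint -o yaml
```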

View file

@@ -149,8 +149,8 @@ func (depl *deployer) removeOne(ctx context.Context, obj kruntime.Object) (strin
}
name := nameValue.String()
ns := nsValue.String()
if reflect.TypeOf(obj).String() == "*v1.Namespace" {
// dont delete the namespace for now
if obj.GetObjectKind().GroupVersionKind().GroupKind().String() == "Namespace" {
// don't delete the namespace for now
return name, nil
}
errDelete := depl.dh.EnsureDeletedGVR(ctx, obj.GetObjectKind().GroupVersionKind().GroupKind().String(), ns, name, "")

View file

@@ -12,7 +12,6 @@ import (
hivev1 "github.com/openshift/hive/apis/hive/v1"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
"github.com/sirupsen/logrus"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -119,92 +118,6 @@ func (dh *dynamicHelper) Ensure(ctx context.Context, objs ...kruntime.Object) er
return nil
}
// the UnstructuredObj related stuff is specifically for the Guardrails
// to handle the gatekeeper Constraint as it does not have a scheme that can be imported
func (dh *dynamicHelper) ensureUnstructuredObj(ctx context.Context, uns *unstructured.Unstructured) error {
gvr, err := dh.Resolve(uns.GroupVersionKind().GroupKind().String(), uns.GroupVersionKind().Version)
if err != nil {
return err
}
create := false
obj, err := dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Get(ctx, uns.GetName(), metav1.GetOptions{})
if err != nil {
if !notFound(err) {
return err
}
create = true
}
if create {
dh.log.Infof("Create %s", keyFunc(uns.GroupVersionKind().GroupKind(), uns.GetNamespace(), uns.GetName()))
if _, err = dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Create(ctx, uns, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
enNew, err := GetEnforcementAction(uns)
if err != nil {
return nil
}
enOld, err := GetEnforcementAction(obj)
if err != nil {
return nil
}
if strings.EqualFold(enOld, enNew) {
// currently EnforcementAction is the only part that may change in an update
return nil
}
dh.log.Infof("Update %s: enforcementAction: %s->%s", keyFunc(uns.GroupVersionKind().GroupKind(), uns.GetNamespace(), uns.GetName()), enOld, enNew)
uns.SetResourceVersion(obj.GetResourceVersion())
if _, err = dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Update(ctx, uns, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
}
func GetEnforcementAction(obj *unstructured.Unstructured) (string, error) {
name := obj.GetName()
ns := obj.GetNamespace()
field, ok := obj.Object["spec"]
if !ok {
return "", fmt.Errorf("%s/%s: get spec failed", ns, name)
}
spec, ok := field.(map[string]interface{})
if !ok {
return "", fmt.Errorf("%s/%s: spec: %T is not map", ns, name, field)
}
field, ok = spec["enforcementAction"]
if !ok {
return "", fmt.Errorf("%s/%s: get enforcementAction failed", ns, name)
}
enforce, ok := field.(string)
if !ok {
return "", fmt.Errorf("%s/%s: enforcementAction: %T is not string", ns, name, field)
}
return enforce, nil
}
func (dh *dynamicHelper) deleteUnstructuredObj(ctx context.Context, groupKind, namespace, name string) error {
gvr, err := dh.Resolve(groupKind, "")
if err != nil {
return err
}
uns, err := dh.dynamicClient.Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}
if err != nil || uns == nil {
return err
}
if err = dh.dynamicClient.Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}); !(err == nil || notFound(err)) {
return err
}
return nil
}
func (dh *dynamicHelper) ensureOne(ctx context.Context, new kruntime.Object) error {
gvks, _, err := scheme.Scheme.ObjectKinds(new)
if err != nil {
@@ -411,132 +324,3 @@ func makeURLSegments(gvr *schema.GroupVersionResource, namespace, name string) (
return url
}
func notFound(err error) bool {
if err == nil || kerrors.IsNotFound(err) || strings.Contains(strings.ToLower(err.Error()), "notfound") {
return true
}
return false
}
// mergeGK takes the existing (old) and desired (new) objects. It checks the
// the interested fields in the *new* object to see if an update is necessary,
// fixes up the *old* object if needed, and returns the difference for
// debugging purposes. The reason for using *old* as basis is that the *old*
// object are changed by gatekeeper binaries and the changes must be kept.
func mergeGK(old, new kruntime.Object) (kruntime.Object, bool, string, error) {
if reflect.TypeOf(old) != reflect.TypeOf(new) {
return nil, false, "", fmt.Errorf("types differ: %T %T", old, new)
}
expected := old.DeepCopyObject()
// 1. Set defaults on old. This gets rid of many false positive diffs.
scheme.Scheme.Default(expected)
// 2. Do fix-ups on a per-Kind basis.
hasChanged := false
switch new.(type) {
case *appsv1.Deployment:
hasChanged = handleDeployment(new, expected)
case *admissionregistrationv1.ValidatingWebhookConfiguration:
hasChanged = handleValidatingWebhook(new, expected)
case *admissionregistrationv1.MutatingWebhookConfiguration:
hasChanged = handleMutatingWebhook(new, expected)
}
var diff string
if _, ok := expected.(*corev1.Secret); !ok { // Don't show a diff if kind is Secret
diff = cmp.Diff(old, expected)
}
return expected, hasChanged, diff, nil
}
func handleDeployment(new, expected kruntime.Object) bool {
hasChanged := false
newDeployment, expectedDeployment := new.(*appsv1.Deployment), expected.(*appsv1.Deployment)
for i := range expectedDeployment.Spec.Template.Spec.Containers {
ec := &expectedDeployment.Spec.Template.Spec.Containers[i]
nc := &newDeployment.Spec.Template.Spec.Containers[i]
if ec.Image != nc.Image {
ec.Image = nc.Image
hasChanged = true
}
if cmpAndCopy(&nc.Resources.Limits, &ec.Resources.Limits) {
hasChanged = true
}
if cmpAndCopy(&nc.Resources.Requests, &ec.Resources.Requests) {
hasChanged = true
}
}
return hasChanged
}
func handleValidatingWebhook(new, expected kruntime.Object) bool {
hasChanged := false
newWebhook := new.(*admissionregistrationv1.ValidatingWebhookConfiguration)
expectedWebhook := expected.(*admissionregistrationv1.ValidatingWebhookConfiguration)
for i := range expectedWebhook.Webhooks {
if expectedWebhook.Webhooks[i].FailurePolicy != nil &&
newWebhook.Webhooks[i].FailurePolicy != nil &&
*expectedWebhook.Webhooks[i].FailurePolicy != *newWebhook.Webhooks[i].FailurePolicy {
*expectedWebhook.Webhooks[i].FailurePolicy = *newWebhook.Webhooks[i].FailurePolicy
hasChanged = true
}
if expectedWebhook.Webhooks[i].TimeoutSeconds != nil &&
newWebhook.Webhooks[i].TimeoutSeconds != nil &&
*expectedWebhook.Webhooks[i].TimeoutSeconds != *newWebhook.Webhooks[i].TimeoutSeconds {
*expectedWebhook.Webhooks[i].TimeoutSeconds = *newWebhook.Webhooks[i].TimeoutSeconds
hasChanged = true
}
}
return hasChanged
}
func handleMutatingWebhook(new, expected kruntime.Object) bool {
hasChanged := false
newWebhook := new.(*admissionregistrationv1.MutatingWebhookConfiguration)
expectedWebhook := expected.(*admissionregistrationv1.MutatingWebhookConfiguration)
for i := range expectedWebhook.Webhooks {
if expectedWebhook.Webhooks[i].FailurePolicy != nil &&
newWebhook.Webhooks[i].FailurePolicy != nil &&
*expectedWebhook.Webhooks[i].FailurePolicy != *newWebhook.Webhooks[i].FailurePolicy {
*expectedWebhook.Webhooks[i].FailurePolicy = *newWebhook.Webhooks[i].FailurePolicy
hasChanged = true
}
if expectedWebhook.Webhooks[i].TimeoutSeconds != nil &&
newWebhook.Webhooks[i].TimeoutSeconds != nil &&
*expectedWebhook.Webhooks[i].TimeoutSeconds != *newWebhook.Webhooks[i].TimeoutSeconds {
*expectedWebhook.Webhooks[i].TimeoutSeconds = *newWebhook.Webhooks[i].TimeoutSeconds
hasChanged = true
}
}
return hasChanged
}
func cmpAndCopy(srcPtr, dstPtr *corev1.ResourceList) bool {
src, dst := *srcPtr, *dstPtr
hasChanged := false
for key, val := range dst {
if !val.Equal(src[key]) {
dst[key] = src[key].DeepCopy()
hasChanged = true
}
}
return hasChanged
}
func (dh *dynamicHelper) IsConstraintTemplateReady(ctx context.Context, name string) (bool, error) {
gvr := schema.GroupVersionResource{Group: "templates.gatekeeper.sh", Version: "v1beta1", Resource: "constrainttemplates"}
ct, err := dh.dynamicClient.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err
}
status, ok := ct.Object["status"].(map[string]interface{})
if !ok {
return false, nil
}
created, ok := status["created"].(bool)
if !ok {
return false, nil
}
return created, nil
}

View file

@@ -0,0 +1,237 @@
package dynamichelper
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/Azure/ARO-RP/pkg/util/cmp"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
)
// The UnstructuredObj-related code below is specifically for GuardRails, to handle
// gatekeeper Constraints, which do not have a scheme that can be imported
func (dh *dynamicHelper) ensureUnstructuredObj(ctx context.Context, uns *unstructured.Unstructured) error {
gvr, err := dh.Resolve(uns.GroupVersionKind().GroupKind().String(), uns.GroupVersionKind().Version)
if err != nil {
return err
}
create := false
obj, err := dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Get(ctx, uns.GetName(), metav1.GetOptions{})
if err != nil {
if !notFound(err) {
return err
}
create = true
}
if create {
dh.log.Infof("Create %s", keyFunc(uns.GroupVersionKind().GroupKind(), uns.GetNamespace(), uns.GetName()))
if _, err = dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Create(ctx, uns, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
enNew, err := GetEnforcementAction(uns)
if err != nil {
return nil
}
enOld, err := GetEnforcementAction(obj)
if err != nil {
return nil
}
if strings.EqualFold(enOld, enNew) {
// currently EnforcementAction is the only part that may change in an update
return nil
}
dh.log.Infof("Update %s: enforcementAction: %s->%s", keyFunc(uns.GroupVersionKind().GroupKind(), uns.GetNamespace(), uns.GetName()), enOld, enNew)
uns.SetResourceVersion(obj.GetResourceVersion())
if _, err = dh.dynamicClient.Resource(*gvr).Namespace(uns.GetNamespace()).Update(ctx, uns, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
}
func GetEnforcementAction(obj *unstructured.Unstructured) (string, error) {
name := obj.GetName()
ns := obj.GetNamespace()
field, ok := obj.Object["spec"]
if !ok {
return "", fmt.Errorf("%s/%s: get spec failed", ns, name)
}
spec, ok := field.(map[string]interface{})
if !ok {
return "", fmt.Errorf("%s/%s: spec: %T is not map", ns, name, field)
}
field, ok = spec["enforcementAction"]
if !ok {
return "", fmt.Errorf("%s/%s: get enforcementAction failed", ns, name)
}
enforce, ok := field.(string)
if !ok {
return "", fmt.Errorf("%s/%s: enforcementAction: %T is not string", ns, name, field)
}
return enforce, nil
}
func (dh *dynamicHelper) deleteUnstructuredObj(ctx context.Context, groupKind, namespace, name string) error {
gvr, err := dh.Resolve(groupKind, "")
if err != nil {
return err
}
uns, err := dh.dynamicClient.Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}
if err != nil || uns == nil {
return err
}
if err = dh.dynamicClient.Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}); !(err == nil || notFound(err)) {
return err
}
return nil
}
func notFound(err error) bool {
if err == nil || kerrors.IsNotFound(err) || strings.Contains(strings.ToLower(err.Error()), "notfound") {
return true
}
return false
}
// mergeGK takes the existing (old) and desired (new) objects. It checks the
// fields of interest in the *new* object to see if an update is necessary,
// fixes up the *old* object if needed, and returns the difference for
// debugging purposes. The reason for using *old* as the basis is that the *old*
// object is changed by the gatekeeper binaries and those changes must be kept.
func mergeGK(old, new kruntime.Object) (kruntime.Object, bool, string, error) {
if reflect.TypeOf(old) != reflect.TypeOf(new) {
return nil, false, "", fmt.Errorf("types differ: %T %T", old, new)
}
expected := old.DeepCopyObject()
// 1. Set defaults on old. This gets rid of many false positive diffs.
scheme.Scheme.Default(expected)
// 2. Do fix-ups on a per-Kind basis.
hasChanged := false
switch new.(type) {
case *appsv1.Deployment:
hasChanged = handleDeployment(new, expected)
case *admissionregistrationv1.ValidatingWebhookConfiguration:
hasChanged = handleValidatingWebhook(new, expected)
case *admissionregistrationv1.MutatingWebhookConfiguration:
hasChanged = handleMutatingWebhook(new, expected)
}
var diff string
if _, ok := expected.(*corev1.Secret); !ok { // Don't show a diff if kind is Secret
diff = cmp.Diff(old, expected)
}
return expected, hasChanged, diff, nil
}
func handleDeployment(new, expected kruntime.Object) bool {
hasChanged := false
newDeployment, expectedDeployment := new.(*appsv1.Deployment), expected.(*appsv1.Deployment)
for i := range expectedDeployment.Spec.Template.Spec.Containers {
ec := &expectedDeployment.Spec.Template.Spec.Containers[i]
nc := &newDeployment.Spec.Template.Spec.Containers[i]
if ec.Image != nc.Image {
ec.Image = nc.Image
hasChanged = true
}
if cmpAndCopy(&nc.Resources.Limits, &ec.Resources.Limits) {
hasChanged = true
}
if cmpAndCopy(&nc.Resources.Requests, &ec.Resources.Requests) {
hasChanged = true
}
}
return hasChanged
}
func handleValidatingWebhook(new, expected kruntime.Object) bool {
hasChanged := false
newWebhook := new.(*admissionregistrationv1.ValidatingWebhookConfiguration)
expectedWebhook := expected.(*admissionregistrationv1.ValidatingWebhookConfiguration)
for i := range expectedWebhook.Webhooks {
if expectedWebhook.Webhooks[i].FailurePolicy != nil &&
newWebhook.Webhooks[i].FailurePolicy != nil &&
*expectedWebhook.Webhooks[i].FailurePolicy != *newWebhook.Webhooks[i].FailurePolicy {
*expectedWebhook.Webhooks[i].FailurePolicy = *newWebhook.Webhooks[i].FailurePolicy
hasChanged = true
}
if expectedWebhook.Webhooks[i].TimeoutSeconds != nil &&
newWebhook.Webhooks[i].TimeoutSeconds != nil &&
*expectedWebhook.Webhooks[i].TimeoutSeconds != *newWebhook.Webhooks[i].TimeoutSeconds {
*expectedWebhook.Webhooks[i].TimeoutSeconds = *newWebhook.Webhooks[i].TimeoutSeconds
hasChanged = true
}
}
return hasChanged
}
func handleMutatingWebhook(new, expected kruntime.Object) bool {
hasChanged := false
newWebhook := new.(*admissionregistrationv1.MutatingWebhookConfiguration)
expectedWebhook := expected.(*admissionregistrationv1.MutatingWebhookConfiguration)
for i := range expectedWebhook.Webhooks {
if expectedWebhook.Webhooks[i].FailurePolicy != nil &&
newWebhook.Webhooks[i].FailurePolicy != nil &&
*expectedWebhook.Webhooks[i].FailurePolicy != *newWebhook.Webhooks[i].FailurePolicy {
*expectedWebhook.Webhooks[i].FailurePolicy = *newWebhook.Webhooks[i].FailurePolicy
hasChanged = true
}
if expectedWebhook.Webhooks[i].TimeoutSeconds != nil &&
newWebhook.Webhooks[i].TimeoutSeconds != nil &&
*expectedWebhook.Webhooks[i].TimeoutSeconds != *newWebhook.Webhooks[i].TimeoutSeconds {
*expectedWebhook.Webhooks[i].TimeoutSeconds = *newWebhook.Webhooks[i].TimeoutSeconds
hasChanged = true
}
}
return hasChanged
}
func cmpAndCopy(srcPtr, dstPtr *corev1.ResourceList) bool {
src, dst := *srcPtr, *dstPtr
hasChanged := false
for key, val := range dst {
if !val.Equal(src[key]) {
dst[key] = src[key].DeepCopy()
hasChanged = true
}
}
return hasChanged
}
func (dh *dynamicHelper) IsConstraintTemplateReady(ctx context.Context, name string) (bool, error) {
gvr := schema.GroupVersionResource{Group: "templates.gatekeeper.sh", Version: "v1beta1", Resource: "constrainttemplates"}
ct, err := dh.dynamicClient.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err
}
status, ok := ct.Object["status"].(map[string]interface{})
if !ok {
return false, nil
}
created, ok := status["created"].(bool)
if !ok {
return false, nil
}
return created, nil
}