Mirror of https://github.com/Azure/ARO-RP.git
Merge pull request #1152 from jim-minter/dhconsolidate
Consolidate and clean up operator controllers
This commit is contained in:
Commit a4a19cf027
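Note: at a glance, this change moves the per-controller boilerplate (setting owner references, hashing workload configs, converting to unstructured, sorting by create order, applying each object) out of the individual controllers and into the dynamichelper package. A minimal sketch of the flow every caller now shares, using only names introduced in this diff (imports and surrounding declarations elided):

	// resources is built per controller; instance is the ARO Cluster object.
	err := dynamichelper.SetControllerReferences(resources, instance)
	if err != nil {
		return err
	}

	// Prepare hashes configmaps/secrets into pod template annotations,
	// converts everything to *unstructured.Unstructured, and sorts by create order.
	uns, err := dynamichelper.Prepare(resources)
	if err != nil {
		return err
	}

	// Ensure is now variadic and applies each object in turn.
	return dh.Ensure(ctx, uns...)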
@@ -11,8 +11,6 @@ import (
 	"github.com/Azure/go-autorest/autorest/to"
 	projectv1 "github.com/openshift/api/project/v1"
 	securityv1 "github.com/openshift/api/security/v1"
-	securityclient "github.com/openshift/client-go/security/clientset/versioned"
-	"github.com/sirupsen/logrus"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"

@@ -28,34 +26,8 @@ const (
 	GenevaKeyName  = "gcskey.pem"
 )
 
-type GenevaLogging interface {
-	Resources(context.Context) ([]runtime.Object, error)
-}
-
-type genevaLogging struct {
-	log     *logrus.Entry
-	cluster *arov1alpha1.Cluster
-
-	seccli securityclient.Interface
-
-	gcscert []byte
-	gcskey  []byte
-}
-
-func New(log *logrus.Entry, cluster *arov1alpha1.Cluster, seccli securityclient.Interface, gcscert, gcskey []byte) GenevaLogging {
-	return &genevaLogging{
-		log:     log,
-		cluster: cluster,
-
-		seccli: seccli,
-
-		gcscert: gcscert,
-		gcskey:  gcskey,
-	}
-}
-
-func (g *genevaLogging) securityContextConstraints(ctx context.Context, name, serviceAccountName string) (*securityv1.SecurityContextConstraints, error) {
-	scc, err := g.seccli.SecurityV1().SecurityContextConstraints().Get(ctx, "privileged", metav1.GetOptions{})
+func (g *GenevaloggingReconciler) securityContextConstraints(ctx context.Context, name, serviceAccountName string) (*securityv1.SecurityContextConstraints, error) {
+	scc, err := g.securitycli.SecurityV1().SecurityContextConstraints().Get(ctx, "privileged", metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}

@@ -68,7 +40,11 @@ func (g *genevaLogging) securityContextConstraints(ctx context.Context, name, serviceAccountName string) (*securityv1.SecurityContextConstraints, error) {
 	return scc, nil
 }
 
-func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
+func (g *GenevaloggingReconciler) daemonset(cluster *arov1alpha1.Cluster) (*appsv1.DaemonSet, error) {
+	r, err := azure.ParseResourceID(cluster.Spec.ResourceID)
+	if err != nil {
+		return nil, err
+	}
+
 	return &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{

@@ -143,7 +119,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 				Containers: []v1.Container{
 					{
 						Name:  "fluentbit-journal",
-						Image: version.FluentbitImage(g.cluster.Spec.ACRDomain),
+						Image: version.FluentbitImage(cluster.Spec.ACRDomain),
 						Command: []string{
 							"/opt/td-agent-bit/bin/td-agent-bit",
 						},

@@ -180,7 +156,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 					},
 					{
 						Name:  "fluentbit-containers",
-						Image: version.FluentbitImage(g.cluster.Spec.ACRDomain),
+						Image: version.FluentbitImage(cluster.Spec.ACRDomain),
 						Command: []string{
 							"/opt/td-agent-bit/bin/td-agent-bit",
 						},

@@ -217,7 +193,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 					},
 					{
 						Name:  "fluentbit-audit",
-						Image: version.FluentbitImage(g.cluster.Spec.ACRDomain),
+						Image: version.FluentbitImage(cluster.Spec.ACRDomain),
 						Command: []string{
 							"/opt/td-agent-bit/bin/td-agent-bit",
 						},

@@ -254,7 +230,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 					},
 					{
 						Name:  "mdsd",
-						Image: version.MdsdImage(g.cluster.Spec.ACRDomain),
+						Image: version.MdsdImage(cluster.Spec.ACRDomain),
 						Command: []string{
 							"/usr/sbin/mdsd",
 						},

@@ -269,7 +245,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 						Env: []v1.EnvVar{
 							{
 								Name:  "MONITORING_GCS_ENVIRONMENT",
-								Value: g.cluster.Spec.GenevaLogging.MonitoringGCSEnvironment,
+								Value: cluster.Spec.GenevaLogging.MonitoringGCSEnvironment,
 							},
 							{
 								Name: "MONITORING_GCS_ACCOUNT",

@@ -277,7 +253,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 							},
 							{
 								Name:  "MONITORING_GCS_REGION",
-								Value: g.cluster.Spec.Location,
+								Value: cluster.Spec.Location,
 							},
 							{
 								Name: "MONITORING_GCS_CERT_CERTFILE",

@@ -293,7 +269,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 							},
 							{
 								Name:  "MONITORING_CONFIG_VERSION",
-								Value: g.cluster.Spec.GenevaLogging.ConfigVersion,
+								Value: cluster.Spec.GenevaLogging.ConfigVersion,
 							},
 							{
 								Name: "MONITORING_USE_GENEVA_CONFIG_SERVICE",

@@ -301,7 +277,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 							},
 							{
 								Name:  "MONITORING_TENANT",
-								Value: g.cluster.Spec.Location,
+								Value: cluster.Spec.Location,
 							},
 							{
 								Name: "MONITORING_ROLE",

@@ -318,7 +294,7 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 							},
 							{
 								Name:  "RESOURCE_ID",
-								Value: strings.ToLower(g.cluster.Spec.ResourceID),
+								Value: strings.ToLower(cluster.Spec.ResourceID),
 							},
 							{
 								Name: "SUBSCRIPTION_ID",

@@ -358,16 +334,16 @@ func (g *genevaLogging) daemonset(r azure.Resource) *appsv1.DaemonSet {
 				},
 			},
 		},
-	}
+	}, nil
 }
 
-func (g *genevaLogging) Resources(ctx context.Context) ([]runtime.Object, error) {
-	r, err := azure.ParseResourceID(g.cluster.Spec.ResourceID)
+func (g *GenevaloggingReconciler) resources(ctx context.Context, cluster *arov1alpha1.Cluster, gcscert, gcskey []byte) ([]runtime.Object, error) {
+	scc, err := g.securityContextConstraints(ctx, "privileged-genevalogging", kubeServiceAccount)
 	if err != nil {
 		return nil, err
 	}
 
-	scc, err := g.securityContextConstraints(ctx, "privileged-genevalogging", kubeServiceAccount)
+	daemonset, err := g.daemonset(cluster)
 	if err != nil {
 		return nil, err
 	}

@@ -385,8 +361,8 @@ func (g *genevaLogging) Resources(ctx context.Context) ([]runtime.Object, error) {
 				Namespace: kubeNamespace,
 			},
 			Data: map[string][]byte{
-				GenevaCertName: g.gcscert,
-				GenevaKeyName:  g.gcskey,
+				GenevaCertName: gcscert,
+				GenevaKeyName:  gcskey,
 			},
 		},
 		&v1.ConfigMap{

@@ -408,6 +384,6 @@ func (g *genevaLogging) Resources(ctx context.Context) ([]runtime.Object, error) {
 			},
 		},
 		scc,
-		g.daemonset(r),
+		daemonset,
 	}, nil
 }
@@ -5,21 +5,16 @@ package genevalogging
 
 import (
 	"context"
-	"sort"
 
 	securityv1 "github.com/openshift/api/security/v1"
 	securityclient "github.com/openshift/client-go/security/clientset/versioned"
 	"github.com/sirupsen/logrus"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/Azure/ARO-RP/pkg/operator"

@@ -74,57 +69,29 @@ func (r *GenevaloggingReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 		r.log.Error(err)
 		return reconcile.Result{}, err
 	}
-	gl := New(r.log, instance, r.securitycli, mysec.Data[GenevaCertName], mysec.Data[GenevaKeyName])
 
-	resources, err := gl.Resources(ctx)
+	resources, err := r.resources(ctx, instance, mysec.Data[GenevaCertName], mysec.Data[GenevaKeyName])
 	if err != nil {
 		r.log.Error(err)
 		return reconcile.Result{}, err
 	}
 
-	for _, res := range resources {
-		o, err := meta.Accessor(res)
-		if err != nil {
-			r.log.Errorf("Accessor %s/%s: %v", o.GetNamespace(), o.GetName(), err)
-			return reconcile.Result{}, err
-		}
-
-		// This sets the reference on all objects that we create
-		// to our cluster instance. This causes the Owns() below to work and
-		// to get Reconcile events when anything happens to our objects.
-		err = controllerutil.SetControllerReference(instance, o, scheme.Scheme)
-		if err != nil {
-			r.log.Errorf("SetControllerReference %s/%s: %v", o.GetNamespace(), o.GetName(), err)
-			return reconcile.Result{}, err
-		}
-	}
-
-	err = dynamichelper.HashWorkloadConfigs(resources)
+	err = dynamichelper.SetControllerReferences(resources, instance)
 	if err != nil {
-		r.log.Errorf("HashWorkloadConfigs %v", err)
+		r.log.Error(err)
 		return reconcile.Result{}, err
 	}
 
-	uns := make([]*unstructured.Unstructured, 0, len(resources))
-	for _, res := range resources {
-		un := &unstructured.Unstructured{}
-		err = scheme.Scheme.Convert(res, un, nil)
-		if err != nil {
-			return reconcile.Result{}, err
-		}
-		uns = append(uns, un)
+	uns, err := dynamichelper.Prepare(resources)
+	if err != nil {
+		r.log.Error(err)
+		return reconcile.Result{}, err
 	}
 
-	sort.Slice(uns, func(i, j int) bool {
-		return dynamichelper.CreateOrder(uns[i], uns[j])
-	})
-
-	for _, un := range uns {
-		err = dh.Ensure(ctx, un)
-		if err != nil {
-			r.log.Error(err)
-			return reconcile.Result{}, err
-		}
+	err = dh.Ensure(ctx, uns...)
+	if err != nil {
+		r.log.Error(err)
+		return reconcile.Result{}, err
 	}
 
 	return reconcile.Result{}, nil
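Note: the comment deleted above ("This sets the reference on all objects that we create...") described why owner references matter here: controller-runtime's Owns() only delivers reconcile events for created objects when they carry a controller reference back to the Cluster instance, and dynamichelper.SetControllerReferences now sets that in one place. For context, the wiring this relies on looks roughly like the sketch below; the actual SetupWithManager is not part of this diff, so treat it as an assumption:

	func (r *GenevaloggingReconciler) SetupWithManager(mgr ctrl.Manager) error {
		return ctrl.NewControllerManagedBy(mgr).
			For(&arov1alpha1.Cluster{}). // reconcile when the Cluster changes
			Owns(&appsv1.DaemonSet{}).   // ...and when owned objects change
			Complete(r)
	}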
@@ -5,19 +5,14 @@ package routefix
 
 import (
 	"context"
-	"sort"
 
 	securityclient "github.com/openshift/client-go/security/clientset/versioned"
 	"github.com/sirupsen/logrus"
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	arov1alpha1 "github.com/Azure/ARO-RP/pkg/operator/apis/aro.openshift.io/v1alpha1"

@@ -55,6 +50,7 @@ func (r *RouteFixReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 
 	instance, err := r.arocli.Clusters().Get(ctx, request.Name, metav1.GetOptions{})
 	if err != nil {
+		r.log.Error(err)
 		return reconcile.Result{}, err
 	}
 

@@ -73,49 +69,22 @@ func (r *RouteFixReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
 		return reconcile.Result{}, err
 	}
 
-	for _, res := range resources {
-		o, err := meta.Accessor(res)
-		if err != nil {
-			r.log.Errorf("Accessor %s/%s: %v", o.GetNamespace(), o.GetName(), err)
-			return reconcile.Result{}, err
-		}
-
-		// This sets the reference on all objects that we create
-		// to our cluster instance. This causes the Owns() below to work and
-		// to get Reconcile events when anything happens to our objects.
-		err = controllerutil.SetControllerReference(instance, o, scheme.Scheme)
-		if err != nil {
-			r.log.Errorf("SetControllerReference %s/%s: %v", o.GetNamespace(), o.GetName(), err)
-			return reconcile.Result{}, err
-		}
-	}
-
-	err = dynamichelper.HashWorkloadConfigs(resources)
+	err = dynamichelper.SetControllerReferences(resources, instance)
 	if err != nil {
-		r.log.Errorf("HashWorkloadConfigs %v", err)
+		r.log.Error(err)
 		return reconcile.Result{}, err
 	}
 
-	uns := make([]*unstructured.Unstructured, 0, len(resources))
-	for _, res := range resources {
-		un := &unstructured.Unstructured{}
-		err = scheme.Scheme.Convert(res, un, nil)
-		if err != nil {
-			return reconcile.Result{}, err
-		}
-		uns = append(uns, un)
+	uns, err := dynamichelper.Prepare(resources)
+	if err != nil {
+		r.log.Error(err)
+		return reconcile.Result{}, err
 	}
 
-	sort.Slice(uns, func(i, j int) bool {
-		return dynamichelper.CreateOrder(uns[i], uns[j])
-	})
-
-	for _, un := range uns {
-		err = dh.Ensure(ctx, un)
-		if err != nil {
-			r.log.Error(err)
-			return reconcile.Result{}, err
-		}
+	err = dh.Ensure(ctx, uns...)
+	if err != nil {
+		r.log.Error(err)
+		return reconcile.Result{}, err
 	}
 
 	return reconcile.Result{}, nil
@@ -6,7 +6,6 @@ package deploy
 import (
 	"context"
 	"fmt"
-	"sort"
 	"time"
 
 	"github.com/Azure/go-autorest/autorest/azure"

@@ -18,7 +17,6 @@ import (
 	extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"

@@ -185,20 +183,11 @@ func (o *operator) CreateOrUpdate(ctx context.Context) error {
 		return err
 	}
 
-	uns := make([]*unstructured.Unstructured, 0, len(resources))
-	for _, res := range resources {
-		un := &unstructured.Unstructured{}
-		err = scheme.Scheme.Convert(res, un, nil)
-		if err != nil {
-			return err
-		}
-		uns = append(uns, un)
+	uns, err := dynamichelper.Prepare(resources)
+	if err != nil {
+		return err
 	}
 
-	sort.Slice(uns, func(i, j int) bool {
-		return dynamichelper.CreateOrder(uns[i], uns[j])
-	})
-
 	for _, un := range uns {
 		err = o.dh.Ensure(ctx, un)
 		if err != nil {
@@ -9,7 +9,7 @@ import (
 
 // rather than list every GK, just list the ones whose creation really has to be
 // brought forward
-var createOrder = map[string]int{
+var createOrderMap = map[string]int{
 	// non-namespaced resources
 	"CustomResourceDefinition.apiextensions.k8s.io": 1, // before custom resources
 	"ClusterRole.rbac.authorization.k8s.io":         2, // before workload resources

@@ -26,15 +26,15 @@ var createOrder = map[string]int{
 
 const createOrderMax = 99
 
-// CreateOrder is to be used in a sort.Slice() comparison. It is to help make
+// createOrder is to be used in a sort.Slice() comparison. It is to help make
 // sure that resources are created in an order that causes a reliable startup.
-func CreateOrder(i, j *unstructured.Unstructured) bool {
-	io, ok := createOrder[i.GroupVersionKind().GroupKind().String()]
+func createOrder(i, j *unstructured.Unstructured) bool {
+	io, ok := createOrderMap[i.GroupVersionKind().GroupKind().String()]
 	if !ok {
 		io = createOrderMax
 	}
 
-	jo, ok := createOrder[j.GroupVersionKind().GroupKind().String()]
+	jo, ok := createOrderMap[j.GroupVersionKind().GroupKind().String()]
 	if !ok {
 		jo = createOrderMax
 	}
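Note: with CreateOrder unexported, the comparator is only reachable through dynamichelper.Prepare. The behaviour is unchanged: kinds listed in createOrderMap sort by their weight, and everything else gets createOrderMax (99) and sorts last. An illustrative use, with hypothetical object variables:

	// crd, clusterRole and deployment are *unstructured.Unstructured values.
	uns := []*unstructured.Unstructured{deployment, clusterRole, crd}
	sort.Slice(uns, func(i, j int) bool {
		return createOrder(uns[i], uns[j])
	})
	// order is now: crd (weight 1), clusterRole (2), deployment (99)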
@@ -29,7 +29,7 @@ func TestCreateOrder(t *testing.T) {
 	}
 
 	sort.Slice(test, func(i, j int) bool {
-		return CreateOrder(test[i], test[j])
+		return createOrder(test[i], test[j])
 	})
 
 	if !reflect.DeepEqual(expect, test) {
@@ -29,7 +29,7 @@ type Interface interface {
 	RefreshAPIResources() error
 	CreateOrUpdate(ctx context.Context, obj *unstructured.Unstructured) error
 	Delete(ctx context.Context, groupKind, namespace, name string) error
-	Ensure(ctx context.Context, o *unstructured.Unstructured) error
+	Ensure(ctx context.Context, objs ...*unstructured.Unstructured) error
 	Get(ctx context.Context, groupKind, namespace, name string) (*unstructured.Unstructured, error)
 	List(ctx context.Context, groupKind, namespace string) (*unstructured.UnstructuredList, error)
 }

@@ -173,7 +173,18 @@ func (dh *dynamicHelper) Delete(ctx context.Context, groupKind, namespace, name string) error {
 // is intended to ensure that an object matches a desired state. It is tolerant
 // of unspecified fields in the desired state (e.g. it will typically
 // leave .status untouched).
-func (dh *dynamicHelper) Ensure(ctx context.Context, o *unstructured.Unstructured) error {
+func (dh *dynamicHelper) Ensure(ctx context.Context, objs ...*unstructured.Unstructured) error {
+	for _, o := range objs {
+		err := dh.ensureOne(ctx, o)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (dh *dynamicHelper) ensureOne(ctx context.Context, o *unstructured.Unstructured) error {
 	gvr, err := dh.findGVR(o.GroupVersionKind().GroupKind().String(), o.GroupVersionKind().Version)
 	if err != nil {
 		return err
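Note: the variadic signature keeps Ensure's per-object semantics (ensureOne still matches one object to its desired state, leaving unspecified fields such as .status alone) and simply folds the caller-side loop into the helper, stopping at the first error. Existing single-object callers keep working unchanged:

	err := dh.Ensure(ctx, un)    // single object, as before
	err = dh.Ensure(ctx, uns...) // a whole prepared slice in one call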
@@ -11,14 +11,80 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
-// HashWorkloadConfigs iterates daemonsets, walks their volumes, and updates
+func SetControllerReferences(resources []runtime.Object, owner metav1.Object) error {
+	for _, resource := range resources {
+		r, err := meta.Accessor(resource)
+		if err != nil {
+			return err
+		}
+
+		err = controllerutil.SetControllerReference(owner, r, scheme.Scheme)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func Prepare(resources []runtime.Object) ([]*unstructured.Unstructured, error) {
+	err := hashWorkloadConfigs(resources)
+	if err != nil {
+		return nil, err
+	}
+
+	uns := make([]*unstructured.Unstructured, 0, len(resources))
+	for _, resource := range resources {
+		un := &unstructured.Unstructured{}
+		err = scheme.Scheme.Convert(resource, un, nil)
+		if err != nil {
+			return nil, err
+		}
+		uns = append(uns, un)
+	}
+
+	sort.Slice(uns, func(i, j int) bool {
+		return createOrder(uns[i], uns[j])
+	})
+
+	return uns, nil
+}
+
+func addWorkloadHashes(o *metav1.ObjectMeta, t *v1.PodTemplateSpec, configToHash map[string]string) {
+	for _, v := range t.Spec.Volumes {
+		if v.Secret != nil {
+			if hash, found := configToHash[keyFunc(schema.GroupKind{Kind: "Secret"}, o.Namespace, v.Secret.SecretName)]; found {
+				if t.Annotations == nil {
+					t.Annotations = map[string]string{}
+				}
+				t.Annotations["checksum/secret-"+v.Secret.SecretName] = hash
+			}
+		}
+
+		if v.ConfigMap != nil {
+			if hash, found := configToHash[keyFunc(schema.GroupKind{Kind: "ConfigMap"}, o.Namespace, v.ConfigMap.Name)]; found {
+				if t.Annotations == nil {
+					t.Annotations = map[string]string{}
+				}
+				t.Annotations["checksum/configmap-"+v.ConfigMap.Name] = hash
+			}
+		}
+	}
+}
+
+// hashWorkloadConfigs iterates daemonsets, walks their volumes, and updates
 // their pod templates with annotations that include the hashes of the content
 // for each configmap or secret.
-func HashWorkloadConfigs(resources []runtime.Object) error {
+func hashWorkloadConfigs(resources []runtime.Object) error {
 	// map config resources to their hashed content
 	configToHash := map[string]string{}
 	for _, o := range resources {

@@ -36,29 +102,13 @@ func HashWorkloadConfigs(resources []runtime.Object) error {
 	for _, o := range resources {
 		switch o := o.(type) {
 		case *appsv1.DaemonSet:
-			for _, v := range o.Spec.Template.Spec.Volumes {
-				if v.Secret != nil {
-					if hash, found := configToHash[keyFunc(schema.GroupKind{Kind: "Secret"}, o.Namespace, v.Secret.SecretName)]; found {
-						if o.Spec.Template.Annotations == nil {
-							o.Spec.Template.Annotations = map[string]string{}
-						}
-						o.Spec.Template.Annotations["checksum/secret-"+v.Secret.SecretName] = hash
-					}
-				}
+			addWorkloadHashes(&o.ObjectMeta, &o.Spec.Template, configToHash)
 
-				if v.ConfigMap != nil {
-					if hash, found := configToHash[keyFunc(schema.GroupKind{Kind: "ConfigMap"}, o.Namespace, v.ConfigMap.Name)]; found {
-						if o.Spec.Template.Annotations == nil {
-							o.Spec.Template.Annotations = map[string]string{}
-						}
-						o.Spec.Template.Annotations["checksum/configmap-"+v.ConfigMap.Name] = hash
-					}
-				}
-			}
+		case *appsv1.Deployment:
+			addWorkloadHashes(&o.ObjectMeta, &o.Spec.Template, configToHash)
 
-		case *appsv1.Deployment, *appsv1.StatefulSet:
-			// TODO: add as/when needed
-			return fmt.Errorf("unimplemented: %T", o)
+		case *appsv1.StatefulSet:
+			addWorkloadHashes(&o.ObjectMeta, &o.Spec.Template, configToHash)
 		}
 	}
 
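Note: the checksum annotations written by addWorkloadHashes are the usual trick for forcing a rollout when only a referenced secret or configmap changes: the hash of the config content lands in the pod template, so any config edit changes the template and the workload controller re-rolls the pods. A sketch of the effect; the configmap name and hash value here are illustrative only:

	// What the pod template looks like after hashWorkloadConfigs, for a
	// daemonset mounting a configmap named "fluent-config" (hypothetical).
	ds.Spec.Template.Annotations = map[string]string{
		"checksum/configmap-fluent-config": "9f86d081884c7d65",
	}
	// Editing the configmap changes the hash, hence the pod template,
	// hence the daemonset controller rolls the pods.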
@@ -91,7 +91,7 @@ func TestHashWorkloadConfigs(t *testing.T) {
 		},
 	}
 
-	err := HashWorkloadConfigs([]runtime.Object{cm, sec, ds})
+	err := hashWorkloadConfigs([]runtime.Object{cm, sec, ds})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -64,17 +64,22 @@ func (mr *MockInterfaceMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
 }
 
 // Ensure mocks base method
-func (m *MockInterface) Ensure(arg0 context.Context, arg1 *unstructured.Unstructured) error {
+func (m *MockInterface) Ensure(arg0 context.Context, arg1 ...*unstructured.Unstructured) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Ensure", arg0, arg1)
+	varargs := []interface{}{arg0}
+	for _, a := range arg1 {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "Ensure", varargs...)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // Ensure indicates an expected call of Ensure
-func (mr *MockInterfaceMockRecorder) Ensure(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockInterfaceMockRecorder) Ensure(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ensure", reflect.TypeOf((*MockInterface)(nil).Ensure), arg0, arg1)
+	varargs := append([]interface{}{arg0}, arg1...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ensure", reflect.TypeOf((*MockInterface)(nil).Ensure), varargs...)
 }
 
 // Get mocks base method
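Note: with the regenerated mock, test expectations use gomock's variadic call recording, so each object passed to Ensure is matched as its own argument. A hypothetical expectation (the mock package name is assumed from the file layout):

	dh := mock_dynamichelper.NewMockInterface(gomockController)
	// expect one Ensure call applying two unstructured objects
	dh.EXPECT().Ensure(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)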