Mirror of https://github.com/Azure/orkestra.git
Add Conditions Object to Statuses of AppGroup (#157)
* Add conditions to the application group status
* Fix bugs in the build
* Update with other condition types
* Add condition type for workflow and application group reconciliation
* Remove unneeded progressing call
* Surface condition objects for the underlying subcharts
* Clean up the chart status retrieval code
* Fix bug in checking generation for update
* Move observed generation update
* CRD update in charts dir
* Patch instead of update to prevent conflicts
* Fix a couple of patch bugs
* Remove special handling of terminating namespaces; fix bug in handleRemediation

Signed-off-by: Nitish Malhotra <nitish.malhotra@gmail.com>
Co-authored-by: Nitish Malhotra <nitish.malhotra@gmail.com>
Parent: 01666c98c6
Commit: 7b79374a25
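This change threads metav1.Condition objects through the ApplicationGroup and chart statuses using helpers such as meta.SetResourceCondition from the Orkestra pkg/meta package. A rough sketch of that helper pattern, assuming the apimachinery condition utilities (the interface name below is illustrative, not necessarily what pkg/meta defines):

```go
// Hypothetical sketch of the condition-helper pattern; the real pkg/meta
// implementation introduced by this commit may differ in detail.
package meta

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ObjectWithStatusConditions is any object exposing a pointer to its status
// conditions; ApplicationGroup gains GetStatusConditions() in this commit.
type ObjectWithStatusConditions interface {
	GetStatusConditions() *[]metav1.Condition
}

// SetResourceCondition upserts a condition of the given type, delegating
// LastTransitionTime bookkeeping and de-duplication to apimachinery.
func SetResourceCondition(obj ObjectWithStatusConditions, condType string, status metav1.ConditionStatus, reason, message string) {
	apimeta.SetStatusCondition(obj.GetStatusConditions(), metav1.Condition{
		Type:    condType,
		Status:  status,
		Reason:  reason,
		Message: message,
	})
}
```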
@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.13 as builder
FROM golang:1.15 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
@@ -4,6 +4,7 @@
package v1alpha1

import (
"github.com/Azure/Orkestra/pkg/meta"
helmopv1 "github.com/fluxcd/helm-operator/pkg/apis/helm.fluxcd.io/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -83,14 +84,25 @@ type ChartRef struct {
// ChartStatus shows the current status of the Application Reconciliation process
type ChartStatus struct {
// Phase reflects the current state of the HelmRelease
// +optional
Phase helmopv1.HelmReleasePhase `json:"phase,omitempty"`

// Error string from the error during reconciliation (if any)
// +optional
Error string `json:"error,omitempty"`

// Version of the chart/subchart
// +optional
Version string `json:"version,omitempty"`

// Staged if true denotes that the chart/subchart has been pushed to the
// staging helm repo
// +optional
Staged bool `json:"staged,omitempty"`

// +optional
// Conditions holds the conditions for the ChartStatus
Conditions []metav1.Condition `json:"conditions,omitempty"`
}

// ApplicationGroupSpec defines the desired state of ApplicationGroup
@@ -125,10 +137,15 @@ type DAG struct {
// ApplicationStatus shows the current status of the application helm release
type ApplicationStatus struct {
// Name of the application
// +optional
Name string `json:"name"`

// ChartStatus for the application helm chart
// +optional
ChartStatus `json:",inline"`

// Subcharts contains the subchart chart status
// +optional
Subcharts map[string]ChartStatus `json:"subcharts,omitempty"`
}

@@ -151,27 +168,92 @@ type ApplicationGroupStatus struct {

// Phase is the reconciliation phase
// +optional
Phase ReconciliationPhase `json:"phase,omitempty"`

// Update is an internal flag used to trigger a workflow update
// +optional
Update bool `json:"update,omitempty"`

// Error string from errors during reconciliation
// +optional
Error string `json:"error,omitempty"`

// ObservedGeneration captures the last generation
// that was captured and completed by the reconciler
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`

// Conditions holds the conditions of the ApplicationGroup
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
}

// Progressing resets the conditions of the ApplicationGroup to
// metav1.Condition of type meta.ReadyCondition with status 'Unknown' and
// meta.StartingReason reason and message.
func (in *ApplicationGroup) Progressing() {
in.Status.Conditions = []metav1.Condition{}
meta.SetResourceCondition(in, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "workflow is reconciling...")
meta.SetResourceCondition(in, meta.DeployCondition, metav1.ConditionUnknown, meta.ProgressingReason, "application group is reconciling...")
}

// RollingBack sets the meta.ReadyCondition to 'True' and
// meta.RollingBack reason and message
func (in *ApplicationGroup) RollingBack() {
meta.SetResourceCondition(in, meta.ReadyCondition, metav1.ConditionTrue, meta.FailedReason, "workflow failed because of helmreleases, rolling back...")
meta.SetResourceCondition(in, meta.DeployCondition, metav1.ConditionTrue, meta.RollingBackReason, "rolling back because of failed helm releases...")
}

// Succeeded sets the meta.ReadyCondition to 'True', with the given
// meta.Succeeded reason and message
func (in *ApplicationGroup) ReadySucceeded() {
meta.SetResourceCondition(in, meta.ReadyCondition, metav1.ConditionTrue, meta.SucceededReason, "workflow and reconciliation succeeded")
}

// Failed sets the meta.ReadyCondition to 'True' and
// meta.FailedReason reason and message
func (in *ApplicationGroup) ReadyFailed(message string) {
meta.SetResourceCondition(in, meta.ReadyCondition, metav1.ConditionTrue, meta.FailedReason, message)
}

// Succeeded sets the meta.DeployCondition to 'True', with the given
// meta.Succeeded reason and message
func (in *ApplicationGroup) DeploySucceeded() {
meta.SetResourceCondition(in, meta.DeployCondition, metav1.ConditionTrue, meta.SucceededReason, "application group reconciliation succeeded")
}

// Failed sets the meta.DeployCondition to 'True' and
// meta.FailedReason reason and message
func (in *ApplicationGroup) DeployFailed(message string) {
meta.SetResourceCondition(in, meta.DeployCondition, metav1.ConditionTrue, meta.FailedReason, message)
}

// GetReadyCondition gets the string condition.Reason of the
// meta.ReadyCondition type
func (in *ApplicationGroup) GetReadyCondition() string {
condition := meta.GetResourceCondition(in, meta.ReadyCondition)
if condition == nil {
return meta.ProgressingReason
}
return condition.Reason
}

// GetDeployCondition gets the string condition.Reason of the
// meta.ReadyCondition type
func (in *ApplicationGroup) GetDeployCondition() string {
condition := meta.GetResourceCondition(in, meta.DeployCondition)
if condition == nil {
return meta.ProgressingReason
}
return condition.Reason
}

// GetStatusConditions gets the status conditions from the
// ApplicationGroup status
func (in *ApplicationGroup) GetStatusConditions() *[]metav1.Condition {
return &in.Status.Conditions
}

// +kubebuilder:object:root=true
// +kubebuilder:resource:path=applicationgroups,scope=Cluster
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`
// +kubebuilder:printcolumn:name="Deploy",type="string",JSONPath=".status.conditions[?(@.type==\"Deploy\")].reason"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].reason"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// ApplicationGroup is the Schema for the applicationgroups API
type ApplicationGroup struct {
metav1.TypeMeta `json:",inline"`
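The Ready and Deploy conditions added above also give external tooling a stable signal to poll. A minimal, hypothetical consumer (waitForReady is not part of this commit) could look like this:

```go
// Illustrative only: polls an ApplicationGroup until its Ready condition
// reports success or failure, using the helpers defined above.
package example

import (
	"context"
	"fmt"
	"time"

	orkestrav1alpha1 "github.com/Azure/Orkestra/api/v1alpha1"
	"github.com/Azure/Orkestra/pkg/meta"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func waitForReady(ctx context.Context, c client.Client, key types.NamespacedName) error {
	for {
		var appGroup orkestrav1alpha1.ApplicationGroup
		if err := c.Get(ctx, key, &appGroup); err != nil {
			return err
		}
		// GetReadyCondition returns the Reason of the Ready condition and
		// falls back to the Progressing reason when nothing is set yet.
		switch appGroup.GetReadyCondition() {
		case meta.SucceededReason:
			return nil
		case meta.FailedReason:
			return fmt.Errorf("application group %s failed", key.Name)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
		}
	}
}
```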
@@ -9,6 +9,7 @@ package v1alpha1

import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -120,6 +121,13 @@ func (in *ApplicationGroupStatus) DeepCopyInto(out *ApplicationGroupStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGroupStatus.
@@ -167,12 +175,12 @@ func (in *ApplicationSpec) DeepCopy() *ApplicationSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
*out = *in
out.ChartStatus = in.ChartStatus
in.ChartStatus.DeepCopyInto(&out.ChartStatus)
if in.Subcharts != nil {
in, out := &in.Subcharts, &out.Subcharts
*out = make(map[string]ChartStatus, len(*in))
for key, val := range *in {
(*out)[key] = val
(*out)[key] = *val.DeepCopy()
}
}
}
@@ -212,6 +220,13 @@ func (in *ChartRef) DeepCopy() *ChartRef {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChartStatus) DeepCopyInto(out *ChartStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartStatus.
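The generated deepcopy code above copies the new Conditions slices element by element rather than assigning them. That is deliberate: a plain slice assignment would alias the same backing array. A small illustrative snippet (not part of the commit):

```go
// Illustrative: why Conditions need an element-wise deep copy.
package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

func copyConditions(in []metav1.Condition) []metav1.Condition {
	if in == nil {
		return nil
	}
	out := make([]metav1.Condition, len(in))
	for i := range in {
		// DeepCopyInto also copies nested values such as LastTransitionTime,
		// so later mutations of the copy never leak into the original.
		in[i].DeepCopyInto(&out[i])
	}
	return out
}
```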
The diff for one file is not shown because it is too large.
@@ -17,8 +17,14 @@ spec:
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.phase
name: Phase
- jsonPath: .status.conditions[?(@.type=="Deploy")].reason
name: Deploy
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].reason
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Message
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
@@ -29,10 +35,14 @@ spec:
description: ApplicationGroup is the Schema for the applicationgroups API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -56,19 +66,34 @@ spec:
description: Namespace of the application
type: string
spec:
description: Spec contains the application spec including the chart info and overlay values
description: Spec contains the application spec including the
chart info and overlay values
properties:
chart:
description: Chart holds the values needed to pull the chart
properties:
authSecretRef:
description: AuthSecretRef is a reference to the auth secret to access a private helm repository
description: AuthSecretRef is a reference to the auth
secret to access a private helm repository
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
description: 'If referring to a piece of an object
instead of an entire object, this string should
contain a valid JSON/Go field access statement,
such as desiredState.manifest.containers[2]. For
example, if the object reference is to a container
within a pod, this would take on a value like:
"spec.containers{name}" (where "name" refers to
the name of the container that triggered the event)
or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax
is chosen only to have some well-defined way of
referencing a part of an object. TODO: this design
is not final and this field is subject to change
in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
@@ -77,17 +102,21 @@ spec:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
description: 'Namespace of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
description: 'Specific resourceVersion to which
this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
chartPullSecret:
description: ChartPullSecret holds the reference to the authentication secret for accessing the Helm repository using HTTPS basic auth. NOT IMPLEMENTED!
description: ChartPullSecret holds the reference to
the authentication secret for accessing the Helm repository
using HTTPS basic auth. NOT IMPLEMENTED!
properties:
name:
type: string
@@ -95,22 +124,33 @@ spec:
- name
type: object
git:
description: Git URL is the URL of the Git repository, e.g. `git@github.com:org/repo`, `http://github.com/org/repo`, or `ssh://git@example.com:2222/org/repo.git`.
description: Git URL is the URL of the Git repository,
e.g. `git@github.com:org/repo`, `http://github.com/org/repo`,
or `ssh://git@example.com:2222/org/repo.git`.
type: string
name:
description: Name is the name of the Helm chart _without_ an alias, e.g. redis (for `helm upgrade [flags] stable/redis`).
description: Name is the name of the Helm chart _without_
an alias, e.g. redis (for `helm upgrade [flags] stable/redis`).
type: string
path:
description: Path is the path to the chart relative to the repository root.
description: Path is the path to the chart relative
to the repository root.
type: string
ref:
description: Ref is the Git branch (or other reference) to use. Defaults to 'master', or the configured default Git ref.
description: Ref is the Git branch (or other reference)
to use. Defaults to 'master', or the configured default
Git ref.
type: string
repository:
description: RepoURL is the URL of the Helm repository, e.g. `https://kubernetes-charts.storage.googleapis.com` or `https://charts.example.com`.
description: RepoURL is the URL of the Helm repository,
e.g. `https://kubernetes-charts.storage.googleapis.com`
or `https://charts.example.com`.
type: string
secretRef:
description: SecretRef holds the authentication secret for accessing the Git repository (over HTTPS). The credentials will be added to an HTTPS GitURL before the mirror is started.
description: SecretRef holds the authentication secret
for accessing the Git repository (over HTTPS). The
credentials will be added to an HTTPS GitURL before
the mirror is started.
properties:
name:
type: string
@@ -120,30 +160,43 @@ spec:
- name
type: object
skipDepUpdate:
description: SkipDepUpdate will tell the operator to skip running 'helm dep update' before installing or upgrading the chart, the chart dependencies _must_ be present for this to succeed.
description: SkipDepUpdate will tell the operator to
skip running 'helm dep update' before installing or
upgrading the chart, the chart dependencies _must_
be present for this to succeed.
type: boolean
version:
description: Version is the targeted Helm chart version, e.g. 7.0.1.
description: Version is the targeted Helm chart version,
e.g. 7.0.1.
type: string
type: object
release:
description: Release holds the values to apply to the helm release
description: Release holds the values to apply to the helm
release
properties:
forceUpgrade:
description: Force will mark this Helm release to `--force` upgrades. This forces the resource updates through delete/recreate if needed.
description: Force will mark this Helm release to `--force`
upgrades. This forces the resource updates through
delete/recreate if needed.
type: boolean
helmVersion:
default: v3
description: HelmVersion is the version of Helm to target. If not supplied, the lowest _enabled Helm version_ will be targeted.
description: HelmVersion is the version of Helm to target.
If not supplied, the lowest _enabled Helm version_
will be targeted.
enum:
- v2
- v3
type: string
targetNamespace:
description: TargetNamespace overrides the targeted namespace for the Helm release. The default namespace equals to the namespace of the HelmRelease resource.
description: TargetNamespace overrides the targeted
namespace for the Helm release. The default namespace
equals to the namespace of the HelmRelease resource.
type: string
timeout:
description: Timeout is the time to wait for any individual Kubernetes operation (like Jobs for hooks) during installation and upgrade operations.
description: Timeout is the time to wait for any individual
Kubernetes operation (like Jobs for hooks) during
installation and upgrade operations.
format: int64
type: integer
values:
@@ -151,16 +204,22 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
wait:
description: Wait will mark this Helm release to wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful.
description: Wait will mark this Helm release to wait
until all Pods, PVCs, Services, and minimum number
of Pods of a Deployment, StatefulSet, or ReplicaSet
are in a ready state before marking the release as
successful.
type: boolean
type: object
subcharts:
description: Subcharts provides the dependency order among the subcharts of the application
description: Subcharts provides the dependency order among
the subcharts of the application
items:
description: DAG contains the dependency information
properties:
dependencies:
description: Dependencies on other applications by name
description: Dependencies on other applications by
name
items:
type: string
type: array
@@ -182,23 +241,164 @@ spec:
status:
description: ApplicationGroupStatus defines the observed state of ApplicationGroup
properties:
error:
description: Error string from errors during reconciliation
type: string
conditions:
description: Conditions holds the conditions of the ApplicationGroup
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
observedGeneration:
description: ObservedGeneration captures the last generation that was captured and completed by the reconciler
description: ObservedGeneration captures the last generation that
was captured and completed by the reconciler
format: int64
type: integer
phase:
description: Phase is the reconciliation phase
type: string
status:
description: Applications status
items:
description: ApplicationStatus shows the current status of the application helm release
description: ApplicationStatus shows the current status of the application
helm release
properties:
conditions:
description: Conditions holds the conditions for the ChartStatus
items:
description: "Condition contains details for one aspect of
the current state of this API Resource. --- This struct
is intended for direct use as an array at the field path
.status.conditions. For example, type FooStatus struct{
\ // Represents the observations of a foo's current state.
\ // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map //
+listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should
be when the underlying condition changed. If that is
not known, then using the time when the API field changed
is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the
current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last transition.
Producers of specific condition types may define expected
values and meanings for this field, and whether the
values are considered a guaranteed API. The value should
be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False,
Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across
resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability
to deconflict is important. The regex it matches is
(dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
error:
description: Error string from the error during reconciliation (if any)
description: Error string from the error during reconciliation
(if any)
type: string
name:
description: Name of the application
@@ -222,14 +422,96 @@ spec:
- RollbackFailed
type: string
staged:
description: Staged if true denotes that the chart/subchart has been pushed to the staging helm repo
description: Staged if true denotes that the chart/subchart
has been pushed to the staging helm repo
type: boolean
subcharts:
additionalProperties:
description: ChartStatus shows the current status of the Application Reconciliation process
description: ChartStatus shows the current status of the Application
Reconciliation process
properties:
conditions:
description: Conditions holds the conditions for the ChartStatus
items:
description: "Condition contains details for one aspect
of the current state of this API Resource. --- This
struct is intended for direct use as an array at the
field path .status.conditions. For example, type
FooStatus struct{ // Represents the observations
of a foo's current state. // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"
\ // +patchMergeKey=type // +patchStrategy=merge
\ // +listType=map // +listMapKey=type Conditions
[]metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time
the condition transitioned from one status to
another. This should be when the underlying condition
changed. If that is not known, then using the
time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message
indicating details about the transition. This
may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the
.status.conditions[x].observedGeneration is 9,
the condition is out of date with respect to the
current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last
transition. Producers of specific condition types
may define expected values and meanings for this
field, and whether the values are considered a
guaranteed API. The value should be a CamelCase
string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True,
False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in
foo.example.com/CamelCase. --- Many .condition.type
values are consistent across resources like Available,
but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to
deconflict is important. The regex it matches
is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
error:
description: Error string from the error during reconciliation (if any)
description: Error string from the error during reconciliation
(if any)
type: string
phase:
description: Phase reflects the current state of the HelmRelease
@@ -250,7 +532,8 @@ spec:
- RollbackFailed
type: string
staged:
description: Staged if true denotes that the chart/subchart has been pushed to the staging helm repo
description: Staged if true denotes that the chart/subchart
has been pushed to the staging helm repo
type: boolean
version:
description: Version of the chart/subchart
@@ -261,12 +544,10 @@ spec:
version:
description: Version of the chart/subchart
type: string
required:
- name
type: object
type: array
update:
description: Update is an internal flag used to trigger a workflow update
description: Phase is the reconciliation phase
type: boolean
type: object
type: object
@@ -5,7 +5,6 @@ metadata:
spec:
applications:
- name: ambassador
namespace: ambassador
dependencies: []
spec:
chart:
@@ -29,7 +28,6 @@ spec:
service:
type: ClusterIP
- name: bookinfo
namespace: bookinfo
dependencies: [ambassador]
spec:
chart:
@@ -11,12 +11,14 @@ import (
"time"

"github.com/Azure/Orkestra/pkg"
"github.com/Azure/Orkestra/pkg/meta"
"github.com/Azure/Orkestra/pkg/registry"
"github.com/Azure/Orkestra/pkg/workflow"
v1alpha12 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
helmopv1 "github.com/fluxcd/helm-operator/pkg/apis/helm.fluxcd.io/v1"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
@@ -81,12 +83,11 @@ type ApplicationGroupReconciler struct {
// +kubebuilder:rbac:groups=orkestra.azure.microsoft.com,resources=applicationgroups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=orkestra.azure.microsoft.com,resources=applicationgroups/status,verbs=get;update;patch

func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *ApplicationGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var requeue bool
var err error
var appGroup orkestrav1alpha1.ApplicationGroup

ctx := context.Background()
logr := r.Log.WithValues(appgroupNameKey, req.NamespacedName.Name)

if err := r.Get(ctx, req.NamespacedName, &appGroup); err != nil {
@@ -98,6 +99,8 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
return ctrl.Result{}, err
}

patch := client.MergeFrom(appGroup.DeepCopy())

// Check if this is an update event to the ApplicationGroup
// in which case unmarshal the last successful spec into a
// variable
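The patch captured above with client.MergeFrom replaces the earlier r.Update / r.Status().Update calls throughout this file. A minimal sketch of the pattern, assuming controller-runtime as used elsewhere in the controller (the helper name is illustrative):

```go
// Sketch of the Patch-instead-of-Update pattern adopted by this commit.
package example

import (
	"context"

	orkestrav1alpha1 "github.com/Azure/Orkestra/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func markProgressing(ctx context.Context, c client.Client, appGroup *orkestrav1alpha1.ApplicationGroup) error {
	// Snapshot the object before mutating it; MergeFrom computes a merge
	// patch against this base, so only the changed fields are sent and a
	// stale resourceVersion does not trigger update conflicts.
	patch := client.MergeFrom(appGroup.DeepCopy())

	// Mutate status through the condition helpers introduced in this commit.
	appGroup.Progressing()

	// Patch only the /status subresource with the computed diff.
	return c.Status().Patch(ctx, appGroup, patch)
}
```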
@@ -117,6 +120,10 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
if appGroup.Finalizers != nil {
logr.Info("cleaning up the applicationgroup resource")

// Change the app group spec into a progressing state
appGroup.Progressing()
_ = r.Status().Patch(ctx, &appGroup, patch)

// Reverse the entire workflow to remove all the Helm Releases

// unset the last successful spec annotation
@@ -127,8 +134,8 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
requeue = false
_ = r.cleanupWorkflow(ctx, logr, appGroup)
appGroup.Finalizers = nil
_ = r.Update(ctx, &appGroup)
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, nil)
_ = r.Patch(ctx, &appGroup, patch)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, nil)
}
// Do nothing
return ctrl.Result{Requeue: false}, nil
@@ -140,51 +147,63 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
// Add finalizer if it doesnt already exist
if appGroup.Finalizers == nil {
appGroup.Finalizers = []string{finalizer}
_ = r.Update(ctx, &appGroup)
return ctrl.Result{Requeue: true}, nil
err = r.Patch(ctx, &appGroup, patch)
if err != nil {
return ctrl.Result{}, err
}
}

// If the (needs) Rollback phase is present in the reconciled version,
// we must rollback the application group to the last successful spec.
// This should only happen on updates and not during installs.
if appGroup.Status.Phase == orkestrav1alpha1.Rollback {
if appGroup.GetDeployCondition() == meta.RollingBackReason {
logr.Info("Rolling back to last successful application group spec")
appGroup.Spec = r.lastSuccessfulApplicationGroup.DeepCopy().Spec
err = r.Update(ctx, &appGroup)
err = r.Patch(ctx, &appGroup, patch)
if err != nil {
appGroup.DeployFailed(err.Error())
logr.Error(err, "failed to update ApplicationGroup instance while rolling back")
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}
appGroup.Status.Phase = ""

// If we are able to update to the previous spec
// Change the app group spec into a progressing state
appGroup.Progressing()
_ = r.Status().Patch(ctx, &appGroup, patch)

requeue = true
err = nil
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

// Create/Update scenario
// Compares the current generation to the generation that was last
// seen and updated by the reconciler
if appGroup.Generation != appGroup.Status.ObservedGeneration {
// Update scenario if observed generation isn't past the initial 0 generation
if appGroup.Status.ObservedGeneration != 0 {
appGroup.Status.Update = true
}
// Change the app group spec into a progressing state
appGroup.Progressing()
_ = r.Status().Patch(ctx, &appGroup, patch)

requeue, err = r.reconcile(ctx, logr, r.WorkflowNS, &appGroup)
if err != nil {
logr.Error(err, "failed to reconcile ApplicationGroup instance")
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

appGroup.Status.ObservedGeneration = appGroup.Generation
switch appGroup.Status.Phase {
case orkestrav1alpha1.Init, orkestrav1alpha1.Running:
switch appGroup.GetReadyCondition() {
case meta.ProgressingReason:
logr.V(1).Info("workflow in init/running state. requeue and reconcile after a short period")
requeue = true
err = nil
case orkestrav1alpha1.Succeeded:
case meta.SucceededReason:
logr.V(1).Info("workflow ran to completion and succeeded")
requeue = false
err = nil
case orkestrav1alpha1.Error:
case meta.FailedReason:
requeue = false
err = fmt.Errorf("workflow in failure/error condition")
logr.Error(err, "workflow in failure/error condition")
@@ -193,7 +212,14 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
err = nil
}

return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
if err == nil {
// Only update the observed generation when the reconciliation succeeds
// This only updates on changes to spec
appGroup.Status.ObservedGeneration = appGroup.Generation
appGroup.DeploySucceeded()
}

return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

// Calculate the cumulative status of the generated Workflow
@@ -212,86 +238,49 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
if err != nil {
logr.Error(err, "failed to find generated workflow instance")
requeue = false
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

if wfs.Items.Len() == 0 {
err = fmt.Errorf("listed workflows len is 0")
err = fmt.Errorf("no associated workflow found")
logr.Error(err, "no associated workflow found")
requeue = false
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

// determine the associated/generated workflow status
var phase orkestrav1alpha1.ReconciliationPhase
wfStatus := wfs.Items[0].Status.Phase
switch wfStatus {
case v1alpha12.NodeError, v1alpha12.NodeFailed:
phase = orkestrav1alpha1.Error
case v1alpha12.NodePending, v1alpha12.NodeRunning:
phase = orkestrav1alpha1.Running
appGroup.ReadyFailed(string(wfStatus))
case v1alpha12.NodeSucceeded:
phase = orkestrav1alpha1.Succeeded
// case v1alpha12.NodeSkipped:
// phase = orkestrav1alpha1.Succeeded
appGroup.ReadySucceeded()
}

appGroup.Status.Phase = phase

helmReleaseStatusMap := make(map[string]helmopv1.HelmReleasePhase)

// XXX (nitishm) Not sure why this happens ???
// Lookup all associated HelmReleases for status as well since the Workflow will not always reflect the status of the HelmRelease
// Lookup Workflow by ownership and heritage labels
helmReleases := helmopv1.HelmReleaseList{}
err = r.List(ctx, &helmReleases, listOption)
helmReleaseStatusMap, chartConditionMap, subChartConditionMap, err := r.marshallChartStatus(ctx, appGroup)
if err != nil {
logr.Error(err, "failed to find generated HelmRelease instance")
requeue = false
return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, false, err)
}

for _, hr := range helmReleases.Items {
name := hr.Name
if v, ok := hr.GetAnnotations()["orkestra/parent-chart"]; ok {
// Use the parent charts name
name = v
}
// XXX (nitishm) Needs more thought and testing
if _, ok := helmReleaseStatusMap[name]; ok {
if hr.Status.Phase != helmopv1.HelmReleasePhaseSucceeded {
helmReleaseStatusMap[name] = hr.Status.Phase
}
} else {
helmReleaseStatusMap[name] = hr.Status.Phase
}
}

// Update each application status using the HelmRelease status
v := make([]orkestrav1alpha1.ApplicationStatus, 0)
for _, app := range appGroup.Status.Applications {
app.ChartStatus.Phase = helmReleaseStatusMap[app.Name]
v = append(v, app)
}
appGroup.Status.Applications = v
appGroup.Status.Applications = getAppStatus(&appGroup, helmReleaseStatusMap, chartConditionMap, subChartConditionMap)

// This is the cumulative status from the workflow phase and the helmrelease object statuses
err = componentStatus(phase, helmReleaseStatusMap)
err = allHelmReleaseStatus(appGroup, helmReleaseStatusMap)
if err != nil {
// Any error arising from the workflow or the helmreleases should be marked as a NodeError
appGroup.Status.Phase = orkestrav1alpha1.Error
logr.Error(err, "")
appGroup.ReadyFailed(err.Error())
}

switch appGroup.Status.Phase {
case orkestrav1alpha1.Running, orkestrav1alpha1.Init:
switch appGroup.GetReadyCondition() {
case meta.ProgressingReason:
logr.V(1).Info("workflow in init/running state. requeue and reconcile after a short period")
requeue = true
err = nil
case orkestrav1alpha1.Succeeded:
case meta.SucceededReason:
logr.V(1).Info("workflow ran to completion and succeeded")
requeue = false
err = nil
case orkestrav1alpha1.Error:
case meta.FailedReason:
requeue = false
err = fmt.Errorf("workflow in failure/error condition : %w", err)
logr.Error(err, "")
@@ -300,7 +289,7 @@ func (r *ApplicationGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
err = nil
}

return r.handleResponseAndEvent(ctx, logr, appGroup, requeue, err)
return r.handleResponseAndEvent(ctx, logr, appGroup, patch, requeue, err)
}

func (r *ApplicationGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
@@ -311,22 +300,23 @@ func (r *ApplicationGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}

func (r *ApplicationGroupReconciler) handleResponseAndEvent(ctx context.Context, logr logr.Logger, grp orkestrav1alpha1.ApplicationGroup, requeue bool, err error) (ctrl.Result, error) {
func (r *ApplicationGroupReconciler) handleResponseAndEvent(ctx context.Context, logr logr.Logger, grp orkestrav1alpha1.ApplicationGroup,
patch client.Patch, requeue bool, err error) (ctrl.Result, error) {
var errStr string
if err != nil {
errStr = err.Error()
grp.ReadyFailed(errStr)
} else {
grp.DeploySucceeded()
}

grp.Status.Error = errStr

_ = r.Status().Update(ctx, &grp)

if grp.Status.Error == "" && grp.Status.Phase == orkestrav1alpha1.Succeeded {
err2 := r.Status().Patch(ctx, &grp, patch)
if err2 == nil && grp.GetReadyCondition() == meta.SucceededReason {
// Annotate the resource with the last successful ApplicationGroup spec
b, _ := json.Marshal(&grp)
grp.SetAnnotations(map[string]string{lastSuccessfulApplicationGroupKey: string(b)})
r.lastSuccessfulApplicationGroup = grp.DeepCopy()
_ = r.Update(ctx, &grp)
_ = r.Patch(ctx, &grp, patch)

r.Recorder.Event(&grp, "Normal", "ReconcileSuccess", fmt.Sprintf("Successfully reconciled ApplicationGroup %s", grp.Name))
}
@@ -337,19 +327,19 @@ func (r *ApplicationGroupReconciler) handleResponseAndEvent(ctx context.Context,

if err != nil {
if !r.DisableRemediation {
return r.handleRemediation(ctx, logr, grp, err)
return r.handleRemediation(ctx, logr, grp, patch, err)
}
}

if requeue {
interval := requeueAfter
if grp.Status.Phase != orkestrav1alpha1.Running {
if grp.GetReadyCondition() != meta.ProgressingReason {
interval = requeueAfterLong
}
return reconcile.Result{Requeue: true, RequeueAfter: interval}, err
return reconcile.Result{RequeueAfter: interval}, nil
}

return reconcile.Result{Requeue: requeue}, err
return reconcile.Result{Requeue: requeue}, nil
}

func initApplications(appGroup *orkestrav1alpha1.ApplicationGroup) {
@@ -367,7 +357,8 @@ func initApplications(appGroup *orkestrav1alpha1.ApplicationGroup) {
}
}

func (r *ApplicationGroupReconciler) handleRemediation(ctx context.Context, logr logr.Logger, g orkestrav1alpha1.ApplicationGroup, err error) (ctrl.Result, error) {
func (r *ApplicationGroupReconciler) handleRemediation(ctx context.Context, logr logr.Logger, g orkestrav1alpha1.ApplicationGroup,
patch client.Patch, err error) (ctrl.Result, error) {
// Rollback to previous successful spec since the annotation was set and this is
// an UPDATE event
if r.lastSuccessfulApplicationGroup != nil {
@@ -390,13 +381,13 @@ func (r *ApplicationGroupReconciler) handleRemediation(ctx context.Context, logr
err = r.List(ctx, &helmReleases, listOption)
if err != nil {
logr.Error(err, "failed to find generated HelmRelease instances")
return reconcile.Result{Requeue: false}, err
return reconcile.Result{Requeue: false}, nil
}

err = r.rollbackFailedHelmReleases(ctx, helmReleases.Items)
if err != nil {
logr.Error(err, "failed to rollback failed HelmRelease instances")
return reconcile.Result{Requeue: false}, err
return reconcile.Result{Requeue: false}, nil
}
}
}
@@ -404,18 +395,97 @@ func (r *ApplicationGroupReconciler) handleRemediation(ctx context.Context, logr
// mark the object as requiring rollback so that we can rollback
// to the previous versions of all the applications in the ApplicationGroup
// using the last successful spec
g.Status.Phase = orkestrav1alpha1.Rollback
_ = r.Status().Update(ctx, &g)

return reconcile.Result{Requeue: true, RequeueAfter: requeueAfter}, nil
g.RollingBack()
_ = r.Status().Patch(ctx, &g, patch)
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
// Reverse and cleanup the workflow and associated helmreleases
g.RollingBack()
_ = r.Status().Patch(ctx, &g, patch)

_ = r.cleanupWorkflow(ctx, logr, g)

return reconcile.Result{Requeue: false}, err
return reconcile.Result{}, nil
}

func componentStatus(phase orkestrav1alpha1.ReconciliationPhase, apps map[string]helmopv1.HelmReleasePhase) error {
// marshallChartStatus lists all of the HelmRelease objects that were deployed and assigns
// their status to the appropriate maps corresponding to their chart of subchart.
// These statuses are used to update the application status above
func (r *ApplicationGroupReconciler) marshallChartStatus(ctx context.Context, appGroup orkestrav1alpha1.ApplicationGroup) (
helmReleaseStatusMap map[string]helmopv1.HelmReleasePhase,
chartConditionMap map[string][]metav1.Condition,
subChartConditionMap map[string]map[string][]metav1.Condition,
err error) {
listOption := client.MatchingLabels{
workflow.OwnershipLabel: appGroup.Name,
workflow.HeritageLabel: workflow.Project,
}

// Init the mappings
helmReleaseStatusMap = make(map[string]helmopv1.HelmReleasePhase)
chartConditionMap = make(map[string][]metav1.Condition)
subChartConditionMap = make(map[string]map[string][]metav1.Condition)

// XXX (nitishm) Not sure why this happens ???
// Lookup all associated HelmReleases for status as well since the Workflow will not always reflect the status of the HelmRelease
// Lookup Workflow by ownership and heritage labels
helmReleases := helmopv1.HelmReleaseList{}
err = r.List(ctx, &helmReleases, listOption)
if err != nil {
r.Log.Error(err, "failed to find generated HelmRelease instance")
return nil, nil, nil, err
}

for _, hr := range helmReleases.Items {
parent := hr.Name
if v, ok := hr.GetAnnotations()["orkestra/parent-chart"]; ok {
// Use the parent charts name
parent = v
}

// Add the associated conditions for that helm chart to the helm chart condition
// If the helm chart is a subchart, then add that to the subchart condition
if parent == hr.Name {
chartConditionMap[parent] = append(chartConditionMap[parent], meta.ToStatusConditions(hr.Status.Conditions)...)
} else {
if _, ok := subChartConditionMap[parent]; !ok {
subChartConditionMap[parent] = make(map[string][]metav1.Condition)
}
subChartConditionMap[parent][hr.Spec.ReleaseName] = append(subChartConditionMap[parent][hr.Spec.ReleaseName], meta.ToStatusConditions(hr.Status.Conditions)...)
}

// XXX (nitishm) Needs more thought and testing
if _, ok := helmReleaseStatusMap[parent]; ok {
if hr.Status.Phase != helmopv1.HelmReleasePhaseSucceeded {
helmReleaseStatusMap[parent] = hr.Status.Phase
}
} else {
helmReleaseStatusMap[parent] = hr.Status.Phase
}
}
return helmReleaseStatusMap, chartConditionMap, subChartConditionMap, nil
}

func getAppStatus(
appGroup *orkestrav1alpha1.ApplicationGroup,
helmReleaseStatusMap map[string]helmopv1.HelmReleasePhase,
chartConditionMap map[string][]metav1.Condition,
subChartConditionMap map[string]map[string][]metav1.Condition) []orkestrav1alpha1.ApplicationStatus {
// Update each application status using the HelmRelease status
var v []orkestrav1alpha1.ApplicationStatus
for _, app := range appGroup.Status.Applications {
app.ChartStatus.Conditions = chartConditionMap[app.Name]
app.ChartStatus.Phase = helmReleaseStatusMap[app.Name]
for subchartName, subchartStatus := range app.Subcharts {
subchartStatus.Conditions = subChartConditionMap[app.Name][subchartName]
app.Subcharts[subchartName] = subchartStatus
}
v = append(v, app)
}
return v
}

func allHelmReleaseStatus(appGroup orkestrav1alpha1.ApplicationGroup, apps map[string]helmopv1.HelmReleasePhase) error {
for _, v := range apps {
switch v {
case helmopv1.HelmReleasePhaseFailed, helmopv1.HelmReleasePhaseDeployFailed, helmopv1.HelmReleasePhaseChartFetchFailed, helmopv1.HelmReleasePhaseTestFailed, helmopv1.HelmReleasePhaseRollbackFailed:
@@ -423,10 +493,9 @@ func componentStatus(phase orkestrav1alpha1.ReconciliationPhase, apps map[string
}
}

if phase == orkestrav1alpha1.Error {
if appGroup.GetReadyCondition() == meta.FailedReason {
return ErrWorkflowInFailureStatus
}

return nil
}
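marshallChartStatus above relies on meta.ToStatusConditions to translate helm-operator HelmRelease conditions into the metav1.Condition values stored on ChartStatus. That helper is not shown in this diff; a minimal sketch of what such a conversion could look like (the field mapping and the omitted timestamp handling are assumptions):

```go
// Hypothetical sketch of a ToStatusConditions-style conversion; the real
// helper in pkg/meta may map fields and timestamps differently.
package meta

import (
	helmopv1 "github.com/fluxcd/helm-operator/pkg/apis/helm.fluxcd.io/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ToStatusConditions(in []helmopv1.HelmReleaseCondition) []metav1.Condition {
	out := make([]metav1.Condition, 0, len(in))
	for _, c := range in {
		out = append(out, metav1.Condition{
			Type:    string(c.Type),
			Status:  metav1.ConditionStatus(c.Status),
			Reason:  c.Reason,
			Message: c.Message,
			// LastTransitionTime is intentionally left to the caller in this sketch.
		})
	}
	return out
}
```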
@ -2,7 +2,6 @@ package controllers
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
|
@ -12,7 +11,6 @@ import (
|
|||
orkestrav1alpha1 "github.com/Azure/Orkestra/api/v1alpha1"
|
||||
"github.com/Azure/Orkestra/pkg"
|
||||
"github.com/Azure/Orkestra/pkg/registry"
|
||||
"github.com/Azure/Orkestra/pkg/workflow"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/jinzhu/copier"
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
|
@ -42,7 +40,6 @@ func (r *ApplicationGroupReconciler) reconcile(ctx context.Context, l logr.Logge
|
|||
if len(appGroup.Spec.Applications) == 0 {
|
||||
l.Error(ErrInvalidSpec, "ApplicationGroup must list atleast one Application")
|
||||
err := fmt.Errorf("application group must list atleast one Application : %w", ErrInvalidSpec)
|
||||
appGroup.Status.Error = err.Error()
|
||||
return false, err
|
||||
}
|
||||
|
||||
|
@ -71,7 +68,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
|
|||
repoCfg, err := registry.GetHelmRepoConfig(&application, r.Client)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to get repo configuration for repo at URL %s: %w", application.Spec.Chart.RepoURL, err)
|
||||
appGroup.Status.Error = err.Error()
|
||||
ll.Error(err, "failed to add helm repo ")
|
||||
return err
|
||||
}
|
||||
|
@ -79,7 +75,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
|
|||
err = r.RegistryClient.AddRepo(repoCfg)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to add helm repo at URL %s: %w", application.Spec.Chart.RepoURL, err)
|
||||
appGroup.Status.Error = err.Error()
|
||||
ll.Error(err, "failed to add helm repo ")
|
||||
return err
|
||||
}
|
||||
|
@ -101,7 +96,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
|
|||
}()
|
||||
if err != nil || appCh == nil {
|
||||
err = fmt.Errorf("failed to pull application chart %s/%s:%s : %w", repoKey, name, version, err)
|
||||
appGroup.Status.Error = err.Error()
|
||||
ll.Error(err, "failed to pull application chart")
|
||||
return err
|
||||
}
|
||||
|
@@ -131,7 +125,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
		err = fmt.Errorf("failed to validate application subchart for staging registry : %w", err)
		cs.Error = err.Error()
		appGroup.Status.Applications[i].Subcharts[sc.Name()] = cs
		appGroup.Status.Error = cs.Error
		return err
	}

@@ -153,7 +146,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
		err = fmt.Errorf("failed to save subchart package as tgz at location %s : %w", path, err)
		cs.Error = err.Error()
		appGroup.Status.Applications[i].Subcharts[sc.Name()] = cs
		appGroup.Status.Error = cs.Error
		return err
	}

@@ -163,7 +155,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
		err = fmt.Errorf("failed to push application subchart to staging registry : %w", err)
		cs.Error = err.Error()
		appGroup.Status.Applications[i].Subcharts[sc.Name()] = cs
		appGroup.Status.Error = cs.Error
		return err
	}

@@ -193,7 +184,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
	if err != nil {
		ll.Error(err, "chart templates directory yaml check failed")
		err = fmt.Errorf("chart templates directory yaml check failed : %w", err)
		appGroup.Status.Error = err.Error()
		appGroup.Status.Applications[i].ChartStatus.Error = err.Error()
		return err
	}

@@ -213,7 +203,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
	if err := appCh.Validate(); err != nil {
		ll.Error(err, "failed to validate application chart for staging registry")
		err = fmt.Errorf("failed to validate application chart for staging registry : %w", err)
		appGroup.Status.Error = err.Error()
		appGroup.Status.Applications[i].ChartStatus.Error = err.Error()
		return err
	}

@@ -222,7 +211,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
	if err != nil {
		ll.Error(err, "failed to save modified app chart to filesystem")
		err = fmt.Errorf("failed to save modified app chart to filesystem : %w", err)
		appGroup.Status.Error = err.Error()
		appGroup.Status.Applications[i].ChartStatus.Error = err.Error()
		return err
	}

@@ -239,7 +227,6 @@ func (r *ApplicationGroupReconciler) reconcileApplications(l logr.Logger, appGro
		ll.Error(err, "failed to push modified application chart to staging registry")
		err = fmt.Errorf("failed to push modified application chart to staging registry : %w", err)
		appGroup.Status.Applications[i].ChartStatus.Error = err.Error()
		appGroup.Status.Error = err.Error()
		return err
	}
@@ -258,16 +245,9 @@ func (r *ApplicationGroupReconciler) generateWorkflow(ctx context.Context, logr

	err = r.Engine.Submit(ctx, logr, g)
	if err != nil {
		if errors.Is(err, workflow.ErrNamespaceTerminating) {
			logr.V(1).Info("namespace is in terminating state")
			return true, err
		}
		logr.Error(err, "engine failed to submit workflow")
		return false, err
	}

	g.Status.Phase = orkestrav1alpha1.Init

	return true, nil
}
@@ -5,10 +5,8 @@ metadata:
spec:
  applications:
    - name: ambassador
      namespace: ambassador
      dependencies: []
      spec:
        groupId: "bookinfo"
        chart:
          repository: "https://www.getambassador.io/helm"
          name: ambassador

@@ -30,10 +28,8 @@ spec:
          service:
            type: ClusterIP
    - name: bookinfo
      namespace: bookinfo
      dependencies: [ambassador]
      spec:
        groupId: "bookinfo"
        chart:
          repository: "https://nitishm.github.io/istio-bookinfo-chart"
          name: bookinfo
go.mod (38 changed lines)
@@ -5,34 +5,22 @@ go 1.15

require (
	github.com/argoproj/argo v2.5.2+incompatible
	github.com/chartmuseum/helm-push v0.9.0
	github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08
	github.com/fluxcd/helm-operator v1.2.0
	github.com/go-delve/delve v1.5.1 // indirect
	github.com/go-logr/logr v0.1.0
	github.com/gofrs/flock v0.7.1
	github.com/golang/protobuf v1.4.3 // indirect
	github.com/go-logr/logr v0.3.0
	github.com/gofrs/flock v0.8.0
	github.com/google/go-cmp v0.5.2
	github.com/jinzhu/copier v0.2.8
	github.com/mitchellh/hashstructure/v2 v2.0.1
	github.com/onsi/ginkgo v1.14.0
	github.com/onsi/gomega v1.10.1
	github.com/spf13/viper v1.7.0
	go.opencensus.io v0.22.5 // indirect
	golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect
	golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 // indirect
	golang.org/x/text v0.3.4 // indirect
	google.golang.org/appengine v1.6.6
	google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb // indirect
	google.golang.org/grpc v1.33.2 // indirect
	gopkg.in/yaml.v2 v2.4.0
	helm.sh/helm/v3 v3.3.4
	k8s.io/api v0.17.5
	k8s.io/apimachinery v0.17.5
	k8s.io/api v0.20.2
	k8s.io/apimachinery v0.20.2
	k8s.io/client-go v11.0.0+incompatible
	k8s.io/helm v2.16.12+incompatible
	sigs.k8s.io/controller-runtime v0.4.0
	sigs.k8s.io/yaml v1.1.0
	sigs.k8s.io/controller-runtime v0.8.3
	sigs.k8s.io/yaml v1.2.0
)

// Hack to import helm-operator package

@@ -45,23 +33,17 @@ replace (
	github.com/fluxcd/flux => github.com/fluxcd/flux v1.19.0
	github.com/fluxcd/flux/pkg/install => github.com/fluxcd/flux/pkg/install v0.0.0-20200402061723-01a239a69319
	github.com/fluxcd/helm-operator/pkg/install => github.com/fluxcd/helm-operator/pkg/install v0.0.0-20200407140510-8d71b0072a3e
	k8s.io/api => k8s.io/api v0.20.2
	k8s.io/client-go => k8s.io/client-go v0.20.2
)

// Pin Flux to 1.18.0

// Force upgrade because of a transitive downgrade.
// github.com/fluxcd/helm-operator
// +-> github.com/fluxcd/flux@v1.17.2
// +-> k8s.io/client-go@v11.0.0+incompatible
replace k8s.io/client-go => k8s.io/client-go v0.17.2

// Force upgrade because of a transitive downgrade.
// github.com/fluxcd/flux
// +-> github.com/fluxcd/helm-operator@v1.0.0-rc6
// +-> helm.sh/helm/v3@v3.1.2
// +-> helm.sh/helm@v2.16.1
replace (
	helm.sh/helm/v3 => helm.sh/helm/v3 v3.1.2
	helm.sh/helm/v3 => helm.sh/helm/v3 v3.5.3
	k8s.io/helm => k8s.io/helm v2.16.3+incompatible
)

@@ -75,6 +57,4 @@ replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-

// End hack

replace github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.1

replace github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d
go.sum (677 changed lines) — diff not shown because of its size.
main.go (4 changed lines)
@@ -72,7 +72,7 @@ func main() {
		ctrl.SetLogger(zap.New(zap.UseDevMode(false)))
	}

	ctrl.Log.Logger.V(debugLevel)
	ctrl.Log.V(debugLevel)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme: scheme,

@@ -96,7 +96,7 @@ func main() {
	}

	rc, err := registry.NewClient(
		ctrl.Log.Logger,
		ctrl.Log,
		registry.TargetDir(tempChartStoreTargetDir),
	)
	if err != nil {
@@ -0,0 +1,87 @@
/*
Copyright 2020 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package meta

import (
	helmopv1 "github.com/fluxcd/helm-operator/pkg/apis/helm.fluxcd.io/v1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// ReadyCondition is the name of the workflow condition
	// This captures the status of the entire ApplicationGroup
	ReadyCondition string = "Ready"

	// DeployCondition is the name of the Deploy condition
	// This captures the state of receiving and reacting to the spec by the reconciler
	DeployCondition string = "Deploy"
)

const (
	// SucceededReason represents the fact that the reconciliation succeeded
	SucceededReason string = "Succeeded"

	// FailedReason represents the fact that the reconciliation failed
	FailedReason string = "Failed"

	// ProgressingReason represents the fact that the workflow is in a starting
	// or running state, we have not reached a terminal state yet
	ProgressingReason string = "Progressing"

	// RollingBackReason represents the fact that we are entering a rollback state
	// and is transitioning into a non-terminal state
	RollingBackReason string = "RollingBack"
)

// ObjectWithStatusConditions is an interface that describes kubernetes resource
// type structs with Status Conditions
// +k8s:deepcopy-gen=false
type ObjectWithStatusConditions interface {
	GetStatusConditions() *[]metav1.Condition
}

// SetResourceCondition sets the given condition with the given status,
// reason and message on a resource.
func SetResourceCondition(obj ObjectWithStatusConditions, condition string, status metav1.ConditionStatus, reason, message string) {
	conditions := obj.GetStatusConditions()

	newCondition := metav1.Condition{
		Type:    condition,
		Status:  status,
		Reason:  reason,
		Message: message,
	}

	apimeta.SetStatusCondition(conditions, newCondition)
}

func GetResourceCondition(obj ObjectWithStatusConditions, condition string) *metav1.Condition {
	conditions := obj.GetStatusConditions()
	return apimeta.FindStatusCondition(*conditions, condition)
}

func ToStatusConditions(conditions []helmopv1.HelmReleaseCondition) []metav1.Condition {
	var newConditions []metav1.Condition
	for _, condition := range conditions {
		newCondition := metav1.Condition{}
		newCondition.LastTransitionTime = *condition.LastTransitionTime
		newCondition.Status = metav1.ConditionStatus(condition.Status)
		newCondition.Message = condition.Message
		newCondition.Reason = condition.Reason
		newCondition.Type = string(condition.Type)
		newConditions = append(newConditions, newCondition)
	}
	return newConditions
}
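Editor's note: a hedged usage sketch of the pkg/meta helpers above, not part of this commit. It assumes the caller holds a HelmRelease hr and an ApplicationGroup appGroup, and that ApplicationGroup satisfies meta.ObjectWithStatusConditions; these names and that assumption are illustrative only.

	// Illustrative only: convert HelmRelease conditions and record a group-level
	// Ready condition, then branch on its reason.
	conditions := meta.ToStatusConditions(hr.Status.Conditions) // metav1.Condition slice
	_ = conditions

	meta.SetResourceCondition(&appGroup, meta.ReadyCondition, metav1.ConditionFalse,
		meta.ProgressingReason, "workflow is still running")

	if c := meta.GetResourceCondition(&appGroup, meta.ReadyCondition); c != nil && c.Reason == meta.FailedReason {
		// surface the failure to the caller, e.g. to trigger remediation
	}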
@@ -2,7 +2,6 @@ package workflow

import (
	"context"
	kerrors "errors"
	"fmt"
	"os"

@@ -34,8 +33,7 @@ const (
)

var (
	ErrNamespaceTerminating = kerrors.New("namespace is in terminating phase")
	timeout int64 = 3600
	timeout int64 = 3600
)

type argo struct {

@@ -141,10 +139,6 @@ func (a *argo) Submit(ctx context.Context, l logr.Logger, g *v1alpha1.Applicatio
			}
		}
	}

		if ns.Status.Phase == corev1.NamespaceTerminating {
			return ErrNamespaceTerminating
		}
	}

	err := a.cli.Get(ctx, types.NamespacedName{Namespace: a.wf.Namespace, Name: a.wf.Name}, obj)