Signed-off-by: Nitish Malhotra <nitish.malhotra@gmail.com>
This commit is contained in:
Nitish Malhotra 2021-08-31 14:08:54 -07:00
Parent accd63e1d4
Commit 597f96b71b
37 changed files with 3090 additions and 0 deletions

.github/CODEOWNERS vendored Normal file

@@ -0,0 +1,9 @@
# This is a comment.
# Each line is a file pattern followed by one or more owners.
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
# @global-owner1 and @global-owner2 will be requested for
# review when someone opens a pull request.
* @nitishm @jonathan-innis

.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,29 @@
---
name: Bug report
about: Create a report to help us improve Orkestra
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please complete the following information):**
- Kubernetes Version [e.g. 1.21.0]
- Kubernetes Distro [e.g. AKS, GKE, etc.]
- Orkestra Version Tag
- Helm Version
**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md vendored Normal file

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for Orkestra
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

.github/release-drafter.yml vendored Normal file

@@ -0,0 +1,17 @@
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
template: |
  ## Changes
  $CHANGES
categories:
  - title: '🚀 Features'
    labels:
      - 'feat'
      - 'enhancement'
  - title: '🐛 Bug Fixes'
    labels:
      - 'fix'
      - 'bug'
  - title: '🧰 Maintenance'
    label: 'chore'

.github/workflows/docker.yaml vendored Normal file

@@ -0,0 +1,47 @@
name: Docker Build & Push
on:
  push:
    branches:
      - 'main'
    tags:
      - 'v*'
  pull_request:
    branches:
      - 'main'
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Docker meta
        id: meta-default
        uses: crazy-max/ghaction-docker-meta@v2
        with:
          # list of Docker images to use as base name for tags
          images: azureorkestra/keptn-executor
          # generate Docker tags based on the following events/attributes
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{raw}}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest
      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push keptn
        uses: docker/build-push-action@v2
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta-default.outputs.tags }}

.github/workflows/lint.yaml vendored Normal file

@@ -0,0 +1,16 @@
name: golangci-lint
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: golangci-lint-default
        uses: golangci/golangci-lint-action@v2
        with:
          working-directory: .

.github/workflows/release-drafter.yml vendored Normal file

@@ -0,0 +1,21 @@
name: Release Drafter
on:
  push:
    # branches to consider in the event; optional, defaults to all
    branches:
      - main
  # pull_request event is required only for autolabeler
  pull_request:
    # Only the following types are handled by the action, but one can default to all as well
    types: [opened, reopened, synchronize]
jobs:
  update_release_draft:
    runs-on: ubuntu-latest
    steps:
      # Drafts your next release notes as pull requests are merged into "main"
      - uses: release-drafter/release-drafter@v5
        with:
          config-name: release-drafter.yml
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/testing.yaml vendored Normal file

@@ -0,0 +1,36 @@
name: Testing
on:
  pull_request:
    branches: [main]
  push:
    branches: [main]
jobs:
  testing:
    env:
      working-directory: .
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go: [ '1.16', '1.15' ]
    name: Go ${{ matrix.go }} testing
    steps:
      - uses: actions/checkout@v2
      - name: Setup go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go }}
      - name: Restore Go cache
        uses: actions/cache@v1
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - run: |
          make test
        working-directory: ${{env.working-directory}}
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          file: ./coverage.txt
          working-directory: ${{env.working-directory}}

Dockerfile Normal file

@@ -0,0 +1,16 @@
FROM golang:1.16 as builder
WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
RUN go mod download
COPY pkg pkg
COPY main.go main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o executor main.go

FROM alpine:3.13
COPY --from=builder /workspace/executor .
ENTRYPOINT [ "./executor" ]

Makefile Normal file

@@ -0,0 +1,36 @@
# Image URL to use all building/pushing image targets
IMG ?= azureorkestra/keptn-executor:latest

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif

all: build

# Run tests
test: fmt vet
	go test ./... -coverprofile coverage.txt

# Build executor binary
build: fmt vet
	go build -o bin/executor main.go

# Run go fmt against code
fmt:
	go fmt ./...

# Run go vet against code
vet:
	go vet ./...

# Build the docker image
docker-build: test
	docker build . -t ${IMG}

# Push the docker image
docker-push:
	docker push ${IMG}

go.mod Normal file

@@ -0,0 +1,27 @@
module github.com/Azure/keptn-workflow-executor

go 1.16

require (
	code.gitea.io/sdk/gitea v0.14.1
	github.com/cloudevents/sdk-go/v2 v2.4.1
	github.com/fluxcd/helm-controller/api v0.10.0
	github.com/google/uuid v1.2.0
	github.com/keptn/go-utils v0.8.5
	github.com/keptn/kubernetes-utils v0.8.3
	github.com/sirupsen/logrus v1.8.1
	gopkg.in/yaml.v2 v2.4.0
	helm.sh/helm/v3 v3.6.0
	k8s.io/api v0.21.1
	k8s.io/apimachinery v0.21.1
	k8s.io/client-go v0.21.1
	k8s.io/kubectl v0.21.0
	sigs.k8s.io/cli-utils v0.25.0
	sigs.k8s.io/controller-runtime v0.8.3
	sigs.k8s.io/yaml v1.2.0
)

replace (
	github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d
	github.com/docker/docker => github.com/moby/moby v1.4.2-0.20200203170920-46ec8731fbce
)

go.sum Normal file

File diff suppressed because it is too large

main.go Normal file

@@ -0,0 +1,120 @@
package main
import (
"context"
"encoding/base64"
"flag"
"fmt"
"log"
"time"

"github.com/Azure/keptn-workflow-executor/pkg/actions"
fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
)
const (
// The set of executor actions which can be performed on a helmrelease object
Install ExecutorAction = "install"
Delete ExecutorAction = "delete"
)
// ExecutorAction defines the set of executor actions which can be performed on a helmrelease object
type ExecutorAction string
func ParseExecutorAction(s string) (ExecutorAction, error) {
a := ExecutorAction(s)
switch a {
case Install, Delete:
return a, nil
}
return "", fmt.Errorf("invalid executor action: %v", s)
}
func main() {
var spec string
var configMapName, configMapNamespace string
var actionStr string
var timeoutStr string
var intervalStr string
flag.StringVar(&spec, "spec", "", "Spec of the helmrelease object to apply")
flag.StringVar(&actionStr, "action", "", "Action to perform on the helmrelease object. Must be either install or delete")
flag.StringVar(&timeoutStr, "timeout", "5m", "Timeout for the execution of the argo workflow task")
flag.StringVar(&intervalStr, "interval", "10s", "Retry interval for the all actions by the executor")
// Executor specific params
flag.StringVar(&configMapName, "configmap-name", "", "name of the configmap containing the shipyard.yaml, plugin configuration and other resources")
flag.StringVar(&configMapNamespace, "configmap-namespace", "", "namespace of the configmap containing the shipyard.yaml, plugin configuration and other resources")
flag.Parse()
action, err := ParseExecutorAction(actionStr)
if err != nil {
log.Fatalf("Failed to parse action as an executor action with %v", err)
}
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
log.Fatalf("Failed to parse timeout as a duration with %v", err)
}
interval, err := time.ParseDuration(intervalStr)
if err != nil {
log.Fatalf("Failed to parse interval as a duration with %v", err)
}
log.Printf("Parsed the action: %v, the timeout: %v and the interval: %v", string(action), timeout.String(), interval.String())
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), nil)
config, err := kubeConfig.ClientConfig()
if err != nil {
log.Fatalf("Failed to initialize the client config with %v", err)
}
decodedSpec, err := base64.StdEncoding.DecodeString(spec)
if err != nil {
log.Fatalf("Failed to decode the string as a base64 string; got the string %v", spec)
}
log.Printf("Successfully base64 decoded the spec")
hr := &fluxhelmv2beta1.HelmRelease{}
hr := &fluxhelmv2beta1.HelmRelease{}
if err := yaml.Unmarshal(decodedSpec, hr); err != nil {
log.Fatalf("Failed to unmarshal the spec into a HelmRelease with the err %v", err)
}
k8sScheme := scheme.Scheme
clientSet, err := client.New(config, client.Options{Scheme: k8sScheme})
if err != nil {
log.Fatalf("Failed to create the clientset with the given config with %v", err)
}
configmapObj := &corev1.ConfigMap{}
if err := clientSet.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: configMapNamespace}, configmapObj); err != nil {
log.Fatalf("failed to get ConfigMap : %v", err)
}
if configmapObj.Data == nil {
log.Fatalf("configmap data field cannot be nil")
}
if len(configmapObj.Data) == 0 {
log.Fatalf("configmap data field cannot be empty")
}
if action == Install {
if err := actions.Install(ctx, cancel, hr, interval, configmapObj.Data); err != nil {
log.Fatalf("failed to trigger keptn evaluation: %v", err)
}
} else if action == Delete {
if err := actions.Delete(ctx, cancel, clientSet, hr, interval, configmapObj.Data); err != nil {
log.Fatalf("failed to cleanup keptn application resources: %v", err)
}
}
}

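The `--spec` flag above carries the HelmRelease as base64-encoded YAML. A minimal sketch of how a caller might produce that argument; the release name and namespace are illustrative, and main.go simply reverses these steps with `base64.StdEncoding.DecodeString` followed by `yaml.Unmarshal`:

```go
package main

import (
	"encoding/base64"
	"fmt"

	fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// A hypothetical HelmRelease; only name and namespace matter to the
	// executor, which derives the keptn project name from them.
	hr := &fluxhelmv2beta1.HelmRelease{
		TypeMeta: metav1.TypeMeta{
			APIVersion: fluxhelmv2beta1.GroupVersion.String(),
			Kind:       fluxhelmv2beta1.HelmReleaseKind,
		},
		ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "webserver"},
	}
	raw, err := yaml.Marshal(hr)
	if err != nil {
		panic(err)
	}
	// This string is what would be passed as --spec.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```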
pkg/actions/delete.go Normal file

@@ -0,0 +1,45 @@
package actions
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/Azure/keptn-workflow-executor/pkg/keptn"
fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func Delete(ctx context.Context, cancel context.CancelFunc, clientSet client.Client, hr *fluxhelmv2beta1.HelmRelease, interval time.Duration, data map[string]string) error {
keptnConfig := &keptn.Config{}
// Read the keptn-config.yaml file.
// This file is required and cannot have empty fields
v, ok := data[keptn.KeptnConfigFileName]
if !ok {
return fmt.Errorf("failed to read plugin configuration from configmap")
}
if err := json.Unmarshal([]byte(v), keptnConfig); err != nil {
return fmt.Errorf("failed to unmarshal the keptn configuration file into a Config object: %w", err)
}
if err := keptnConfig.Validate(); err != nil {
return err
}
keptnCli, err := keptn.New(keptnConfig.URL, keptnConfig.Namespace, keptnConfig.Token.SecretRef.Name, nil)
if err != nil {
return fmt.Errorf("failed to create the keptn client %w", err)
}
appName := strings.ToLower(hr.Name + "-" + hr.Namespace)
if err := keptnCli.DeleteProject(appName); err != nil {
if errors.Is(err, keptn.ErrFailedDeleteProject) {
return nil
}
return err
}
return nil
}

pkg/actions/install.go Normal file

@@ -0,0 +1,103 @@
package actions
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/keptn-workflow-executor/pkg/keptn"
"github.com/Azure/keptn-workflow-executor/pkg/status"
fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1"
)
func Install(ctx context.Context, cancel context.CancelFunc, hr *fluxhelmv2beta1.HelmRelease, interval time.Duration, data map[string]string) error {
keptnConfig := &keptn.Config{}
// Read the keptn-config.yaml file.
// This file is required and cannot have empty fields
v, ok := data[keptn.KeptnConfigFileName]
if !ok {
return fmt.Errorf("failed to read plugin configuration from configmap")
}
if err := json.Unmarshal([]byte(v), keptnConfig); err != nil {
return fmt.Errorf("failed to unmarshal the keptn configuration file into a Config object: %w", err)
}
if err := keptnConfig.Validate(); err != nil {
return err
}
keptnCli, err := keptn.New(keptnConfig.URL, keptnConfig.Namespace, keptnConfig.Token.SecretRef.Name, nil)
if err != nil {
return fmt.Errorf("failed to create the keptn client %w", err)
}
shipyard, ok := data[keptn.ShipyardFileName]
if !ok {
return fmt.Errorf("shipyard.yaml not found")
}
appName := strings.ToLower(hr.Name + "-" + hr.Namespace)
if err := keptnCli.CreateOrUpdateProject(appName, shipyard); err != nil {
return err
}
if err := keptnCli.CreateService(appName, appName); err != nil {
return err
}
for k, v := range data {
if err := keptnCli.AddResourceToAllStages(appName, appName, k, v); err != nil {
return err
}
}
if err := keptnCli.ConfigureMonitoring(appName, appName, "prometheus"); err != nil {
return err
}
keptnCtx, err := keptnCli.TriggerEvaluation(appName, appName, keptnConfig.Timeframe)
if err != nil {
return err
}
if err := pollStatus(ctx, keptnCli, keptnCtx, appName, appName, interval); err != nil {
return fmt.Errorf("failed to poll status with: %w", err)
}
return nil
}
func pollStatus(ctx context.Context, keptnCli *keptn.Keptn, keptnCtx, project, service string, interval time.Duration) error {
statusPoller := func(done chan<- bool) {
start := time.Now()
defer func() {
fmt.Printf("polling status finished execution in %v\n", time.Since(start))
}()
// look up the status of the keptn evaluation finished event
if err := keptnCli.GetEvents(service, project, keptnCtx); err != nil {
if errors.Is(err, keptn.ErrEvaluationFailed) {
log.Fatalf("evaluation failed with error : %v", err)
}
return
}
done <- true
}
// Poll the keptn evaluation status repeatedly until the timeout
if err := status.Poll(ctx, statusPoller, interval); err != nil {
return fmt.Errorf("timed out waiting for the evaluation to finish: %w", err)
}
}
return nil
}

pkg/keptn/config.go Normal file

@@ -0,0 +1,37 @@
package keptn
import (
"fmt"
"time"
)
type Config struct {
URL string `json:"url,omitempty"`
Namespace string `json:"namespace,omitempty"`
Token KeptnAPIToken `json:"token,omitempty"`
Timeframe string `json:"timeframe,omitempty"`
}
func (k *Config) Validate() error {
if k.URL == "" {
return fmt.Errorf("keptn API server (nginx) URL must be specified")
}
if k.Namespace == "" {
return fmt.Errorf("keptn namespace must be specified")
}
if k.Token.SecretRef == nil || k.Token.SecretRef.Name == "" {
return fmt.Errorf("keptn API token secret name must be specified")
}
if k.Timeframe == "" {
return fmt.Errorf("keptn evaluation timeframe must be specified")
}
if _, err := time.ParseDuration(k.Timeframe); err != nil {
return fmt.Errorf("keptn evaluation timeframe must be a valid duration such as 5s, 2m, or 1h")
}
return nil
}

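The configuration that `Validate` checks arrives as the `keptn-config.json` entry of the executor's ConfigMap (compare `testing/keptn-config.json` later in this commit). A small sketch of loading and validating it; the URL below is illustrative:

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/Azure/keptn-workflow-executor/pkg/keptn"
)

func main() {
	// Mirrors the shape of testing/keptn-config.json; the URL is illustrative.
	raw := []byte(`{
		"url": "http://keptn.example.com/api",
		"namespace": "orkestra",
		"timeframe": "5m",
		"token": {"secretRef": {"name": "keptn-api-token", "namespace": "orkestra"}}
	}`)
	cfg := &keptn.Config{}
	if err := json.Unmarshal(raw, cfg); err != nil {
		log.Fatalf("failed to unmarshal the keptn configuration: %v", err)
	}
	// Validate rejects empty fields and unparseable timeframes such as "5 mins".
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid keptn configuration: %v", err)
	}
	log.Printf("keptn configuration for %s is valid", cfg.URL)
}
```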
pkg/keptn/consts.go Normal file

@@ -0,0 +1,17 @@
package keptn
const (
sliFilename = "sli.yaml"
sliURI = "prometheus/sli.yaml"
sloFilename = "slo.yaml"
sloURI = "slo.yaml"
jobExecutorFilename = "config.yaml"
jobExecutorURI = "job/config.yaml"
ShipyardFileName string = "shipyard.yaml"
KeptnConfigFileName string = "keptn-config.json"
KeptnAuthTokenKey string = "x-token"
HTTPTransportProtocol string = "http"
)

pkg/keptn/git.go Normal file

@@ -0,0 +1,7 @@
package keptn
type Git struct {
URL string
Token string
User string
}

pkg/keptn/keptn.go Normal file

@@ -0,0 +1,255 @@
package keptn
import (
"encoding/base64"
"encoding/json"
"fmt"
"log"
"net/url"
"time"
// "github.com/keptn/go-utils/pkg/api/models"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/google/uuid"
apimodels "github.com/keptn/go-utils/pkg/api/models"
apiutils "github.com/keptn/go-utils/pkg/api/utils"
keptnlib "github.com/keptn/go-utils/pkg/lib"
keptnk8sutils "github.com/keptn/kubernetes-utils/pkg"
)
var (
ErrFailedDeleteProject = fmt.Errorf("failed to delete project")
ErrEvaluationFailed = fmt.Errorf("evaluation result shows failure")
)
type Keptn struct {
url string
git *Git
token *string
apiHandler *apiutils.APIHandler
resourceHandler *apiutils.ResourceHandler
projectHandler *apiutils.ProjectHandler
eventHandler *apiutils.EventHandler
}
func New(url, namespace, secretName string, git *Git) (*Keptn, error) {
if git == nil {
log.Printf("No upstream git server provided. Using in-built git server")
}
// get token from secret
t, err := keptnk8sutils.GetKeptnAPITokenFromSecret(true, namespace, secretName)
if err != nil {
return nil, err
}
apiHandler := apiutils.NewAuthenticatedAPIHandler(url, t, KeptnAuthTokenKey, nil, HTTPTransportProtocol)
resourceHandler := apiutils.NewAuthenticatedResourceHandler(url, t, KeptnAuthTokenKey, nil, HTTPTransportProtocol)
projectHandler := apiutils.NewAuthenticatedProjectHandler(url, t, KeptnAuthTokenKey, nil, HTTPTransportProtocol)
eventHandler := apiutils.NewAuthenticatedEventHandler(url, t, KeptnAuthTokenKey, nil, HTTPTransportProtocol)
return &Keptn{
url: url,
token: &t,
apiHandler: apiHandler,
resourceHandler: resourceHandler,
projectHandler: projectHandler,
eventHandler: eventHandler,
git: git,
}, nil
}
func (k *Keptn) CreateOrUpdateProject(project string, shipyard string) error {
encodedShipyardContent := base64.StdEncoding.EncodeToString([]byte(shipyard))
projectInfo := apimodels.CreateProject{
Name: &project,
Shipyard: &encodedShipyardContent,
}
projectGetInfo := apimodels.Project{
ProjectName: project,
}
if k.git != nil {
projectInfo.GitRemoteURL = k.git.URL
projectInfo.GitToken = k.git.Token
projectInfo.GitUser = k.git.User
projectGetInfo.GitRemoteURI = k.git.URL
projectGetInfo.GitToken = k.git.Token
projectGetInfo.GitUser = k.git.User
}
// FIXME: address comment
// What happens if the Get call fails due to an actual error but the job exists?
// Nitish Malhotra (08/31) - Error code lookup table is not provided by keptn.
// There is no way to differentiate between errors unfortunately
if _, kErr := k.projectHandler.GetProject(projectGetInfo); kErr == nil {
fmt.Println("found the project - deleting it now")
if _, kErr := k.apiHandler.DeleteProject(projectGetInfo); kErr != nil {
return fmt.Errorf("failed to delete project with err: %v", kErr.GetMessage())
}
}
if _, kErr := k.apiHandler.CreateProject(projectInfo); kErr != nil {
return fmt.Errorf("failed to create project with err: %v", kErr.GetMessage())
}
return nil
}
func (k *Keptn) DeleteProject(project string) error {
p := apimodels.Project{
ProjectName: project,
}
if _, kErr := k.apiHandler.DeleteProject(p); kErr != nil {
return fmt.Errorf("%v : %w", kErr.GetMessage(), ErrFailedDeleteProject)
}
return nil
}
func (k *Keptn) CreateService(service, project string) error {
if _, kErr := k.apiHandler.CreateService(project, apimodels.CreateService{
ServiceName: &service,
}); kErr != nil {
return fmt.Errorf("failed to create service with err: %v", kErr.GetMessage())
}
return nil
}
func (k *Keptn) AddResourceToAllStages(service, project, resourceName, resourceContent string) error {
stages, err := k.getProjectStages(project)
if err != nil {
return err
}
for _, stage := range stages {
if err := k.AddResourceToStage(service, project, stage.StageName, resourceNameToURI(resourceName), resourceContent); err != nil {
return err
}
}
return nil
}
func (k *Keptn) AddResourceToStage(service, project, stage, resourceURI, resourceContent string) error {
resource := &apimodels.Resource{
ResourceContent: resourceContent,
ResourceURI: &resourceURI,
}
if _, err := k.resourceHandler.CreateServiceResources(project, stage, service, []*apimodels.Resource{resource}); err != nil {
return err
}
return nil
}
func (k *Keptn) ConfigureMonitoring(project, service, monitoringType string) error {
configureMonitoringEventData := &keptnlib.ConfigureMonitoringEventData{
Type: monitoringType,
Project: project,
Service: service,
}
source, _ := url.Parse("https://github.com/keptn/keptn/cli#configuremonitoring")
sdkEvent := cloudevents.NewEvent()
sdkEvent.SetID(uuid.New().String())
sdkEvent.SetType(keptnlib.ConfigureMonitoringEventType)
sdkEvent.SetSource(source.String())
sdkEvent.SetDataContentType(cloudevents.ApplicationJSON)
sdkEvent.SetData(cloudevents.ApplicationJSON, configureMonitoringEventData)
eventByte, err := json.Marshal(sdkEvent)
if err != nil {
return fmt.Errorf("failed to marshal cloud event. %s", err.Error())
}
apiEvent := apimodels.KeptnContextExtendedCE{}
err = json.Unmarshal(eventByte, &apiEvent)
if err != nil {
return fmt.Errorf("failed to map cloud event to API event model. %s", err.Error())
}
if _, kErr := k.apiHandler.SendEvent(apiEvent); kErr != nil {
return fmt.Errorf("sending configure-monitoring event was unsuccessful. %s", *kErr.Message)
}
return nil
}
func (k *Keptn) GetEvents(service, project, keptnCtx string) error {
filter := &apiutils.EventFilter{
Project: project,
Service: service,
KeptnContext: keptnCtx,
EventType: "sh.keptn.event.evaluation.finished",
}
eventsCtx, kErr := k.eventHandler.GetEvents(filter)
if kErr != nil {
return fmt.Errorf("failed to get events for keptn context %s. %s", keptnCtx, *kErr.Message)
}
if len(eventsCtx) != 1 {
return fmt.Errorf("expected to see one event of type %s", filter.EventType)
}
if dataMap, ok := eventsCtx[0].Data.(map[string]interface{}); ok {
if result, ok := dataMap["result"].(string); ok && result == "pass" {
return nil
}
return ErrEvaluationFailed
}
return fmt.Errorf("event context data expected to be of type map[string]interface{}")
}
func (k *Keptn) TriggerEvaluation(service, project, timeframe string) (string, error) {
currentTime := time.Now()
evaluation := apimodels.Evaluation{
Start: currentTime.UTC().Format("2006-01-02T15:04:05"),
Timeframe: timeframe,
}
stages, err := k.getProjectStages(project)
if err != nil {
return "", err
}
if len(stages) == 0 {
return "", fmt.Errorf("project %s has no stages to trigger an evaluation in", project)
}
stage := stages[0].StageName
eventCtx, kErr := k.apiHandler.TriggerEvaluation(project, stage, service, evaluation)
if kErr != nil {
return "", fmt.Errorf("failed to trigger evaluation with err: %v", kErr.GetMessage())
}
return *eventCtx.KeptnContext, nil
}
func (k *Keptn) getProjectStages(project string) ([]*apimodels.Stage, error) {
proj := apimodels.Project{
ProjectName: project,
}
if k.git != nil {
proj.GitRemoteURI = k.git.URL
proj.GitToken = k.git.Token
proj.GitUser = k.git.User
}
p, kErr := k.projectHandler.GetProject(proj)
if kErr != nil {
return nil, fmt.Errorf("failed to get project with err: %v", kErr.GetMessage())
}
return p.Stages, nil
}

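Taken together, a minimal sketch of driving this client end to end, mirroring what `actions.Install` does. It assumes a reachable keptn API and an API token stored in the `keptn-api-token` secret (as in `testing/keptn-config.json`); the URL, project name, and shipyard snippet are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/Azure/keptn-workflow-executor/pkg/keptn"
)

func main() {
	cli, err := keptn.New("http://keptn.example.com/api", "orkestra", "keptn-api-token", nil)
	if err != nil {
		log.Fatalf("failed to create the keptn client: %v", err)
	}
	// Illustrative shipyard; see testing/shipyard.yaml for a full example.
	shipyard := `apiVersion: "spec.keptn.sh/0.2.2"
kind: "Shipyard"`
	if err := cli.CreateOrUpdateProject("demo", shipyard); err != nil {
		log.Fatalf("failed to create the project: %v", err)
	}
	if err := cli.CreateService("demo", "demo"); err != nil {
		log.Fatalf("failed to create the service: %v", err)
	}
	keptnCtx, err := cli.TriggerEvaluation("demo", "demo", "5m")
	if err != nil {
		log.Fatalf("failed to trigger the evaluation: %v", err)
	}
	// Give the evaluation time to finish before checking its result once;
	// actions.Install polls instead via status.Poll.
	time.Sleep(30 * time.Second)
	if err := cli.GetEvents("demo", "demo", keptnCtx); err != nil {
		log.Fatalf("evaluation did not pass: %v", err)
	}
	log.Println("evaluation passed")
}
```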
pkg/keptn/token.go Normal file

@@ -0,0 +1,7 @@
package keptn
import corev1 "k8s.io/api/core/v1"
type KeptnAPIToken struct {
SecretRef *corev1.ObjectReference `json:"secretRef,omitempty"`
}

pkg/keptn/utils.go Normal file

@@ -0,0 +1,14 @@
package keptn
func resourceNameToURI(fname string) string {
switch fname {
case sliFilename:
return sliURI
case sloFilename:
return sloURI
case jobExecutorFilename:
return jobExecutorURI
default:
return fname
}
}

pkg/status/polling.go Normal file

@@ -0,0 +1,26 @@
package status
import (
"context"
"fmt"
"time"

log "github.com/sirupsen/logrus"
)

// Poll runs the poller function repeatedly, waiting backoffDuration between
// attempts. A send on the done channel stops the polling successfully; if the
// context is cancelled or times out first, Poll returns an error.
func Poll(ctx context.Context, poller func(chan<- bool), backoffDuration time.Duration) error {
done := make(chan bool, 1)
for {
go poller(done)
select {
case <-ctx.Done():
return fmt.Errorf("failed to complete polling within the timeout")
case <-done:
log.Infof("successfully completed the polling function")
return nil
case <-time.After(backoffDuration):
}
}
}

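A short usage sketch for `Poll`: the poller signals success by sending on `done` and triggers a retry by returning without sending. The `ready` helper below is hypothetical, standing in for a real check such as a finished keptn evaluation:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/keptn-workflow-executor/pkg/status"
)

// ready is a hypothetical stand-in for a real condition check.
func ready() bool { return time.Now().Second()%10 == 0 }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	poller := func(done chan<- bool) {
		if ready() {
			done <- true // a send on done stops the polling loop
		}
		// returning without sending lets Poll retry after the backoff
	}

	if err := status.Poll(ctx, poller, 10*time.Second); err != nil {
		log.Fatalf("condition was not met before the timeout: %v", err)
	}
	log.Println("condition met")
}
```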
testing/README.md Normal file

@@ -0,0 +1,120 @@
# Manual instructions for continuous evaluation of a webserver app using keptn and the `hey` load-testing tool
## Setup
### Deploy webserver app
#### Without errors/delays
```console
helm upgrade --install webserver https://nitishm.github.io/charts/webserver-v1.0.0.tgz -n webserver --create-namespace
```
#### With errors/delays
This should set the error rate to 50% and the delay to 500ms.
```console
helm upgrade --install webserver https://nitishm.github.io/charts/webserver-v1.0.0.tgz -n webserver --create-namespace --set "webserver.errors.enabled=true" --set "webserver.delay.enabled=true"
```
## Deploy Orkestra with the keptn control plane
Follow the installation instructions [here](https://github.com/Azure/orkestra#installation-).
## Deploy keptn job-executor-service
```console
kubectl create -f job-executor-service.yaml -n orkestra
```
## Deploy prometheus
```console
helm install prometheus prometheus-community/prometheus -n prometheus --create-namespace
```
## Configure keptn to use prometheus for SLI/SLO quality gates
```console
kubectl create -f keptn-prometheus-service.yaml -n orkestra
```
## Webserver application ([source](https://github.com/nitishm/k8s-webserver-app))
### Create the project
```console
keptn create project webserver --shipyard=./shipyard.yaml
```
### Onboard service to project
```console
keptn onboard service webserver --project=webserver
```
### Add resource SLI
```console
keptn add-resource --project=webserver --service=webserver --resource=prometheus/sli.yaml --resourceUri=prometheus/sli.yaml --all-stages
```
### Add resource SLO
```console
keptn add-resource --project=webserver --service=webserver --resource=slo.yaml --resourceUri=slo.yaml --all-stages
```
### Configure prometheus sli provider
```console
keptn configure monitoring prometheus --project=webserver --service=webserver
```
### Continuous Testing & Evaluation
#### Run load tester (manually)
```console
hey -z 1h -n -1 http://$(kubectl get svc -n webserver webserver -ojsonpath='{.status.loadBalancer.ingress[0].ip}')/hello
```
#### Trigger load testing (event)
```console
keptn add-resource --project=webserver --service=webserver --resource=config.yaml --resourceUri=job/config.yaml --all-stages
```
where `config.yaml` contains:
```yaml
apiVersion: v2
actions:
  - name: "Run hey"
    events:
      - name: "sh.keptn.event.test.triggered"
    tasks:
      - name: "Run hey load tests"
        image: "azureorkestra/hey"
        cmd: "hey -z 1m http://webserver.webserver.svc.cluster.local:80/hello"
```
#### Trigger evaluation
```console
keptn trigger evaluation --project=webserver --service=webserver --timeframe=5m
```
---
### TL;DR
```console
keptn create project hey --shipyard=./shipyard.yaml
keptn create service webservers --project=hey
keptn configure monitoring prometheus --project=hey --service=webservers
keptn add-resource --project=hey --service=webservers --resource=slo.yaml --resourceUri=slo.yaml --stage=dev
keptn add-resource --project=hey --service=webservers --resource=prometheus/sli.yaml --resourceUri=prometheus/sli.yaml --stage=dev
keptn add-resource --project=hey --service=webservers --resource=job/config.yaml --resourceUri=job/config.yaml --stage=dev
keptn trigger evaluation --project=hey --service=webservers --timeframe=5m --stage dev --start $(date -u +"%Y-%m-%dT%T")
```

testing/helmrelease.yaml Normal file

@@ -0,0 +1,13 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: app
  namespace: webserver
spec:
  interval: 5m
  chart:
    spec:
      chart: webserver
      sourceRef:
        kind: HelmRepository
        name: webserver

testing/hey/README.md Normal file

@@ -0,0 +1,63 @@
# Webserver quality gates
## Create the project
```console
keptn create project webserver --shipyard=./shipyard.yaml
```
## Onboard service to project
```console
keptn onboard service webserver --project=webserver
```
## Add resource SLI
```console
keptn add-resource --project=webserver --service=webserver --resource=prometheus/sli.yaml --resourceUri=prometheus/sli.yaml --all-stages
```
## Add resource SLO
```console
keptn add-resource --project=webserver --service=webserver --resource=slo.yaml --resourceUri=slo.yaml --all-stages
```
## Configure prometheus sli provider
```console
keptn configure monitoring prometheus --project=webserver --service=webserver
```
## Start testing
### Run load tester (manually)
```console
hey -z 1h -n -1 http://$(kubectl get svc -n orkestra webserver -ojsonpath='{.status.loadBalancer.ingress[0].ip}')/hello
```
### Trigger load testing (event)
```console
keptn add-resource --project=webserver --service=webserver --resource=config.yaml --resourceUri=job/config.yaml --all-stages
```
where `config.yaml` contains:
```yaml
apiVersion: v2
actions:
  - name: "Run hey"
    events:
      - name: "sh.keptn.event.test.triggered"
    tasks:
      - name: "Run hey load tests"
        image: "azureorkestra/hey"
        cmd: "hey -z 1m http://webserver.orkestra.svc.cluster.local:80/hello"
```
### Trigger evaluation
```console
keptn trigger evaluation --project=webserver --service=webserver --timeframe=5m
```
---
```console
keptn create project hey --shipyard=./shipyard.yaml
keptn create service webservers --project=hey
keptn configure monitoring prometheus --project=hey --service=webservers
keptn add-resource --project=hey --service=webservers --resource=slo.yaml --resourceUri=slo.yaml --stage=dev
keptn add-resource --project=hey --service=webservers --resource=prometheus/sli.yaml --resourceUri=prometheus/sli.yaml --stage=dev
keptn add-resource --project=hey --service=webservers --resource=job/config.yaml --resourceUri=job/config.yaml --stage=dev
keptn send event -f hey/event.json
```

testing/hey/deploy.yaml Normal file

@@ -0,0 +1,168 @@
---
# Deployment of our job-executor-service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: job-executor-service
  namespace: orkestra
spec:
  selector:
    matchLabels:
      run: job-executor-service
  replicas: 1
  template:
    metadata:
      labels:
        run: job-executor-service
        app.kubernetes.io/name: job-executor-service
        app.kubernetes.io/version: 0.1.3
    spec:
      containers:
        - name: job-executor-service
          image: keptnsandbox/job-executor-service:0.1.3
          ports:
            - containerPort: 8080
          resources:
            limits:
              cpu: 1
              memory: 512Mi
            requests:
              cpu: 50m
              memory: 128Mi
          env:
            - name: INIT_CONTAINER_CONFIGURATION_SERVICE_API_ENDPOINT
              value: "http://configuration-service.keptn.svc.cluster.local:8080"
            - name: CONFIGURATION_SERVICE
              value: 'http://configuration-service.keptn.svc.cluster.local:8080'
            - name: JOB_NAMESPACE
              value: 'keptn'
            - name: INIT_CONTAINER_IMAGE
              value: 'keptnsandbox/job-executor-service-initcontainer:0.1.3'
            - name: DEFAULT_RESOURCE_LIMITS_CPU
              value: "1"
            - name: DEFAULT_RESOURCE_LIMITS_MEMORY
              value: "512Mi"
            - name: DEFAULT_RESOURCE_REQUESTS_CPU
              value: "50m"
            - name: DEFAULT_RESOURCE_REQUESTS_MEMORY
              value: "128Mi"
        - name: distributor
          image: keptn/distributor:0.8.4
          livenessProbe:
            httpGet:
              path: /health
              port: 10999
            initialDelaySeconds: 5
            periodSeconds: 5
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
          resources:
            requests:
              memory: "32Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: PUBSUB_URL
              value: 'nats://keptn-nats-cluster'
            - name: PUBSUB_TOPIC
              value: 'sh.keptn.>'
            - name: PUBSUB_RECIPIENT
              value: '127.0.0.1'
            - name: VERSION
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: 'metadata.labels[''app.kubernetes.io/version'']'
            - name: K8S_DEPLOYMENT_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: 'metadata.labels[''app.kubernetes.io/name'']'
            - name: K8S_POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
      serviceAccountName: job-executor-service
---
# Expose job-executor-service via Port 8080 within the cluster
apiVersion: v1
kind: Service
metadata:
  name: job-executor-service
  namespace: orkestra
  labels:
    run: job-executor-service
spec:
  ports:
    - port: 8080
      protocol: TCP
  selector:
    run: job-executor-service
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: job-executor-service
  namespace: orkestra
---
# Role for accessing secrets in the namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: job-executor-service
  namespace: orkestra
rules:
  - apiGroups:
      - ""
    resources:
      - "secrets"
    verbs:
      - "get"
      - "list"
      - "watch"
  - apiGroups:
      - ""
    resources:
      - "pods"
      - "pods/log"
      - "persistentvolumeclaims"
      - "jobs"
    verbs:
      - "*"
  - apiGroups:
      - "batch"
      - "extensions"
    resources:
      - "jobs"
    verbs:
      - "*"
---
# Bind role for accessing secrets onto the job-executor-service service account
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: job-executor-service
  namespace: orkestra
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: job-executor-service
subjects:
  - kind: ServiceAccount
    name: job-executor-service
    namespace: orkestra

testing/hey/event.json Normal file

@@ -0,0 +1,13 @@
{
  "type": "sh.keptn.event.dev.continuousvalidation.triggered",
  "specversion": "1.0",
  "source": "test-events",
  "contenttype": "application/json",
  "data": {
    "project": "hey",
    "stage": "dev",
    "service": "webservers"
  }
}

testing/job-executor-service.yaml Normal file

@@ -0,0 +1,168 @@
---
# Deployment of our job-executor-service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: job-executor-service
  namespace: orkestra
spec:
  selector:
    matchLabels:
      run: job-executor-service
  replicas: 1
  template:
    metadata:
      labels:
        run: job-executor-service
        app.kubernetes.io/name: job-executor-service
        app.kubernetes.io/version: 0.1.3
    spec:
      containers:
        - name: job-executor-service
          image: keptnsandbox/job-executor-service:0.1.3
          ports:
            - containerPort: 8080
          resources:
            limits:
              cpu: 1
              memory: 512Mi
            requests:
              cpu: 50m
              memory: 128Mi
          env:
            - name: INIT_CONTAINER_CONFIGURATION_SERVICE_API_ENDPOINT
              value: "http://configuration-service:8080"
            - name: CONFIGURATION_SERVICE
              value: 'http://configuration-service:8080'
            - name: JOB_NAMESPACE
              value: 'orkestra'
            - name: INIT_CONTAINER_IMAGE
              value: 'keptnsandbox/job-executor-service-initcontainer:0.1.3'
            - name: DEFAULT_RESOURCE_LIMITS_CPU
              value: "1"
            - name: DEFAULT_RESOURCE_LIMITS_MEMORY
              value: "512Mi"
            - name: DEFAULT_RESOURCE_REQUESTS_CPU
              value: "50m"
            - name: DEFAULT_RESOURCE_REQUESTS_MEMORY
              value: "128Mi"
        - name: distributor
          image: keptn/distributor:0.8.4
          livenessProbe:
            httpGet:
              path: /health
              port: 10999
            initialDelaySeconds: 5
            periodSeconds: 5
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
          resources:
            requests:
              memory: "32Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: PUBSUB_URL
              value: 'nats://keptn-nats-cluster'
            - name: PUBSUB_TOPIC
              value: 'sh.keptn.>'
            - name: PUBSUB_RECIPIENT
              value: '127.0.0.1'
            - name: VERSION
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: 'metadata.labels[''app.kubernetes.io/version'']'
            - name: K8S_DEPLOYMENT_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: 'metadata.labels[''app.kubernetes.io/name'']'
            - name: K8S_POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
      serviceAccountName: job-executor-service
---
# Expose job-executor-service via Port 8080 within the cluster
apiVersion: v1
kind: Service
metadata:
  name: job-executor-service
  namespace: orkestra
  labels:
    run: job-executor-service
spec:
  ports:
    - port: 8080
      protocol: TCP
  selector:
    run: job-executor-service
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: job-executor-service
  namespace: orkestra
---
# Role for accessing secrets in the namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: job-executor-service
  namespace: orkestra
rules:
  - apiGroups:
      - ""
    resources:
      - "secrets"
    verbs:
      - "get"
      - "list"
      - "watch"
  - apiGroups:
      - ""
    resources:
      - "pods"
      - "pods/log"
      - "persistentvolumeclaims"
      - "jobs"
    verbs:
      - "*"
  - apiGroups:
      - "batch"
      - "extensions"
    resources:
      - "jobs"
    verbs:
      - "*"
---
# Bind role for accessing secrets onto the job-executor-service service account
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: job-executor-service
  namespace: orkestra
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: job-executor-service
subjects:
  - kind: ServiceAccount
    name: job-executor-service
    namespace: orkestra

testing/job/README.md Normal file

@@ -0,0 +1,3 @@
# Config.yaml
`config.yaml` is required for triggering `hey` load-testing using the [Keptn Job Executor Service](https://github.com/keptn-sandbox/job-executor-service).

testing/job/config.yaml Normal file

@@ -0,0 +1,13 @@
apiVersion: v2
actions:
  - name: "Run hey"
    events:
      - name: "sh.keptn.event.test.triggered"
    tasks:
      - name: "Run hey load tests"
        image: "azureorkestra/hey"
        # hey -z 5m "http://webserver.webserver.svc.cluster.local:80/hello"
        # GET "http://webserver.webserver.svc.cluster.local:80/hello" loadtest
        cmd: ["hey"]
        args: ["-z", "5m", "http://webserver.webserver.svc.cluster.local:80/hello"]
        maxPollDuration: 10000

testing/keptn-config.json Normal file

@@ -0,0 +1,11 @@
{
  "url": "http://52.247.18.168/api",
  "namespace": "orkestra",
  "timeframe": "5m",
  "token": {
    "secretRef": {
      "name": "keptn-api-token",
      "namespace": "orkestra"
    }
  }
}

@@ -0,0 +1,75 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: foobar
  namespace: default
data:
  config.yaml: |
    apiVersion: v2
    actions:
      - name: "Run hey"
        events:
          - name: "sh.keptn.event.test.triggered"
        tasks:
          - name: "Run hey load tests"
            image: "azureorkestra/hey"
            cmd: ["hey"]
            args: ["-z", "5m", "http://app.webserver.svc.cluster.local:80/hello"]
            maxPollDuration: 10000
  keptn-config.json: |-
    {
      "url": "http://52.177.239.129/api",
      "namespace": "orkestra",
      "timeframe": "5m",
      "token": {
        "secretRef": {
          "name": "keptn-api-token",
          "namespace": "orkestra"
        }
      }
    }
  shipyard.yaml: |-
    apiVersion: "spec.keptn.sh/0.2.2"
    kind: "Shipyard"
    metadata:
      name: "shipyard-webserver"
    spec:
      stages:
        - name: "dev"
          sequences:
            - name: "evaluation"
              tasks:
                - name: "test"
                  properties:
                    teststrategy: "functional"
                - name: "evaluation"
  sli.yaml: |
    spec_version: "1.0"
    indicators:
      error_percentage: "(sum(rate(webserver_errors{}[$DURATION_SECONDS])) / sum(rate(webserver_requests{}[$DURATION_SECONDS])) * 100) > 0 or on() vector(0)"
      response_time_p99: "histogram_quantile(0.99, rate(webserver_request_duration_seconds_bucket{}[$DURATION_SECONDS])) > 0.02 and rate(webserver_request_duration_seconds_count{}[$DURATION_SECONDS]) > 1 or on() vector(0)"
  slo.yaml: |-
    spec_version: '1.0'
    comparison:
      compare_with: "single_result"
      include_result_with_score: "pass"
      aggregate_function: avg
    objectives:
      - sli: response_time_p99
        pass:
          - criteria:
              - "<0.15"
        warning:
          - criteria:
              - "<=0.05"
      - sli: error_percentage
        pass:
          - criteria:
              - "<10"
        warning:
          - criteria:
              - "<=5"
    total_score:
      pass: "100%"
      warning: "75%"

@@ -0,0 +1,47 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: keptn-prometheus-service
  namespace: prometheus
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - configmaps
    verbs:
      - get
      - create
      - update
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - create
      - update
      - delete
  - apiGroups:
      - "apps"
    resources:
      - deployments
    verbs:
      - create
      - update
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: keptn-prometheus-service
  namespace: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: keptn-prometheus-service
subjects:
  - kind: ServiceAccount
    name: keptn-prometheus-service
    namespace: orkestra

@@ -0,0 +1,211 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: keptn-prometheus-service
  namespace: orkestra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: keptn-create-prom-clusterrole
rules:
  - apiGroups:
      - "rbac.authorization.k8s.io"
    resources:
      - clusterroles
      - clusterrolebindings
    verbs:
      - get
      - create
      - update
    resourceNames:
      - "prometheus"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: keptn-prom-prometheus
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - nodes
      - nodes/proxy
      - pods
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - "ingresses"
    verbs:
      - get
      - list
      - watch
  - nonResourceURLs: [ "/metrics" ]
    verbs: [ "get" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: keptn-read-secret-prometheus
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: keptn-prometheus-sli-service
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: keptn-read-secret-prometheus
subjects:
  - kind: ServiceAccount
    name: keptn-prometheus-service
    namespace: orkestra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: keptn-create-prom-clusterrole
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: keptn-create-prom-clusterrole
subjects:
  - kind: ServiceAccount
    name: keptn-prometheus-service
    namespace: orkestra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: keptn-prom-prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: keptn-prom-prometheus
subjects:
  - kind: ServiceAccount
    name: keptn-prometheus-service
    namespace: orkestra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: keptn-keptndomain-prom-service
  namespace: orkestra
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: keptn-read-keptndomain
subjects:
  - kind: ServiceAccount
    name: keptn-prometheus-service
    namespace: orkestra
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-service
  namespace: orkestra
spec:
  selector:
    matchLabels:
      run: prometheus-service
  replicas: 1
  template:
    metadata:
      labels:
        run: prometheus-service
    spec:
      containers:
        - name: prometheus-service
          image: keptncontrib/prometheus-service:0.6.0
          ports:
            - containerPort: 8080
          resources:
            requests:
              memory: "32Mi"
              cpu: "50m"
            limits:
              memory: "256Mi"
              cpu: "500m"
          env:
            - name: METRICS_SCRAPE_PATH
              value: '/metrics'
            - name: CONFIGURATION_SERVICE
              value: 'http://configuration-service:8080'
            - name: EVENTBROKER
              value: 'http://localhost:8081/event'
            - name: API
              value: 'ws://api-service:8080/websocket'
            - name: PROMETHEUS_NS
              value: 'prometheus'
            - name: PROMETHEUS_CM
              value: 'prometheus-server'
            - name: PROMETHEUS_LABELS
              value: 'component=server'
            - name: PROMETHEUS_ENDPOINT
              value: "http://prometheus-server.prometheus.svc.cluster.local:80"
            - name: PROMETHEUS_CONFIG_FILENAME
              value: 'prometheus.yml'
            - name: ALERT_MANAGER_CONFIG_FILENAME
              value: 'alertmanager.yml'
            - name: ALERT_MANAGER_CM
              value: 'prometheus-alertmanager'
            - name: ALERT_MANAGER_LABELS
              value: 'component=alertmanager'
            - name: ALERT_MANAGER_NS
              value: 'prometheus'
            - name: ALERT_MANAGER_TEMPLATE_CM
              value: 'alertmanager-templates'
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
        - name: distributor
          image: keptn/distributor:0.8.3
          ports:
            - containerPort: 8080
          resources:
            requests:
              memory: "16Mi"
              cpu: "25m"
            limits:
              memory: "128Mi"
              cpu: "250m"
          env:
            - name: PUBSUB_URL
              value: 'nats://keptn-nats-cluster'
            - name: PUBSUB_TOPIC
              value: 'sh.keptn.event.monitoring.configure,sh.keptn.event.get-sli.triggered'
            - name: PUBSUB_RECIPIENT
              value: 'prometheus-service'
      serviceAccountName: keptn-prometheus-service
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-service
  namespace: orkestra
  labels:
    run: prometheus-service
spec:
  ports:
    - port: 8080
      protocol: TCP
  selector:
    run: prometheus-service

testing/prometheus/sli.yaml Normal file

@@ -0,0 +1,4 @@
spec_version: "1.0"
indicators:
error_percentage: "(sum(rate(webserver_errors{}[$DURATION_SECONDS])) / sum(rate(webserver_requests{}[$DURATION_SECONDS])) * 100) > 0 or on() vector(0)"
response_time_p99: "histogram_quantile(0.99, rate(webserver_request_duration_seconds_bucket{}[$DURATION_SECONDS])) > 0.02 and rate(webserver_request_duration_seconds_count{}[$DURATION_SECONDS]) > 1 or on() vector(0)"

testing/shipyard.yaml Normal file

@@ -0,0 +1,14 @@
apiVersion: "spec.keptn.sh/0.2.2"
kind: "Shipyard"
metadata:
name: "shipyard-webserver"
spec:
stages:
- name: "dev"
sequences:
- name: "evaluation"
tasks:
- name: "test"
properties:
teststrategy: "functional"
- name: "evaluation"

testing/slo.yaml Normal file

@@ -0,0 +1,23 @@
spec_version: '1.0'
comparison:
  compare_with: "single_result"
  include_result_with_score: "pass"
  aggregate_function: avg
objectives:
  - sli: response_time_p99
    pass:
      - criteria:
          - "<0.15"
    warning:
      - criteria:
          - "<=0.05"
  - sli: error_percentage
    pass:
      - criteria:
          - "<10"
    warning:
      - criteria:
          - "<=5"
total_score:
  pass: "100%"
  warning: "75%"