This commit is contained in:
David Moore 2020-02-21 12:18:47 +00:00
Parent fee0362965
Commit fc649b0449
86 changed files: 12305 additions and 59 deletions

81
.devcontainer/Dockerfile Normal file

@ -0,0 +1,81 @@
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
FROM golang:1.12-stretch
# Avoid warnings by switching to noninteractive
ENV DEBIAN_FRONTEND=noninteractive
# Configure apt, install packages and tools
RUN apt-get update \
&& apt-get -y install --no-install-recommends apt-utils dialog nano bash-completion 2>&1 \
#
# Verify git, process tools, lsb-release (common in install instructions for CLIs) installed
&& apt-get -y install git iproute2 procps lsb-release \
# Install docker
&& apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common lsb-release \
&& curl -fsSL https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]')/gpg | apt-key add - 2>/dev/null \
&& add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable" \
&& apt-get update \
&& apt-get install -y docker-ce-cli \
# Install icu-devtools
&& apt-get install -y icu-devtools \
# Install kubectl
&& curl -sSL -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.16.2/bin/linux/amd64/kubectl \
&& chmod +x /usr/local/bin/kubectl \
# Install jq
&& apt-get install -y jq \
# Install Azure cli
&& curl -sL https://aka.ms/InstallAzureCLIDeb | bash - \
# Clean up
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
# Enable go modules
ENV GO111MODULE=on
# Install Go tools
RUN \
# --> Delve for debugging
go get github.com/go-delve/delve/cmd/dlv@v1.3.2 \
# --> Go language server
&& go get golang.org/x/tools/gopls@v0.2.1 \
# --> Go symbols and outline for go to symbol support and test support
&& go get github.com/acroca/go-symbols@v0.1.1 && go get github.com/ramya-rao-a/go-outline@7182a932836a71948db4a81991a494751eccfe77 \
# --> GolangCI-lint
&& curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sed 's/tar -/tar --no-same-owner -/g' | sh -s -- -b $(go env GOPATH)/bin \
# --> Install Ginkgo
&& go get github.com/onsi/ginkgo/ginkgo@v1.11.0 \
# --> Install junit converter
&& go get github.com/jstemmer/go-junit-report@v0.9.1 \
&& rm -rf /go/src/ && rm -rf /go/pkg
# Install other tools using scripts to keep the Dockerfile readable
COPY ./scripts/kubebuilder.sh .
RUN bash -f ./kubebuilder.sh
COPY ./scripts/kustomize.sh .
RUN bash -f ./kustomize.sh
COPY ./scripts/installhelm.sh .
RUN bash -f ./installhelm.sh
COPY ./scripts/install-kind.sh .
RUN bash -f ./install-kind.sh
COPY ./scripts/configbash.sh .
RUN bash -f ./configbash.sh
# Install golangci-linter
RUN curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env GOPATH)/bin v1.21.0
# Build the path
ENV PATH="/usr/local/kubebuilder/bin:${PATH}"
ENV PATH="/root/.kubectx:${PATH}"
# Set Kubeconfig path to kind cluster
ENV KUBECONFIG="/root/.kube/kind-config-psccontroller"
ENV KUBE_EDITOR="nano"


@ -0,0 +1,63 @@
{
"name": "Go",
"dockerFile": "Dockerfile",
"runArgs": [
"--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined",
// Mount go mod cache
"-v", "agentcontroller-gomodcache:/go/pkg",
// Cache vscode extension installs and home dir
"-v", "agentcontroller-vscodecache:/root/.vscode-server",
// Keep command history
"-v", "agentcontroller-bashhistory:/root/commandhistory",
// Mount docker socket for docker builds
"-v", "/var/run/docker.sock:/var/run/docker.sock",
// Use host network
"--network=host",
// Mount ssh
"-v", "${env:HOME}${env:USERPROFILE}/.ssh:/root/.ssh",
// Mount azure and kubernetes auth
"-v", "${env:HOME}${env:USERPROFILE}/.azure:/root/.azure",
"-v", "${env:HOME}${env:USERPROFILE}/.gitconfig:/root/.gitconfig", // Required due to volume mount used for .vscode-server
"-v", "${env:HOME}${env:USERPROFILE}/.kube:/root/.kube"
],
"workspaceMount": "src=${localWorkspaceFolder},dst=/workspace,type=bind,consistency=delegated",
"workspaceFolder": "/workspace",
"settings": {
"go.gopath": "/go",
"go.useLanguageServer": true,
"[go]": {
"editor.snippetSuggestions": "none",
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true,
}
},
"gopls": {
"usePlaceholders": true, // add parameter placeholders when completing a function
// Experimental settings
"completeUnimported": true, // autocomplete unimported packages
"watchFileChanges": true, // watch file changes outside of the editor
"deepCompletion": true // enable deep completion
},
"go.toolsEnvVars": {
"GO111MODULE": "on"
},
"remote.extensionKind": {
"ms-azuretools.vscode-docker": "workspace"
},
"yaml.schemas": {
"kubernetes": "*.yaml"
},
"go.lintTool":"golangci-lint",
"go.lintFlags": [
"--fast"
]
},
"extensions": [
"ms-vsliveshare.vsliveshare-pack",
"ms-azuretools.vscode-docker",
"ms-vscode.go",
"redhat.vscode-yaml",
"ms-kubernetes-tools.vscode-kubernetes-tools"
]
}


@ -0,0 +1,38 @@
#!/bin/bash
set -e
set -x
# Enable bash completion
echo "source /etc/bash_completion" >> "/root/.bashrc"
# Add autocomplete to kubectl
echo "alias k=kubectl" >> "/root/.bashrc"
echo "source <(kubectl completion bash)" >> "/root/.bashrc"
echo "source <(kubectl completion bash | sed 's/kubectl/k/g')" >> "/root/.bashrc"
# Add kubectx
git clone https://github.com/ahmetb/kubectx.git /root/.kubectx
COMPDIR=$(pkg-config --variable=completionsdir bash-completion)
ln -sf /root/.kubectx/completion/kubens.bash $COMPDIR/kubens
ln -sf /root/.kubectx/completion/kubectx.bash $COMPDIR/kubectx
# Bash history search
cat >> "/root/.bashrc" <<BINDINGS
bind '"\e[A": history-search-backward'
bind '"\e[A": history-search-backward'
bind '"\e[B": history-search-forward'
bind '"\eOA": history-search-backward'
bind '"\eOB": history-search-forward'
BINDINGS
# Save bash history each time a command is used ... /root/commandhistory is mounted to a volume so it
# survives between container restarts
echo "export PROMPT_COMMAND='history -a'" >> "/root/.bashrc"
echo "export HISTFILE=/root/commandhistory/.bash_history" >> "/root/.bashrc"
mkdir -p /root/commandhistory
touch /root/commandhistory/.bash_history
# Git command prompt
git clone https://github.com/magicmonty/bash-git-prompt.git ~/.bash-git-prompt --depth=1
echo "if [ -f \"$HOME/.bash-git-prompt/gitprompt.sh\" ]; then GIT_PROMPT_ONLY_IN_REPO=1 && source $HOME/.bash-git-prompt/gitprompt.sh; fi" >> "/root/.bashrc"


@ -0,0 +1,7 @@
#! /bin/bash
set -e
set -x
curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.5.1/kind-linux-amd64
chmod +x ./kind
mv ./kind /usr/local/bin/kind


@ -0,0 +1,10 @@
#! /bin/bash
set -e
set -x
# Install helm 3.0
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | DESIRED_VERSION=v3.0.3 bash
# add the stable chart repo
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm repo update


@ -0,0 +1,18 @@
#! /bin/bash
set -e
set -x
os=$(go env GOOS)
arch=$(go env GOARCH)
kb_version="2.0.1"
# download kubebuilder and extract it to tmp
curl -sL https://go.kubebuilder.io/dl/${kb_version}/${os}/${arch} | tar -xz -C /tmp/
# move to a long-term location and put it on your path
# (you'll need to set the KUBEBUILDER_ASSETS env var if you put it somewhere else)
mv /tmp/kubebuilder_${kb_version}_${os}_${arch} /usr/local/kubebuilder
export PATH=$PATH:/usr/local/kubebuilder/bin
# Clear down pkg file
rm -rf /go/pkg && rm -rf /go/src


@ -0,0 +1,11 @@
#! /bin/bash
set -e
set -x
# download kustomize
curl -o /tmp/kustomize -sL "https://github.com/kubernetes-sigs/kustomize/releases/download/v3.1.0/kustomize_3.1.0_linux_amd64"
cp /tmp/kustomize /usr/local/kubebuilder/bin/
# set permission
chmod a+x /usr/local/kubebuilder/bin/kustomize

1
.gitattributes vendored Normal file

@ -0,0 +1 @@
*.sh text eol=lf

38
.gitignore vendored

@ -1,15 +1,23 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the JUnit tests, built with `make unit-tests-junit`
TEST-*.txt
TEST-*.xml
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Debug information
__debug_bin
bin/manager
# Ignore changes to this file but keep it committed in its current form
config/manager/kustomization.yaml
controllers.coverprofile*

45
.golangci.toml Normal file

@ -0,0 +1,45 @@
[run]
deadline = "5m"
skip-files = []
[linters-settings]
[linters-settings.govet]
check-shadowing = true
[linters-settings.gocyclo]
min-complexity = 12.0
[linters-settings.maligned]
suggest-new = true
[linters-settings.goconst]
min-len = 3.0
min-occurrences = 3.0
[linters-settings.misspell]
locale = "US"
ignore-words = ["listend", "analyses"]
[linters-settings.errcheck]
check-blank = true
[linters]
enable = ["vet", "golint", "gofmt", "deadcode", "varcheck", "structcheck", "misspell", "errcheck", "gosimple", "govet", "ineffassign"]
[issues]
exclude-use-default = false
max-per-linter = 0
max-same-issues = 0
exclude = []
# Example ignore stuff
# [[issues.exclude-rules]]
# text = "Error return value of `g.DeleteView` is not checked"
# linters = ["errcheck"]
# [[issues.exclude-rules]]
# text = "Error return value of `g.SetCurrentView` is not checked"
# linters = ["errcheck"]
# [[issues.exclude-rules]]
# text = "Error return value of `*.SetCursor` is not checked"
# linters = ["errcheck"]

16
.vscode/launch.json vendored Normal file

@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Launch",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": []
}
]
}

7
.vscode/settings.json vendored Normal file

@ -0,0 +1,7 @@
{
"go.inferGopath": false,
"go.lintTool":"golangci-lint",
"go.lintFlags": [
"--fast"
]
}

12
.vscode/tasks.json vendored Normal file

@ -0,0 +1,12 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "echo",
"type": "shell",
"command": "make kind-start"
}
]
}

27
Dockerfile Normal file

@ -0,0 +1,27 @@
# Build the manager binary
FROM golang:1.12.5 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
ENTRYPOINT ["/manager"]

148
Makefile Normal file

@ -0,0 +1,148 @@
# Image URL to use all building/pushing image targets
timestamp := $(shell /bin/date "+%Y%m%d-%H%M%S")
IMG ?= docker.io/controller:$(timestamp)
INIT_IMG ?= docker.io/initcontainer:1
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
KIND_CLUSTER_NAME ?= "psccontroller"
K8S_NODE_IMAGE ?= v1.15.3
PROMETHEUS_INSTANCE_NAME ?= prometheus-operator
CONFIG_MAP_NAME ?= initcontainer-configmap
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# CI
all: manager
build-run-ci: manager unit-tests deploy-kind kind-tests kind-long-tests
# DEPLOYING:
# - Kind
deploy-kind: kind-start kind-load-img kind-load-initcontainer deploy-cluster
# - Configured Kubernetes cluster in ~/.kube/config (could be KIND too)
deploy-cluster: manifests install-crds install-prometheus kustomize-deployment
install-prometheus:
ifneq (1, $(shell helm list | grep ${PROMETHEUS_INSTANCE_NAME} | wc -l))
./deploy/prometheus-grafana/deploy-prometheus.sh
else
@echo "Helm installation of the prometheus-operator already exists with name ${PROMETHEUS_INSTANCE_NAME}... skipping"
endif
kustomize-deployment:
@echo "Kustomizing k8s resource files"
sed -i "/configMapGenerator/,/${CONFIG_MAP_NAME}/d" config/manager/kustomization.yaml
cd config/manager && kustomize edit set image controller=${IMG}
cd config/manager && kustomize edit add configmap ${CONFIG_MAP_NAME} --from-literal=initContainerImage=${INIT_IMG}
@echo "Applying kustomizations"
kustomize build config/default | kubectl apply --validate=false -f -
kind-start:
ifeq (1, $(shell kind get clusters | grep ${KIND_CLUSTER_NAME} | wc -l))
@echo "Cluster already exists"
else
@echo "Creating Cluster"
kind create cluster --name ${KIND_CLUSTER_NAME} --image=kindest/node:${K8S_NODE_IMAGE}
endif
kind-load-img: docker-build
@echo "Loading image into kind"
kind load docker-image ${IMG} --name ${KIND_CLUSTER_NAME} --loglevel "trace"
# Run integration tests in KIND
kind-tests:
ginkgo --skip="LONG TEST:" --nodes 6 --race --randomizeAllSpecs --cover --trace --progress --coverprofile ../controllers.coverprofile ./controllers
-kubectl delete prescaledcronjobs --all -n psc-system
kind-long-tests:
ginkgo --focus="LONG TEST:" -nodes 6 --randomizeAllSpecs --trace --progress ./controllers
-kubectl delete prescaledcronjobs --all -n psc-system
# Run unit tests and output in JUnit format
unit-tests: generate checks manifests
go test controllers/utilities_test.go controllers/utilities.go -v -cover 2>&1 | tee TEST-utilities.txt
go test controllers/structhash_test.go controllers/structhash.go -v -cover 2>&1 | tee TEST-structhash.txt
cat TEST-utilities.txt | go-junit-report 2>&1 > TEST-utilities.xml
cat TEST-structhash.txt | go-junit-report 2>&1 > TEST-structhash.xml
# Build manager binary
manager: generate checks
go build -o bin/manager main.go
# Run against the configured Kubernetes cluster in ~/.kube/config
run: generate checks manifests
go run ./main.go
# Install CRDs into a cluster
install-crds: manifests
kustomize build config/crd | kubectl apply -f -
# Uninstall CRDs from a cluster
uninstall-crds: manifests
kustomize build config/crd | kubectl delete -f -
# SAMPLE YAMLs
# - Regular cronjob
recreate-sample-cron:
-kubectl delete cronjob samplecron
kubectl apply -f ./config/samples/cron_sample.yaml
# - PrescaledCronJob
recreate-sample-psccron:
-kubectl delete prescaledcronjob prescaledcronjob-sample -n psc-system
-kubectl delete cronjob autogen-prescaledcronjob-sample -n psc-system
kubectl apply -f ./config/samples/psc_v1alpha1_prescaledcronjob.yaml
# - Regular cronjob with init container
recreate-sample-initcron:
-kubectl delete cronjob sampleinitcron
kubectl apply -f ./config/samples/init_cron_sample.yaml
# INIT CONTAINER
docker-build-initcontainer:
docker build -t ${INIT_IMG} ./initcontainer
docker-push-initcontainer:
docker push ${INIT_IMG}
kind-load-initcontainer: docker-build-initcontainer
@echo "Loading initcontainer image into kind"
kind load docker-image ${INIT_IMG} --name ${KIND_CLUSTER_NAME} --loglevel "trace"
# UTILITY
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
# Run go fmt against code
fmt:
find . -name '*.go' | grep -v vendor | xargs gofmt -s -w
# Run linting
checks:
GO111MODULE=on golangci-lint run
# Generate code
generate: controller-gen
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
# Build the docker image
docker-build: unit-tests
docker build . -t ${IMG}
# Push the docker image
docker-push:
docker push ${IMG}
# find or download controller-gen
# download controller-gen if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.1
CONTROLLER_GEN=$(GOBIN)/controller-gen
else
CONTROLLER_GEN=$(shell which controller-gen)
endif

7
PROJECT Normal file

@ -0,0 +1,7 @@
version: "2"
domain: cronprimer.local
repo: cronprimer.local
resources:
- group: psc
version: v1alpha1
kind: PreScaledCronJob

169
README.md

@ -1,64 +1,145 @@
---
page_type: sample
languages:
- csharp
products:
- dotnet
description: "Add 150 character max description"
urlFragment: "update-this-to-unique-url-stub"
---
# Project Status & Disclaimer
The code provided in this repo is not actively maintained.
# Official Microsoft Sample
# Introduction
The main purpose of this project is to provide a mechanism whereby cronjobs can be run on auto-scaling clusters, and ensure that the cluster is scaled up to their desired size prior to the time at which the `CronJob` workload needs to begin.
<!--
Guidelines on README format: https://review.docs.microsoft.com/help/onboard/admin/samples/concepts/readme-template?branch=master
## Example
For a workload to start at *16:30* exactly, a node in the cluster has to be available and warm at that time. The `PrescaledCronJob` CRD and operator will ensure that a cronjob gets scheduled n minutes earlier to force the cluster to prepare a node, and then a custom init container will run, blocking the workload from running until the correct time.
Guidance on onboarding samples to docs.microsoft.com/samples: https://review.docs.microsoft.com/help/onboard/admin/samples/process/onboarding?branch=master
![PrescaledCronJob Scheduling](docs/prescaledcron.png)
Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master
-->
## How it works
- This project defines a new Kubernetes CRD kind named `PreScaledCronJob`; and an Operator that will reconcile said kind.
- When a `PreScaledCronJob` is created in a cluster, this Operator will create an associated `CronJob` object that will execute X minutes prior to the real workload and ensure any necessary agent pool machines are "warmed up".
- More information on how we calculate the `CronJob` schedule can be found in [the Primed Cronjob Schedules
documentation here](docs/cronjobs.md)
- The created `CronJob` is associated with the `PreScaledCronJob` using the Kubernetes `OwnerReference` mechanism. This enables us to automatically delete the `CronJob` when the `PreScaledCronJob` resource is deleted (see the sketch after this list). For more information please check out the [Kubernetes documentation here](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents)
- `PreScaledCronJob` objects can check for changes on their associated `CronJob` objects via a generated hash. If this hash does not match that which the `PreScaledCronJob` expects, we update the `CronJob` spec.
- The generated `CronJob` uses an `initContainer` spec to spin-wait thus warming up the agent pool and forcing it to scale up to our desired state ahead of the real workload. For more information please check out the [Init Container documentation here](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
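As a rough illustration of the ownership and drift-detection bullets above, the sketch below shows how a generated `CronJob` could carry an `OwnerReference` and a spec-hash annotation. This is a minimal, hedged sketch, not the controller's actual code: `buildOwnedCronJob`, `hashSpec`, the annotation key, and the `autogen-` naming are illustrative assumptions.

```go
// Minimal sketch (illustrative only, not this repo's controller code).
// Shows how a generated CronJob could be tied to its parent PreScaledCronJob
// via an OwnerReference and stamped with a spec hash for drift detection.
package controllers

import (
	"fmt"
	"hash/fnv"

	batchv1beta1 "k8s.io/api/batch/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	pscv1alpha1 "cronprimer.local/api/v1alpha1"
)

// specHashAnnotation is a hypothetical annotation key used only in this sketch.
const specHashAnnotation = "psc.cronprimer.local/spec-hash"

func buildOwnedCronJob(psc *pscv1alpha1.PreScaledCronJob, scheme *runtime.Scheme) (*batchv1beta1.CronJob, error) {
	cronJob := psc.Spec.CronJob.DeepCopy()
	cronJob.Namespace = psc.Namespace
	if cronJob.Name == "" {
		cronJob.Name = "autogen-" + psc.Name
	}

	// Stamp the desired-spec hash so a later reconcile can detect drift
	// and update the CronJob spec if it no longer matches.
	if cronJob.Annotations == nil {
		cronJob.Annotations = map[string]string{}
	}
	cronJob.Annotations[specHashAnnotation] = hashSpec(&psc.Spec)

	// The OwnerReference means Kubernetes garbage-collects the CronJob
	// automatically when the parent PreScaledCronJob is deleted.
	if err := controllerutil.SetControllerReference(psc, cronJob, scheme); err != nil {
		return nil, err
	}
	return cronJob, nil
}

// hashSpec is a placeholder; the real controller uses its own struct-hashing
// helper (see controllers/structhash.go in this commit).
func hashSpec(spec *pscv1alpha1.PreScaledCronJobSpec) string {
	h := fnv.New32a()
	fmt.Fprintf(h, "%v", spec)
	return fmt.Sprintf("%x", h.Sum32())
}
```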
Give a short description for your sample here. What does it do and why is it important?
# Getting Started
1. Clone the codebase
2. Ensure you have Docker installed and all necessary pre-requisites to develop on remote containers [installation notes](https://code.visualstudio.com/docs/remote/containers#_installation)
3. Install [VSCode Remote Development extensions pack](https://aka.ms/vscode-remote/download/extension)
4. Open the project and run in the development container
## Contents
# Build And Deploy
Outline the file contents of the repository. It helps users navigate the codebase, build configuration and any related assets.
**NOTE: To ensure a smooth deployment process, for both local and remote deployments, we recommend using the dev container provided in this repo. It includes all the binaries and CLI tools required to perform the actions below.**
| File/folder | Description |
|-------------------|--------------------------------------------|
| `src` | Sample source code. |
| `.gitignore` | Define what to ignore at commit time. |
| `CHANGELOG.md` | List of changes to the sample. |
| `CONTRIBUTING.md` | Guidelines for contributing to the sample. |
| `README.md` | This README file. |
| `LICENSE` | The license for the sample. |
## Deploying locally
## Prerequisites
If you are using the development container you have the option of deploying the Operator into a local test Kubernetes Cluster provided by the [KIND toolset](https://github.com/kubernetes-sigs/kind)
Outline the required components and tools that a user might need to have on their machine in order to run the sample. This can be anything from frameworks, SDKs, OS versions or IDE releases.
To deploy to a local K8s/Kind instance:
```bash
make deploy-kind
```
## Setup
## Deploying to a remote cluster
Explain how to prepare the sample once the user clones or downloads the repository. The section should outline every step necessary to install dependencies and set up any settings (for example, API keys and output folders).
### Prerequisites
- Ensure your terminal is connected to your K8s cluster
- Ensure your terminal is logged into your docker container registry that you will be using as the image store for your K8s cluster
- Ensure your cluster has permissions to pull containers from your container registry
## Running the sample
### Deploying
Outline step-by-step instructions to execute the sample and see its output. Include steps for executing the sample from the IDE, starting specific services in the Azure portal or anything related to the overall launch of the code.
1. Deploy the image used to initialise cluster scale up:
## Key concepts
```bash
make docker-build-initcontainer docker-push-initcontainer INIT_IMG=<some-registry>/initcontainer:<tag>
```
2. Deploy the operator to your cluster:
Provide users with more context on the tools and services used in the sample. Explain some of the code that is being used and how services interact with each other.
```bash
make docker-build docker-push IMG=<some-registry>/prescaledcronjoboperator:<tag> INIT_IMG=<some-registry>/initcontainer:<tag>
## Contributing
make deploy-cluster IMG=<some-registry>/prescaledcronjoboperator:<tag> INIT_IMG=<some-registry>/initcontainer:<tag>
```
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
3. Once the deployment is complete you can check that everything is installed:
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
```
kubectl get all -n psc-system
```
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Creating your first PreScaledCronJob
A sample `yaml` is provided for you in the config folder.
- To apply this:
```
kubectl apply -f config/samples/psc_v1alpha1_prescaledcronjob.yaml
```
- To test the Operator worked correctly:
```
kubectl get prescaledcronjobs -A
kubectl get cronjobs -A
```
- If everything worked correctly you should see the following output:
```
NAMESPACE NAME AGE
psc-system prescaledcronjob-sample 30s
NAMESPACE NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
psc-system autogen-prescaledcronjob-sample 45,15 * * * * False 0 <none> 39s
```
If you do not see the output above then please review the [debugging documentation](docs/debugging.md). Deleting the `PrescaledCronJob` resource will clean up the `CronJob` automatically.
### Define Primer Schedule
Before the actual cronjob kicks off, an init container pre-warms the cluster so all nodes are immediately available when the cronjob is intended to run.
There are two ways to define this primer schedule:
1. Set `warmUpTimeMins` under the PreScaledCronJob spec. This will [generate](docs/cronjobs.md) a primed cronjob schedule based on your original schedule and the number of minutes you want to pre-warm your cluster. This can be defined as follows (an example yaml is provided in `config/samples/psc_v1alpha1_prescaledcronjob.yaml`):
```
kind: PreScaledCronJob
spec:
warmUpTimeMins: 5
cronJob:
spec:
schedule: "5/30 * * * *"
```
- OR -
2. Set a pre-defined `primerSchedule` under the PreScaledCronJob. The pre-defined primer schedule below results in the exact same pre-warming and cron schedule as the schedule above. (An example yaml is provided in `config/samples/psc_v1alpha1_prescaledcronjob_primerschedule.yaml`)
```
kind: PreScaledCronJob
spec:
primerSchedule: "*/30 * * * *"
cronJob:
spec:
schedule: "5/30 * * * *"
```
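To make the pre-warming behaviour above concrete, here is a minimal, illustrative sketch of what an init container spin-wait could look like. It assumes the workload schedule arrives via the `CRONJOB_SCHEDULE` environment variable (as in `config/samples/init_cron_sample.yaml`) and is not the repo's actual initcontainer implementation.

```go
// Illustrative sketch only: block until the next occurrence of the workload's
// cron schedule, so the pod (and hence the node) is warm before the real
// containers start. Not this repo's actual initcontainer code.
package main

import (
	"log"
	"os"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	schedule := os.Getenv("CRONJOB_SCHEDULE") // e.g. "2/5 * * * *"
	if schedule == "" {
		log.Fatal("CRONJOB_SCHEDULE is not set")
	}

	parsed, err := cron.ParseStandard(schedule)
	if err != nil {
		log.Fatalf("invalid schedule %q: %v", schedule, err)
	}

	next := parsed.Next(time.Now())
	log.Printf("warmup: sleeping until %s", next)
	time.Sleep(time.Until(next))

	// Exiting 0 releases the pod so the workload containers start on schedule.
	log.Print("warmup complete")
}
```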
# Debugging
Please review the [debugging documentation](docs/debugging.md)
# Monitoring
Please review the [monitoring documentation](docs/monitoring.md)
# Running the Tests
This repo contains 3 types of tests, which are logically separated:
- Unit tests, run with `go test`.
- To run: `make unit-tests`.
- 'Local' Integration tests, which run in a KIND cluster and test that the operator outputs the objects we expect.
- To run: `make kind-tests`
- 'Long' Integration tests, also running in KIND which submit objects to the cluster and monitor the cluster to ensure jobs start at the right time.
- To run: `make kind-long-tests`
# Hints and Tips
- Run `make fmt` to automatically format your code
# Kustomize patching
Many samples in the Kubernetes docs show `requests` and `limits` of a container using plain integer values, such as:
```yaml
requests:
nvidia.com/gpu: 1
```
The generated yaml schema definition for the `PrescaledCronJob` just sets the validation for these properties to `string`s, rather than what they should be (`integer` | `string` with a fixed regex format). This means we need to apply a patch (`/config/crd/patches/resource-type-patch.yaml`) to override the autogenerated type. This information may come in handy in future if other edge cases are found.
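As a quick aside on why both forms are valid: Kubernetes parses resource quantities from either bare integers or suffixed strings. A small standalone illustration using the apimachinery `resource` package (not part of this repo's code):

```go
// Standalone illustration (not part of this repo): Kubernetes resource
// quantities accept both bare integers and suffixed strings, which is why
// the CRD patch allows `integer | string` for requests/limits values.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	gpu := resource.MustParse("1")    // plain integer form, e.g. nvidia.com/gpu: 1
	cpu := resource.MustParse("500m") // suffixed string form, e.g. cpu: "500m"
	fmt.Println(gpu.String(), cpu.String()) // prints: 1 500m
}
```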


@ -0,0 +1,20 @@
// Package v1alpha1 contains API Schema definitions for the psc v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=psc.cronprimer.local
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "psc.cronprimer.local", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@ -0,0 +1,46 @@
package v1alpha1
import (
batchv1beta1 "k8s.io/api/batch/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// PreScaledCronJobSpec defines the desired state of PreScaledCronJob
type PreScaledCronJobSpec struct {
WarmUpTimeMins int `json:"warmUpTimeMins,omitempty"`
PrimerSchedule string `json:"primerSchedule,omitempty"`
CronJob batchv1beta1.CronJob `json:"cronJob,omitempty"`
}
// PreScaledCronJobStatus defines the observed state of PreScaledCronJob
type PreScaledCronJobStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
// +kubebuilder:object:root=true
// PreScaledCronJob is the Schema for the prescaledcronjobs API
type PreScaledCronJob struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PreScaledCronJobSpec `json:"spec,omitempty"`
Status PreScaledCronJobStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// PreScaledCronJobList contains a list of PreScaledCronJob
type PreScaledCronJobList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []PreScaledCronJob `json:"items"`
}
func init() {
SchemeBuilder.Register(&PreScaledCronJob{}, &PreScaledCronJobList{})
}


@ -0,0 +1,99 @@
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreScaledCronJob) DeepCopyInto(out *PreScaledCronJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreScaledCronJob.
func (in *PreScaledCronJob) DeepCopy() *PreScaledCronJob {
if in == nil {
return nil
}
out := new(PreScaledCronJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PreScaledCronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreScaledCronJobList) DeepCopyInto(out *PreScaledCronJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PreScaledCronJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreScaledCronJobList.
func (in *PreScaledCronJobList) DeepCopy() *PreScaledCronJobList {
if in == nil {
return nil
}
out := new(PreScaledCronJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PreScaledCronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreScaledCronJobSpec) DeepCopyInto(out *PreScaledCronJobSpec) {
*out = *in
in.CronJob.DeepCopyInto(&out.CronJob)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreScaledCronJobSpec.
func (in *PreScaledCronJobSpec) DeepCopy() *PreScaledCronJobSpec {
if in == nil {
return nil
}
out := new(PreScaledCronJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreScaledCronJobStatus) DeepCopyInto(out *PreScaledCronJobStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreScaledCronJobStatus.
func (in *PreScaledCronJobStatus) DeepCopy() *PreScaledCronJobStatus {
if in == nil {
return nil
}
out := new(PreScaledCronJobStatus)
in.DeepCopyInto(out)
return out
}

86
azure-pipelines.yml Normal file

@ -0,0 +1,86 @@
trigger:
- master
pool:
vmImage: 'Ubuntu-16.04'
jobs:
- job: Build
strategy:
matrix:
Kubernetes1.14:
K8S_NODE_IMAGE: 'v1.14.10'
Kubernetes1.15:
K8S_NODE_IMAGE: 'v1.15.7'
PUBLISH: 'true'
Kubernetes1.16:
K8S_NODE_IMAGE: 'v1.16.4'
Kubernetes1.17:
K8S_NODE_IMAGE: 'v1.17.0'
maxParallel: 4
steps:
# Cache the go module cache
- task: CacheBeta@0
inputs:
key: go-cache | go.sum
path: ".gocache"
restoreKeys: go-cache
displayName: Cache go mod cache
# Cache the docker image file
- task: CacheBeta@0
inputs:
key: docker-image | .devcontainer/**
path: ".dockercache"
restoreKeys: docker-image
cacheHitVar: DOCKER_CACHE_HIT
displayName: Cache docker layers
- script: |
bash -f ./ci.sh
displayName: 'Run CI'
- task: PublishTestResults@2
inputs:
testResultsFormat: 'JUnit'
testResultsFiles: '**/TEST-*.xml'
mergeTestResults: true
failTaskOnFailedTests: true
publishRunAttachments: true
displayName: Publish test results
- task: Docker@2
condition: succeeded()
inputs:
containerRegistry: 'my-registry'
command: 'login'
displayName: docker login
- task: Bash@3
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master'), eq(variables['PUBLISH'], 'true'))
inputs:
targetType: 'inline'
script: |
# Write your commands here
docker tag initcontainer:latest-${BUILD_BUILDNUMBER} my-registry.azurecr.io/initcontainer:latest-${BUILD_BUILDNUMBER}
docker tag initcontainer:latest-${BUILD_BUILDNUMBER} my-registry.azurecr.io/initcontainer:latest
docker push my-registry.azurecr.io/initcontainer:latest-${BUILD_BUILDNUMBER}
docker push my-registry.azurecr.io/initcontainer:latest
displayName: docker tag and push master
- task: Bash@3
condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest'), eq(variables['PUBLISH'], 'true'))
inputs:
targetType: 'inline'
script: |
# Write your commands here
# Remove beginning of branch name
BRANCH_NAME=${SYSTEM_PULLREQUEST_SOURCEBRANCH//refs\/heads\//}
# Replace all / with -
BRANCH_NAME=${BRANCH_NAME//\//-}
docker tag initcontainer:latest-${BUILD_BUILDNUMBER} my-registry.azurecr.io/initcontainer:pr-${BRANCH_NAME}-${BUILD_BUILDNUMBER}
docker push my-registry.azurecr.io/initcontainer:pr-${BRANCH_NAME}-${BUILD_BUILDNUMBER}
displayName: docker tag and push PR build

41
ci.sh Normal file

@ -0,0 +1,41 @@
#! /bin/bash
set -e
set -x
# Get storage drive details
docker info
# Create .dockercache directory
mkdir -p ./.dockercache/
# Import devcontainer from cache to speed up build
if [ -f ".dockercache/devcontainer.tar" ];
then
echo "-------> Restoring docker image"
time docker load -i .dockercache/devcontainer.tar
fi
echo "-------> Building devcontainer"
# Use the devcontainer to run the build as it has all the environment setup that we need
time docker build --cache-from devcontainer:latest -t devcontainer -f ./.devcontainer/Dockerfile ./.devcontainer
# Create a directory for go mod cache
mkdir -p ${PWD}/.gocache
echo "-------> Building code and running tests"
# Run `make` to build and test the code
time docker run -v ${PWD}/.gocache:/go/pkg/ -v /var/run/docker.sock:/var/run/docker.sock -v ${PWD}:/src --workdir /src --entrypoint /bin/bash --network="host" devcontainer -c "K8S_NODE_IMAGE=$K8S_NODE_IMAGE make build-run-ci"
# Ensure .gocache permissions are correct so the build can save the cache
sudo chown -R $USER ${PWD}
# If the current cached image is out of date save devcontainer so it can be cached
if [ $DOCKER_CACHE_HIT != "true" ];
then
echo "-------> Saving docker image"
time docker image save -o ./.dockercache/devcontainer.tar devcontainer
fi
# Build and tag initcontainer docker image
echo "-------> Building and tagging initcontainer"
docker build -t initcontainer:latest-${BUILD_BUILDNUMBER} ./initcontainer

File diff suppressed because it is too large


@ -0,0 +1,22 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/psc.cronprimer.local_prescaledcronjobs.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_prescaledcronjobs.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_prescaledcronjobs.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
- patches/resource-type-patch.yaml
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml


@ -0,0 +1,17 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/name
namespace:
- kind: CustomResourceDefinition
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations


@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: prescaledcronjobs.psc.cronprimer.local


@ -0,0 +1,43 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: prescaledcronjobs.psc.cronprimer.local
spec:
validation:
openAPIV3Schema:
properties:
spec:
properties:
cronJob:
properties:
spec:
properties:
jobTemplate:
properties:
spec:
properties:
template:
properties:
spec:
properties:
containers:
items:
properties:
resources:
properties:
limits:
additionalProperties:
oneOf:
- type: string
pattern: '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'
- type: integer
type:
requests:
additionalProperties:
oneOf:
- type: string
pattern: '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'
- type: integer
type:


@ -0,0 +1,17 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: prescaledcronjobs.psc.cronprimer.local
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert


@ -0,0 +1,72 @@
# Adds namespace to all resources.
namespace: psc-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "psc-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: psc-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
- ../priority
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
#- ../webhook
# [PROMETHEUS] To enable metric scraping using prometheus operator, uncomment all sections with 'PROMETHEUS'.
- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# Only one of manager_auth_proxy_patch.yaml and
# manager_prometheus_metrics_patch.yaml should be enabled.
- manager_auth_proxy_patch.yaml
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, uncomment the following line and
# comment manager_auth_proxy_patch.yaml.
# Only one of manager_auth_proxy_patch.yaml and
# manager_prometheus_metrics_patch.yaml should be enabled.
#- manager_prometheus_metrics_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
# vars:
# - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: certmanager.k8s.io
# version: v1alpha1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
# - name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: certmanager.k8s.io
# version: v1alpha1
# name: serving-cert # this name should match the one in certificate.yaml
# - name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
# - name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service


@ -0,0 +1,25 @@
# This patch injects a sidecar container which is an HTTP proxy for the controller manager,
# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
ports:
- containerPort: 8443
name: https
- name: manager
args:
- "--metrics-addr=127.0.0.1:8080"
- "--enable-leader-election"


@ -0,0 +1,19 @@
# This patch enables Prometheus scraping for the manager pod.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
metadata:
annotations:
prometheus.io/scrape: 'true'
spec:
containers:
# Expose the prometheus metrics on default port
- name: manager
ports:
- containerPort: 8080
name: metrics
protocol: TCP


@ -0,0 +1,8 @@
# This patch adds an annotation to the admission webhook config and
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)


@ -0,0 +1,58 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-cronprimer-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-cronprimer-manager
spec:
selector:
matchLabels:
control-plane: controller-cronprimer-manager
replicas: 1
template:
metadata:
labels:
control-plane: controller-cronprimer-manager
spec:
containers:
- command:
- /manager
args:
- --enable-leader-election
image: controller:latest
name: manager
env:
- name: INIT_CONTAINER_IMAGE
valueFrom:
configMapKeyRef:
name: initcontainer-configmap
key: initContainerImage
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
readinessProbe:
httpGet:
path: /ready
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /alive
port: 8081
initialDelaySeconds: 10
periodSeconds: 10
terminationGracePeriodSeconds: 10
priorityClassName: psc-high-priority


@ -0,0 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority
value: 1000000
globalDefault: false
description: "This priority class should be used for CronPrimer controller pods only."


@ -0,0 +1,2 @@
resources:
- high_priority.yaml


@ -0,0 +1,2 @@
resources:
- monitor.yaml


@ -0,0 +1,4 @@
# This file is for teaching kustomize how to substitute namespaceSelector reference in CRD
varReference:
- kind: ServiceMonitor
path: spec/namespaceSelector/matchNames


@ -0,0 +1,20 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-cronprimer-manager
release: prometheus-operator # Ensure this matches your Prometheus service Monitor Selector criteria.
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true # Configure certs here if set up for auth_proxy (uses self-signed currently)
selector:
matchLabels:
control-plane: controller-cronprimer-manager # this needs to match the labels applied to your pods/services


@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups: ["authentication.k8s.io"]
resources:
- tokenreviews
verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
resources:
- subjectaccessreviews
verbs: ["create"]


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: system


@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "8443"
prometheus.io/scheme: https
prometheus.io/scrape: "true"
labels:
control-plane: controller-cronprimer-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-cronprimer-manager


@ -0,0 +1,11 @@
resources:
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 3 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml


@ -0,0 +1,32 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: system

71
config/rbac/role.yaml Normal file

@ -0,0 +1,71 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- batch
resources:
- cronjobs/status
verbs:
- get
- patch
- update
- apiGroups:
- psc.cronprimer.local
resources:
- prescaledcronjobs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- psc.cronprimer.local
resources:
- prescaledcronjobs/status
verbs:
- get
- patch
- update


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: system


@ -0,0 +1,53 @@
# This sample file provides an easy mechanism for testing the initContainer image of this project
apiVersion: batch/v1beta1
kind: CronJob
metadata:
namespace: default
name: sampleinitcron
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
# This placeholder label is currently required to allow the
# mutation admission controller to patch it
metadata:
labels:
placeholder: ""
spec:
initContainers:
- name: warmup
image: initcontainer:1
env:
- name: NAMESPACE
value: psc-system
- name: CRONJOB_SCHEDULE
value: "2/5 * * * *"
containers:
- name: greeter
image: busybox
args:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "1Gi"
cpu: "500m"
restartPolicy: OnFailure
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- list


@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
generation: 3
labels:
run: cpu-scale
name: cpu-scale
namespace: default
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
run: cpu-scale
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
run: cpu-scale
spec:
containers:
- image: busybox
command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
imagePullPolicy: IfNotPresent
name: cpu-scale
ports:
- containerPort: 8080
protocol: TCP
resources:
requests:
memory: "2Gi"
cpu: "1"
limits:
memory: "2Gi"
cpu: "1"
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
nodeSelector:
agentpool: cpupool2


@ -0,0 +1,24 @@
apiVersion: psc.cronprimer.local/v1alpha1
kind: PreScaledCronJob
metadata:
name: prescaledcronjob-sample
namespace: psc-system
spec:
warmUpTimeMins: 15
cronJob:
metadata:
name: my-cron-sample
spec:
schedule: "*/30 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox
args:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
restartPolicy: OnFailure


@ -0,0 +1,24 @@
apiVersion: psc.cronprimer.local/v1alpha1
kind: PreScaledCronJob
metadata:
name: prescaledcronjob-sample
namespace: psc-system
spec:
primerSchedule: "*/30 * * * *"
cronJob:
metadata:
name: my-cron-sample
spec:
schedule: "5/30 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox
args:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
restartPolicy: OnFailure


@ -0,0 +1,30 @@
apiVersion: psc.cronprimer.local/v1alpha1
kind: PreScaledCronJob
metadata:
name: prescaledcronjob-sample
namespace: psc-system
spec:
warmUpTimeMins: 15
cronJob:
metadata:
name: my-cron-sample
spec:
schedule: "*/30 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox
args:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
resources:
limits:
nvidia.com/gpu: 1
requests:
nvidia.com/gpu: 1
restartPolicy: OnFailure


78
controllers/metrics.go Normal file

@ -0,0 +1,78 @@
package controllers
import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
const (
successMetric = "success"
failureMetric = "failure"
// CronJobCreatedMetric represents a metric to track cronjob created
CronJobCreatedMetric = "create"
// CronJobUpdatedMetric represents a metric to track cronjob updated
CronJobUpdatedMetric = "update"
// CronJobDeletedMetric represents a metric to track cronjob deleted
CronJobDeletedMetric = "delete"
)
var cronjobCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "prescaledcronjoboperator_cronjob_action_total",
Help: "Number of CronJob objects created by controller",
}, []string{"action", "outcome"})
var timingLabels = []string{"prescalecron", "nodepool", "durationtype"}
// Track in buckets from 2 secs up to 60mins over 28 increments
var timingBuckets = prometheus.ExponentialBuckets(2, 1.32, 28)
var transitionTimeHistograms = map[string]*prometheus.HistogramVec{
timeToSchedule: timeToScheduleHistogram,
timeInitContainerRan: timeInitContainerRanHistogram,
timeToStartWorkload: timeToStartWorkloadHistogram,
timeDelayOfWorkload: timeDelayOfWorkloadHistogram,
}
var timeToScheduleHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "prescalecronjoboperator_cronjob_time_to_schedule",
Help: "How long did it take to schedule the pod used to execute and instance of the CRONJob in secs",
Buckets: timingBuckets,
}, timingLabels)
var timeInitContainerRanHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "prescalecronjoboperator_cronjob_time_init_container_ran",
Help: "How long did the warmup container run waiting for the cron schedule to trigger in secs",
Buckets: timingBuckets,
}, timingLabels)
var timeToStartWorkloadHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "prescalecronjoboperator_cronjob_time_to_start_workload",
Help: "How long did it take to start the real workload after warmup container stopped in secs",
Buckets: timingBuckets,
}, timingLabels)
var timeDelayOfWorkloadHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "prescalecronjoboperator_cronjob_time_delay_of_workload",
Help: "How long did after it's scheduled start time did the workload actually start in secs",
Buckets: timingBuckets,
}, timingLabels)
func init() {
// Register custom metrics with the global prometheus registry
metrics.Registry.MustRegister(cronjobCounter)
metrics.Registry.MustRegister(timeToScheduleHistogram)
metrics.Registry.MustRegister(timeInitContainerRanHistogram)
metrics.Registry.MustRegister(timeToStartWorkloadHistogram)
metrics.Registry.MustRegister(timeDelayOfWorkloadHistogram)
}
// TrackCronAction increments the metric tracking how many CronJob actions have occurred
func TrackCronAction(action string, success bool) {
outcome := successMetric
if !success {
outcome = failureMetric
}
cronjobCounter.WithLabelValues(action, outcome).Inc()
}


@ -0,0 +1,338 @@
package controllers
import (
"context"
"fmt"
"sort"
"time"
pscv1alpha1 "cronprimer.local/api/v1alpha1"
"github.com/ReneKroon/ttlcache"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/robfig/cron/v3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
var trackedEventsByPod = ttlcache.NewCache()
// PodReconciler reconciles a Pod object
type PodReconciler struct {
client.Client
clientset *kubernetes.Clientset
Log logr.Logger
Recorder record.EventRecorder
InitContainerImage string
}
const (
timeToSchedule = "timeToSchedule"
timeInitContainerRan = "timeInitContainerRan"
timeToStartWorkload = "timeToStartWorkload"
timeDelayOfWorkload = "timeDelayOfWorkload"
scheduledEvent = "Scheduled"
startedInitContainerEvent = "StartedInitContainer"
finishedInitContainerEvent = "FinishedInitContainer"
startedWorkloadContainerEvent = "StartedWorkloadContainer"
)
type podTransitionTimes struct {
createdAt *metav1.Time
scheduledAt *corev1.Event
initStartAt *corev1.Event
initFinishedAt *corev1.Event
workloadStartAt *corev1.Event
transitionsObserved map[string]time.Duration
}
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;patch;watch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=pods/status,verbs=get
// Reconcile watches for Pods created as a result of a PrimedCronJob and tracks metrics against the parent
// PrimedCronJob for that instance by inspecting the events on the pod (for example: late, early, init container runtime)
func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
logger := r.Log.WithValues("pod", req.NamespacedName)
logger.Info(fmt.Sprintf("Starting reconcile loop for %v", req.NamespacedName))
defer logger.Info(fmt.Sprintf("Finish reconcile loop for %v", req.NamespacedName))
podInstance := &corev1.Pod{}
if err := r.Get(ctx, req.NamespacedName, podInstance); err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Failed to get pod")
return ctrl.Result{}, err
}
parentExists, prescaledInstance, err := r.getParentPrescaledCronIfExists(ctx, podInstance)
if err != nil {
logger.Error(err, "Failed to get parent prescaledcronjob")
return ctrl.Result{}, err
}
if !parentExists {
logger.Info("prescaledcronjob no longer exists, likely deleted recently")
return ctrl.Result{}, nil
}
// Let's build some stats
eventsOnPodOverLastHour, err := r.clientset.CoreV1().Events(podInstance.Namespace).List(metav1.ListOptions{
FieldSelector: fields.AndSelectors(fields.OneTermEqualSelector("involvedObject.name", podInstance.Name), fields.OneTermEqualSelector("involvedObject.namespace", podInstance.Namespace)).String(),
})
if err != nil {
return ctrl.Result{}, err
}
// We don't care about this one; it has no events yet
if len(eventsOnPodOverLastHour.Items) < 1 {
return ctrl.Result{}, nil
}
// When we do have some events,
// let's make sure we have them in time order:
// latest -> oldest
allEvents := eventsOnPodOverLastHour.Items
sort.Slice(allEvents, func(i, j int) bool {
return allEvents[i].FirstTimestamp.After(allEvents[j].FirstTimestamp.Time)
})
newEventsSinceLastRun := getNewEventsSinceLastRun(podInstance.Name, allEvents)
// Update last tracked event
// Track with a 75min TTL to ensure the list doesn't grow forever (events exist for 1 hour by default in k8s, so we add a buffer)
trackedEventsByPod.SetWithTTL(podInstance.Name, allEvents[0].UID, time.Minute*75)
// No new events - give up
if len(newEventsSinceLastRun) < 1 {
return ctrl.Result{}, nil
}
// Calculate the timings of transitions between states
timings, err := generateTransitionTimingsFromEvents(allEvents, newEventsSinceLastRun, podInstance.CreationTimestamp, prescaledInstance.Spec.CronJob.Spec.Schedule)
if err != nil {
// generateTransitionTimings errors are only partial faults, so we can log and continue;
// worst case this error means a transition time wasn't available
r.Recorder.Event(prescaledInstance, corev1.EventTypeWarning, "Metrics", err.Error())
}
r.publishMetrics(timings, podInstance, prescaledInstance)
r.Recorder.Event(prescaledInstance, corev1.EventTypeNormal, "Debug", "Metrics calculated for PrescaleCronJob invocation.")
return ctrl.Result{}, nil
}
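// getParentPrescaledCronIfExists looks up the PreScaledCronJob that triggered this pod via the injected
// pod label; exists is false when the pod has no label or the parent has since been deleted.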
func (r *PodReconciler) getParentPrescaledCronIfExists(ctx context.Context, podInstance *corev1.Pod) (exists bool, instance *pscv1alpha1.PreScaledCronJob, err error) {
// Attempt to get the parent name from the pod
prescaledName, exists := podInstance.GetLabels()[primedCronLabel]
if !exists {
return false, nil, nil
}
// Get the prescaled cron which triggered this pod
prescaledInstance := &pscv1alpha1.PreScaledCronJob{}
if err := r.Get(ctx, types.NamespacedName{Name: prescaledName, Namespace: podInstance.Namespace}, prescaledInstance); err != nil {
if errors.IsNotFound(err) {
return false, nil, nil
}
return true, nil, err
}
return true, prescaledInstance, nil
}
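// getNewEventsSinceLastRun walks the events (newest first) and returns only those that arrived after the
// last UID recorded for this pod in the trackedEventsByPod cache.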
func getNewEventsSinceLastRun(podName string, latestEventsFirst []corev1.Event) map[types.UID]corev1.Event {
// Work out which events we've already processed and filter to new events only
eventsSinceLastCheck := map[types.UID]corev1.Event{}
uidOfLastProcessedEvent, isCurrentlyTrackedPod := trackedEventsByPod.Get(podName)
for _, event := range latestEventsFirst {
if isCurrentlyTrackedPod && event.UID == uidOfLastProcessedEvent {
// We've caught up to last seen event
break
}
eventsSinceLastCheck[event.UID] = event
}
return eventsSinceLastCheck
}
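// generateTransitionTimingsFromEvents maps the pod's events onto the lifecycle stages we care about
// (scheduled, init container started/finished, workload started) and calculates the durations between them.
// A duration is only recorded when at least one of its underlying events is new since the last run; the
// workload delay is measured against the next cron firing after the pod was created.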
func generateTransitionTimingsFromEvents(allEvents []corev1.Event, newEventsSinceLastRun map[types.UID]corev1.Event, podCreationTime metav1.Time, cronSchedule string) (podTransitionTimes, error) {
// What do we know?
timings := podTransitionTimes{
createdAt: &podCreationTime,
transitionsObserved: map[string]time.Duration{},
}
// Build up a picture of the timing of the events
for _, event := range allEvents {
eventCaptured := event
interested, eventType := getEventType(eventCaptured)
if !interested {
continue
}
switch eventType {
case scheduledEvent:
timings.scheduledAt = &eventCaptured
case startedInitContainerEvent:
timings.initStartAt = &eventCaptured
case finishedInitContainerEvent:
timings.initFinishedAt = &eventCaptured
case startedWorkloadContainerEvent:
timings.workloadStartAt = &eventCaptured
}
}
// Calculate transition durations which are based off new events
if timings.scheduledAt != nil && isNewEvent(newEventsSinceLastRun, timings.scheduledAt) {
timings.transitionsObserved[timeToSchedule] = timings.scheduledAt.LastTimestamp.Sub(timings.createdAt.Time)
}
if allHaveOccurredWithAtLeastOneNew(newEventsSinceLastRun, timings.initStartAt, timings.initFinishedAt) {
timings.transitionsObserved[timeInitContainerRan] = timings.initFinishedAt.LastTimestamp.Sub(timings.initStartAt.LastTimestamp.Time)
}
if allHaveOccurredWithAtLeastOneNew(newEventsSinceLastRun, timings.initFinishedAt, timings.workloadStartAt) {
timings.transitionsObserved[timeToStartWorkload] = timings.workloadStartAt.LastTimestamp.Sub(timings.initFinishedAt.LastTimestamp.Time)
}
if allHaveOccurredWithAtLeastOneNew(newEventsSinceLastRun, timings.workloadStartAt) {
// Todo: Track as vectored metric by early/late
schedule, err := cron.ParseStandard(cronSchedule)
if err != nil {
return timings, fmt.Errorf("Parital failure generating transition times, failed to parse CRON Schedule: %s", err.Error())
}
expectedStartTimeForWorkload := schedule.Next(podCreationTime.Time)
timings.transitionsObserved[timeDelayOfWorkload] = timings.workloadStartAt.LastTimestamp.Time.Sub(expectedStartTimeForWorkload)
}
return timings, nil
}
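// publishMetrics emits a Kubernetes event per observed transition and records its duration in the matching
// Prometheus histogram, labelled by prescaledcronjob name, node pool and whether the transition was early or late.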
func (r *PodReconciler) publishMetrics(timings podTransitionTimes, pod *corev1.Pod, prescaledInstance *pscv1alpha1.PreScaledCronJob) {
agentpool, exists := pod.Spec.NodeSelector["agentpool"]
if !exists {
agentpool = "noneset"
}
for transitionName, duration := range timings.transitionsObserved {
r.Recorder.Eventf(prescaledInstance, corev1.EventTypeNormal, "Metrics", "Event %s took %s on pod %s", transitionName, duration.String(), pod.Name)
durationSecs := duration.Seconds()
durationType := "late"
if durationSecs < 0 {
durationType = "early"
durationSecs = durationSecs * -1
}
promLabels := prometheus.Labels{"prescalecron": prescaledInstance.Name, "nodepool": agentpool, "durationtype": durationType}
histogram, exists := transitionTimeHistograms[transitionName]
if !exists {
r.Recorder.Eventf(prescaledInstance, corev1.EventTypeWarning, "Metrics", "Failed to track transition time as no histogram defined for %s", transitionName)
// Skip this transition - observing on a missing histogram would panic
continue
}
histogram.With(promLabels).Observe(durationSecs)
}
}
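// allHaveOccurredWithAtLeastOneNew returns true only when every supplied event has been observed (is non-nil)
// and at least one of them is new since the last reconcile.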
func allHaveOccurredWithAtLeastOneNew(newEvents map[types.UID]corev1.Event, events ...*corev1.Event) bool {
atLeastOneNewEvent := false
for _, event := range events {
// If event is nil it hasn't happened yet
if event == nil {
return false
}
// Check at least one of the events is new since last time
// the reconcile loop ran.
if !atLeastOneNewEvent {
atLeastOneNewEvent = isNewEvent(newEvents, event)
}
}
return atLeastOneNewEvent
}
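// isNewEvent reports whether the event is in the set of events seen for the first time in this run.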
func isNewEvent(newEvents map[types.UID]corev1.Event, thisEvent *corev1.Event) bool {
_, existsInDictionary := newEvents[thisEvent.UID]
return existsInDictionary
}
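// getEventType classifies an event into one of the lifecycle stages we track; isInteresting is false for
// events that don't contribute to any transition timing.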
func getEventType(event corev1.Event) (isInteresting bool, eventType string) {
// Is this a scheduler-assigned event?
if event.Reason == "Scheduled" {
return true, scheduledEvent
}
// Kubelet events
if event.Source.Component == "kubelet" {
// Any other field spec is related to the original workload
isRelatedToInitContainer := event.InvolvedObject.FieldPath == fmt.Sprintf("spec.initContainers{%s}", warmupContainerInjectNameUID)
// Are these events related to our init container?
if isRelatedToInitContainer {
if event.Reason == "Started" {
return true, startedInitContainerEvent
}
}
// When the init container is finished the other containers in the pod will get pulled:
// if the image is already on the node only "Pulled" is fired, otherwise "Pulling" -> "Pulled".
// This is useful as it signals that our init container has exited and kubelet
// has moved on to running the original workload
if (event.Reason == "Pulling" || event.Reason == "Pulled") && !isRelatedToInitContainer {
return true, finishedInitContainerEvent
}
// Main workload has started (or at least one of them)
if event.Reason == "Started" && !isRelatedToInitContainer {
return true, startedWorkloadContainerEvent
}
}
return false, ""
}
// SetupWithManager registers the Pod controller with the manager and wires up its event filters
func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Get clientset so we can read events
r.clientset = kubernetes.NewForConfigOrDie(mgr.GetConfig())
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Pod{}).
WithEventFilter(predicate.Funcs{
// We're using the pod controller to watch for the job moving from init -> normal execution;
// given this we don't care about Delete or Create, only Update, and only Updates on
// pods which are downstream of the CronPrimer object
DeleteFunc: func(e event.DeleteEvent) bool {
return false
},
CreateFunc: func(e event.CreateEvent) bool {
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
// Going for an injected pod label instead
if _, exists := e.MetaNew.GetLabels()[primedCronLabel]; exists {
return true
}
return false
},
}).
Complete(r)
}


@ -0,0 +1,110 @@
package controllers
import (
"context"
"fmt"
"log"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/robfig/cron/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
)
var _ = Describe("LONG TEST: Pod Controller metrics", func() {
logger := log.New(GinkgoWriter, "INFO: ", log.Lshortfile)
const timeout = time.Second * 145
const interval = time.Second * 15
ctx := context.Background()
BeforeEach(func() {
// failed test runs that don't clean up leave resources behind.
})
AfterEach(func() {
// Add any teardown steps that need to be executed after each test
})
It("with prescaled cronjob sees metrics events", func() {
// construct a prescaled cron in code + post to K8s
toCreateFull := generatePSCSpec()
toCreate := &toCreateFull
toCreate.Spec.CronJob.Spec.Schedule = "*/1 * * * *"
toCreate.Spec.WarmUpTimeMins = 0
Expect(k8sClient.Create(ctx, toCreate)).To(Succeed(), "Creating prescaled cron primer failed and it shouldn't have")
// Wait till the first execution of the pod has started
schedule, err := cron.ParseStandard(toCreate.Spec.CronJob.Spec.Schedule)
Expect(err).To(BeNil())
expectedPodStartTime := schedule.Next(time.Now()).Add(15 * time.Second)
<-time.After(time.Until(expectedPodStartTime))
// Then mark the primed cron as inactive so we're only testing one instance
err = k8sClient.Get(ctx, types.NamespacedName{Namespace: toCreate.Namespace, Name: toCreate.Name}, toCreate)
Expect(err).To(BeNil())
suspend := true
toCreate.Spec.CronJob.Spec.Suspend = &suspend
Expect(k8sClient.Update(ctx, toCreate)).To(Succeed(), "Failed to suspend cronjob. We need it suspended so multiple instances of the CRON job don't start - we want to test with just one execution")
clientset := kubernetes.NewForConfigOrDie(cfg)
Eventually(func() bool {
events, err := clientset.CoreV1().Events(namespace).List(metav1.ListOptions{
FieldSelector: fields.AndSelectors(
fields.OneTermEqualSelector("involvedObject.name", toCreate.Name),
fields.OneTermEqualSelector("involvedObject.namespace", toCreate.Namespace),
).String(),
})
logger.Println(len(events.Items))
if err != nil {
logger.Println(err.Error())
Fail("we broke")
return false
}
// For each invocation we generate 4 "Metrics" events. Let's check they all came through.
// Todo: make this a better test
// It doesn't quite assert what I want. I want to see that exactly one of each of these timings is reported;
// what is currently done is roughly equivalent, but if timing issues occur you could pass this test with 4x ScheduledAt messages rather than one of each.
// Will add a task to fix this one up.
metricsMessageCount := 0
for _, event := range events.Items {
if strings.Contains(event.Message, timeToSchedule) ||
strings.Contains(event.Message, timeInitContainerRan) ||
strings.Contains(event.Message, timeToStartWorkload) ||
strings.Contains(event.Message, timeDelayOfWorkload) {
metricsMessageCount++
}
}
for _, event := range events.Items {
// We observed an invocation of the job ... Yey!
if event.Reason == "Debug" && strings.HasPrefix(event.Message, "Metrics calculated for PrescaleCronJob invocation.") {
logger.Println("Found debug message from controller")
const expectedNumberOfMetricsEvents = 4
logger.Printf("Checking number of metrics events reported:%d %+v", metricsMessageCount, events)
return metricsMessageCount >= expectedNumberOfMetricsEvents
}
}
logger.Println("Event not found on object yet")
return false
}, timeout, interval).Should(BeTrue())
})
})


@ -0,0 +1,257 @@
package controllers
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/ReneKroon/ttlcache"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
var _ = Describe("Pod Controller", func() {
Context("With a set of preexisting events", func() {
// Create a sample pod with last tracked event set
podName := "bananas"
lastHighWaterMarkUID := "lasthighwatermark"
trackedEventsByPod = ttlcache.NewCache()
trackedEventsByPod.SetWithTTL(podName, types.UID(lastHighWaterMarkUID), time.Hour)
newEvent := corev1.Event{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("new3"),
},
}
oldEvent := corev1.Event{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("old2"),
},
}
// Create a list of sorted events with new and old events either side of the high water mark
sortedEvents := []corev1.Event{
newEvent,
{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(lastHighWaterMarkUID),
},
},
oldEvent,
{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("old1"),
},
},
}
It("ttl cache should expire and remove items", func() {
keyName := "bananas1234"
trackedEventsByPod.SetWithTTL(keyName, true, time.Second*2)
// Wait for item to expire
<-time.After(time.Second * 3)
_, valueExists := trackedEventsByPod.Get(keyName)
Expect(valueExists).To(BeFalse())
})
It("ttl cache should renew ttl time when items read", func() {
keyName := "bananas456"
trackedEventsByPod.SetWithTTL(keyName, true, time.Second*5)
<-time.After(time.Second * 2)
// Read before expiry
_, valueExists := trackedEventsByPod.Get(keyName)
Expect(valueExists).To(BeTrue())
<-time.After(time.Second * 4)
// Read before expiry
_, valueExists = trackedEventsByPod.Get(keyName)
Expect(valueExists).To(BeTrue())
<-time.After(time.Second * 4)
// Read before expiry
_, valueExists = trackedEventsByPod.Get(keyName)
Expect(valueExists).To(BeTrue())
// Let it expire
<-time.After(time.Second * 6)
_, valueExists = trackedEventsByPod.Get(keyName)
Expect(valueExists).To(BeFalse())
})
It("getNewEventsSinceLastRun should only return new events", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
_, expectMissing := newEvents["old1"]
Expect(expectMissing).To(BeFalse())
_, expectMissingHighWater := newEvents["lasthighwatermark"]
Expect(expectMissingHighWater).To(BeFalse())
_, expectContains := newEvents["new3"]
Expect(expectContains).To(BeTrue())
})
It("haveNewEventsOccurred should return true", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
result := allHaveOccurredWithAtLeastOneNew(newEvents, &newEvent)
Expect(result).To(BeTrue())
})
It("haveNewEventsOccurred should return true at least one new event", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
result := allHaveOccurredWithAtLeastOneNew(newEvents, &newEvent, &oldEvent)
Expect(result).To(BeTrue())
})
It("haveNewEventsOccurred should return false with old events", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
result := allHaveOccurredWithAtLeastOneNew(newEvents, &oldEvent, &oldEvent)
Expect(result).To(BeFalse())
})
It("isNewEvent should return true for new event", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
result := isNewEvent(newEvents, &newEvent)
Expect(result).To(BeTrue())
})
It("isNewEvent should return false for old", func() {
newEvents := getNewEventsSinceLastRun(podName, sortedEvents)
result := isNewEvent(newEvents, &oldEvent)
Expect(result).To(BeFalse())
})
})
Context("With a set of kubelet and scheduler events", func() {
scheduledSampleEvent := mustReadEventFromFile("../testdata/events/scheduledEvent.json")
initStartedSampleEvent := mustReadEventFromFile("../testdata/events/initStartedEvent.json")
workloadPullEvent := mustReadEventFromFile("../testdata/events/workloadPulledEvent.json")
workloadStartedEvent := mustReadEventFromFile("../testdata/events/workloadStartedEvent.json")
uninterestingEvent := mustReadEventFromFile("../testdata/events/uninterestingEvent.json")
cronSchedule := "*/1 * * * *"
It("Should correctly identify initStartedEvent", func() {
isInteresting, eventType := getEventType(initStartedSampleEvent)
Expect(isInteresting).To(BeTrue())
Expect(eventType).To(Equal(startedInitContainerEvent))
})
It("Should correctly identify scheduledEvent", func() {
isInteresting, eventType := getEventType(scheduledSampleEvent)
Expect(isInteresting).To(BeTrue())
Expect(eventType).To(Equal(scheduledEvent))
})
It("Should correctly identify workloadPullEvent", func() {
isInteresting, eventType := getEventType(workloadPullEvent)
Expect(isInteresting).To(BeTrue())
Expect(eventType).To(Equal(finishedInitContainerEvent))
})
It("Should correctly identify workloadStartedEvent", func() {
isInteresting, eventType := getEventType(workloadStartedEvent)
Expect(isInteresting).To(BeTrue())
Expect(eventType).To(Equal(startedWorkloadContainerEvent))
})
It("Should correctly identify unintestingEvent", func() {
isInteresting, eventType := getEventType(unintestingEvent)
Expect(isInteresting).To(BeFalse())
Expect(eventType).To(Equal(""))
})
Context("testing generateTransitionTimingsFromEvents with events", func() {
allEvents := []corev1.Event{
scheduledSampleEvent, initStartedSampleEvent, workloadPullEvent, workloadStartedEvent,
}
newEvents := map[types.UID]corev1.Event{
scheduledSampleEvent.UID: scheduledSampleEvent,
initStartedSampleEvent.UID: initStartedSampleEvent,
workloadPullEvent.UID: workloadPullEvent,
workloadStartedEvent.UID: workloadStartedEvent,
}
parsedTime, err := time.Parse(time.RFC3339, "2020-01-29T12:03:05Z")
if err != nil {
panic(err)
}
creationTime := metav1.NewTime(parsedTime)
timings, err := generateTransitionTimingsFromEvents(allEvents, newEvents, creationTime, cronSchedule)
It("Shouldn't error", func() {
Expect(err).To(BeNil())
})
It("Should get correct dates from events", func() {
Expect(timings.createdAt).NotTo(Equal(nil))
Expect(timings.scheduledAt).NotTo(Equal(nil))
Expect(timings.initStartAt).NotTo(Equal(nil))
Expect(timings.initFinishedAt).NotTo(Equal(nil))
Expect(timings.workloadStartAt).NotTo(Equal(nil))
outputFormat := "2006-01-02T15:04:05Z" // Format the date into format used in event.json files
Expect(timings.scheduledAt.LastTimestamp.Format(outputFormat)).To(Equal("2020-01-29T12:03:07Z")) // Time from scheduledEvent.json
Expect(timings.initStartAt.LastTimestamp.Format(outputFormat)).To(Equal("2020-01-29T12:03:08Z")) // Time from initStartedEvent.json
Expect(timings.initFinishedAt.LastTimestamp.Format(outputFormat)).To(Equal("2020-01-29T12:04:03Z")) // Time from workloadPulledEvent.json
Expect(timings.workloadStartAt.LastTimestamp.Format(outputFormat)).To(Equal("2020-01-29T12:04:05Z")) // Time from workloadStartedEvent.json
})
It("Should get correct transition times", func() {
Expect(timings.transitionsObserved[timeToSchedule].String()).To(Equal("2s")) // Time between Create and Schedule
Expect(timings.transitionsObserved[timeInitContainerRan].String()).To(Equal("55s")) // Time between initStartedEvent.json and workloadPulledEvent.json
Expect(timings.transitionsObserved[timeToStartWorkload].String()).To(Equal("2s")) // Time between workloadPulledEvent.json and workloadStartedEvent.json
Expect(timings.transitionsObserved[timeDelayOfWorkload].String()).To(Equal("5s")) // Based on the cron schedule how late was the workload
})
It("Should only calculate new transition times", func() {
reducedNewEvents := map[types.UID]corev1.Event{
workloadPullEvent.UID: workloadPullEvent,
workloadStartedEvent.UID: workloadStartedEvent,
}
expectedReducedTimings, err := generateTransitionTimingsFromEvents(allEvents, reducedNewEvents, creationTime, cronSchedule)
Expect(err).To(BeNil())
_, timeToScheduleExists := expectedReducedTimings.transitionsObserved[timeToSchedule]
Expect(timeToScheduleExists).To(BeFalse())
Expect(expectedReducedTimings.transitionsObserved[timeInitContainerRan].String()).To(Equal("55s"))
Expect(expectedReducedTimings.transitionsObserved[timeToStartWorkload].String()).To(Equal("2s"))
})
})
})
})
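// mustReadEventFromFile loads a corev1.Event fixture from a JSON file, panicking on any error so test setup fails loudly.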
func mustReadEventFromFile(filepath string) corev1.Event {
dat, err := ioutil.ReadFile(filepath)
if err != nil {
panic(err)
}
event := &corev1.Event{}
if err := json.Unmarshal(dat, event); err != nil {
panic(err)
}
return *event
}


@ -0,0 +1,265 @@
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
pscv1alpha1 "cronprimer.local/api/v1alpha1"
)
// PreScaledCronJobReconciler reconciles a PreScaledCronJob object
type PreScaledCronJobReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
InitContainerImage string
}
// +kubebuilder:rbac:groups=psc.cronprimer.local,resources=prescaledcronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=psc.cronprimer.local,resources=prescaledcronjobs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=patch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list
// +kubebuilder:rbac:groups="",resources=pods/status,verbs=get
const (
objectHashField = "pscObjectHash"
finalizerName = "foregroundDeletion"
primedCronLabel = "primedcron"
warmupContainerInjectNameUID = "injected-0d825b4f-07f0-4952-8150-fba894c613b1"
)
// Reconcile takes the PreScaled request and creates a regular cron, n mins earlier.
func (r *PreScaledCronJobReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
logger := r.Log.WithValues("prescaledcronjob", req.NamespacedName)
logger.Info(fmt.Sprintf("Starting reconcile loop for %v", req.NamespacedName))
defer logger.Info(fmt.Sprintf("Finish reconcile loop for %v", req.NamespacedName))
// instance = the submitted prescaledcronjob CRD
instance := &pscv1alpha1.PreScaledCronJob{}
if err := r.Get(ctx, req.NamespacedName, instance); err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Failed to get prescaledcronjob")
return ctrl.Result{}, err
}
// allow cascade delete of child resources - "foregroundDeletion" tells k8s we want children cleaned up
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
if !containsString(instance.ObjectMeta.Finalizers, finalizerName) {
logger.Info(fmt.Sprintf("AddFinalizer for %v", req.NamespacedName))
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizerName)
if err := r.Update(ctx, instance); err != nil {
r.Recorder.Event(instance, corev1.EventTypeWarning, "Adding finalizer", fmt.Sprintf("Failed to add finalizer: %s", err))
TrackCronAction(CronJobDeletedMetric, false)
return ctrl.Result{}, err
}
r.Recorder.Event(instance, corev1.EventTypeNormal, "Adding finalizer", "Object finalizer is added")
TrackCronAction(CronJobDeletedMetric, true)
return ctrl.Result{}, nil
}
}
// Generate the cron we'll post and get a hash for it
cronToPost, cronGenErr := r.generateCronJob(instance)
if cronGenErr != nil {
r.Recorder.Event(instance, corev1.EventTypeWarning, "Invalid cron schedule", fmt.Sprintf("Failed to generate cronjob: %s", cronGenErr))
logger.Error(cronGenErr, "Failed to generate cronjob")
return ctrl.Result{}, nil
}
objectHash, err := Hash(cronToPost, 1)
if err != nil {
logger.Error(err, "Failed to hash cronjob")
return ctrl.Result{}, err
}
existingCron, err := r.getCronJob(ctx, cronToPost.Name, cronToPost.Namespace)
if err != nil {
// did we get an error because the cronjob doesn't exist?
if errors.IsNotFound(err) {
return r.createCronJob(ctx, cronToPost, objectHash, instance, logger)
}
// we hit an unexpected problem getting the cron, fail the reconcile loop
logger.Error(err, "Failed to get associated cronjob")
return ctrl.Result{}, err
}
// we found a CronJob, let's update it
return r.updateCronJob(ctx, existingCron, cronToPost, objectHash, instance, logger)
}
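// generateCronJob builds the CronJob the operator will own: it deep-copies the user-supplied template,
// labels the pod template so the pod controller can find it, moves the schedule earlier onto the primer
// schedule, injects the warmup init container and sets the owner reference back to the CRD instance.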
func (r *PreScaledCronJobReconciler) generateCronJob(instance *pscv1alpha1.PreScaledCronJob) (*batchv1beta1.CronJob, error) {
// Deep copy the cron
cronToPost := instance.Spec.CronJob.DeepCopy()
// add a label so we can watch the pods for metrics generation
if cronToPost.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels != nil {
cronToPost.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels[primedCronLabel] = instance.Name
} else {
cronToPost.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels = map[string]string{
primedCronLabel: instance.Name,
}
}
// get original cron schedule
scheduleSpec := instance.Spec.CronJob.Spec.Schedule
warmUpTimeMins := instance.Spec.WarmUpTimeMins
primerSchedule := instance.Spec.PrimerSchedule
// Get the new schedule for the cron
primerSchedule, err := GetPrimerSchedule(scheduleSpec, warmUpTimeMins, primerSchedule)
if err != nil {
return nil, fmt.Errorf("Failed parse primer schedule: %s", err)
}
// update cron schedule of primer cronjob
cronToPost.Spec.Schedule = primerSchedule
// Create + Add the init container that runs on the primed cron schedule
// and will die on the CRONJOB_SCHEDULE
initContainer := corev1.Container{
Name: warmupContainerInjectNameUID, // The warmup container has UID to allow pod controller to identify it reliably
Image: r.InitContainerImage,
Env: []corev1.EnvVar{
{
Name: "NAMESPACE",
Value: instance.Namespace,
},
{
Name: "CRONJOB_SCHEDULE",
Value: scheduleSpec,
},
},
}
// set the owner reference on the autogenerated job so it's cleaned up with the parent
ownerRef := v1.OwnerReference{
APIVersion: instance.APIVersion,
Kind: instance.Kind,
Name: instance.Name,
UID: instance.UID,
}
cronToPost.ObjectMeta.OwnerReferences = append(cronToPost.ObjectMeta.OwnerReferences, ownerRef)
// prepend the warmup init container to the init containers array
cronToPost.Spec.JobTemplate.Spec.Template.Spec.InitContainers = append([]corev1.Container{initContainer}, cronToPost.Spec.JobTemplate.Spec.Template.Spec.InitContainers...)
// Add a dynamic name to the cron so we can relate one to the other
autoGenName := "autogen-" + instance.ObjectMeta.Name
cronToPost.ObjectMeta.Name = autoGenName
cronToPost.ObjectMeta.Namespace = instance.ObjectMeta.Namespace
return cronToPost, nil
}
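// getCronJob fetches the named CronJob from the cluster.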
func (r *PreScaledCronJobReconciler) getCronJob(ctx context.Context, name string, namespace string) (*batchv1beta1.CronJob, error) {
key := client.ObjectKey{
Name: name,
Namespace: namespace,
}
cron := batchv1beta1.CronJob{}
err := r.Client.Get(ctx, key, &cron)
return &cron, err
}
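// createCronJob posts the generated CronJob with its object-hash annotation and records the outcome as events and metrics.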
func (r *PreScaledCronJobReconciler) createCronJob(ctx context.Context, cronToPost *batchv1beta1.CronJob, objectHash string,
instance *pscv1alpha1.PreScaledCronJob, logger logr.Logger) (ctrl.Result, error) {
logger.Info(fmt.Sprintf("Creating cronjob: %v", cronToPost.ObjectMeta.Name))
// Add the object hash as an annotation so we can compare with future updates
if cronToPost.ObjectMeta.Annotations == nil {
cronToPost.ObjectMeta.Annotations = map[string]string{}
}
cronToPost.ObjectMeta.Annotations[objectHashField] = objectHash
if err := r.Client.Create(ctx, cronToPost); err != nil {
r.Recorder.Event(instance, corev1.EventTypeWarning, "Create cronjob failed", fmt.Sprintf("Failed to create cronjob: %s", err))
TrackCronAction(CronJobCreatedMetric, false)
return ctrl.Result{}, err
}
r.Recorder.Event(instance, corev1.EventTypeNormal, "Create cronjob successful", fmt.Sprintf("Created associated cronjob: %s", cronToPost.Name))
TrackCronAction(CronJobCreatedMetric, true)
return ctrl.Result{}, nil
}
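// updateCronJob refuses to touch CronJobs it doesn't own, skips the update when the object hash is unchanged,
// and otherwise pushes the regenerated spec and hash annotation to the cluster.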
func (r *PreScaledCronJobReconciler) updateCronJob(ctx context.Context, existingCron *batchv1beta1.CronJob, cronToPost *batchv1beta1.CronJob,
objectHash string, instance *pscv1alpha1.PreScaledCronJob, logger logr.Logger) (ctrl.Result, error) {
logger.Info(fmt.Sprintf("Found associated cronjob: %v", existingCron.ObjectMeta.Name))
// does this belong to us? if not - leave it alone and record a warning
canUpdate := false
for _, ref := range existingCron.ObjectMeta.OwnerReferences {
if ref.UID == instance.UID {
canUpdate = true
break
}
}
if !canUpdate {
r.Recorder.Event(instance, corev1.EventTypeWarning, "Cronjob already exists", fmt.Sprintf("A cronjob with this name already exists, and was not created by this operator : %s", existingCron.ObjectMeta.Name))
logger.Info(fmt.Sprintf("A cronjob with this name already exists, and was not created by this operator : %s", existingCron.ObjectMeta.Name))
return ctrl.Result{}, nil
}
// Is it the same as what we've just generated?
if existingCron.ObjectMeta.Annotations[objectHashField] == objectHash {
// it's the same - no-op
logger.Info("Autogenerated cronjob has not changed, will not recreate")
return ctrl.Result{}, nil
}
// it's been updated somehow - let's update the cronjob
existingCron.Spec = cronToPost.Spec
if existingCron.ObjectMeta.Annotations == nil {
existingCron.ObjectMeta.Annotations = map[string]string{}
}
existingCron.ObjectMeta.Annotations[objectHashField] = objectHash
if err := r.Client.Update(ctx, existingCron); err != nil {
r.Recorder.Event(instance, corev1.EventTypeWarning, "Update of cronjob failed", fmt.Sprintf("Failed to update cronjob: %s", err))
logger.Error(err, "Failed to update cronjob")
TrackCronAction(CronJobUpdatedMetric, false)
return ctrl.Result{}, err
}
r.Recorder.Event(instance, corev1.EventTypeNormal, "Update of cronjob successful", fmt.Sprintf("Updated associated cronjob: %s", existingCron.Name))
logger.Info("Successfully updated cronjob")
TrackCronAction(CronJobUpdatedMetric, true)
return ctrl.Result{}, nil
}
// Helper function to check whether a slice of strings contains a given string.
func containsString(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
// SetupWithManager registers the PreScaledCronJob controller with the manager
func (r *PreScaledCronJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&pscv1alpha1.PreScaledCronJob{}).
Complete(r)
}


@ -0,0 +1,219 @@
package controllers
import (
"context"
"fmt"
"strings"
"time"
pscv1alpha1 "cronprimer.local/api/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/robfig/cron/v3"
)
type testCase struct {
minsApart int
warmUpMins int
shouldPass bool
shouldError bool
}
const (
timeFormat = "15:04:05"
)
var _ = Describe("PrescaledCronJob Controller - Integration Tests", func() {
DescribeTable("Integration test configurations",
func(testCase testCase) {
// generate a random name for the job so parallel jobs don't clash
jobName := "psc-test-int-" + randString()
// make sure we clean up this psc even if the assertions fail / something goes wrong
defer deletePsc(jobName)
result, err := runTest(testCase.minsApart, testCase.warmUpMins, jobName)
Expect(result).To(Equal(testCase.shouldPass))
Expect(err != nil).To(Equal(testCase.shouldError))
},
// add a line per test case here...
Entry("LONG TEST: 1 minute, 1 warmup", testCase{minsApart: 1, warmUpMins: 1, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 3 minutes, 1 warmup", testCase{minsApart: 3, warmUpMins: 2, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 4 minutes, 1 warmup", testCase{minsApart: 4, warmUpMins: 2, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 4 minutes, 3 warmup", testCase{minsApart: 4, warmUpMins: 3, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 5 minutes, 1 warmup", testCase{minsApart: 5, warmUpMins: 4, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 5 minutes, 5 warmup", testCase{minsApart: 5, warmUpMins: 5, shouldPass: true, shouldError: false}),
Entry("LONG TEST: 10 minutes, 2 warmup", testCase{minsApart: 10, warmUpMins: 4, shouldPass: true, shouldError: false}),
)
})
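// runTest posts a PreScaledCronJob with the given schedule spacing and warmup time, then watches the resulting
// pod: before the real schedule fires only the init container may run, and afterwards the workload container
// must be running with the init container terminated. It returns whether those expectations held and any setup error.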
func runTest(minsApart int, warmUpMins int, jobName string) (passed bool, errored error) {
ctx := context.Background()
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
toCreate.Name = jobName
autoGenCronName := "autogen-" + jobName
toCreate.Spec.CronJob.Spec.Schedule = fmt.Sprintf("*/%d * * * *", minsApart)
toCreate.Spec.WarmUpTimeMins = warmUpMins
if err := k8sClient.Create(ctx, &toCreate); err != nil {
return false, err
}
time.Sleep(time.Second * 10)
// get the autogenerated cron back
fetchedAutogenCron := &batchv1beta1.CronJob{}
if err := k8sClient.Get(ctx, types.NamespacedName{Name: autoGenCronName, Namespace: namespace}, fetchedAutogenCron); err != nil {
return false, err
}
// get the next iteration of the prescaled cron. 'next' = the time the *workload* container should start
schedule, schErr := cron.ParseStandard(toCreate.Spec.CronJob.Spec.Schedule)
if schErr != nil {
return false, schErr
}
next := schedule.Next(fetchedAutogenCron.GetCreationTimestamp().Time)
// if we're within the 'warmup time' when we post this, the actual cron won't get triggered until the next iteration.
if fetchedAutogenCron.GetCreationTimestamp().Time.After(next.Add(time.Duration(-toCreate.Spec.WarmUpTimeMins) * time.Minute)) {
fmt.Println(fmt.Sprintf("\nWithin warmup zone, will wait for next iteration."))
next = next.Add(time.Duration(minsApart) * time.Minute)
}
fmt.Println(fmt.Sprintf("\nWorkload container should start at: %v", next.Format(timeFormat)))
hasSuspended := false
// before the workload container should start - either the cluster is scaling or the init container is running here
for next.After(time.Now()) {
// try and get the pod
if jobPod, err := getPod(ctx, autoGenCronName); jobPod.Name != "" && err == nil {
if len(jobPod.Status.InitContainerStatuses) > 0 {
if jobPod.Status.InitContainerStatuses[0].State.Waiting != nil {
fmt.Println(fmt.Sprintf("Time: %v :: Pod exists, phase --> Init container starting", time.Now().Format(timeFormat)))
}
if jobPod.Status.InitContainerStatuses[0].State.Running != nil {
fmt.Println(fmt.Sprintf("Time: %v :: Pod exists, phase --> Init container running", time.Now().Format(timeFormat)))
}
} else {
fmt.Println(fmt.Sprintf("Time: %v :: Pod exists, phase --> %v", time.Now().Format(timeFormat), jobPod.Status.Phase))
}
// suspend the cron to prevent clogging up the cluster with pods we don't care about
if !hasSuspended {
if err := k8sClient.Get(ctx, types.NamespacedName{Name: autoGenCronName, Namespace: namespace}, fetchedAutogenCron); err != nil {
return false, err
}
suspend := true
fetchedAutogenCron.Spec.Suspend = &suspend
if err := k8sClient.Update(ctx, fetchedAutogenCron); err != nil {
fmt.Println("Failed to suspend cronjob. We need it suspended so multiple instances of the CRON job don't start - we want to test with just one execution")
return false, err
}
hasSuspended = true
}
// expect workload container to *not* be running
if len(jobPod.Status.ContainerStatuses) > 0 && jobPod.Status.ContainerStatuses[0].State.Running != nil {
fmt.Println("Workload container should not be running before the schedule: FAIL")
return false, nil
}
} else {
// no pod there - not supposed to have started yet, or cluster is scaling
fmt.Println(fmt.Sprintf("Time: %v :: No pod --> waiting", time.Now().Format(timeFormat)))
}
time.Sleep(time.Second * 5)
}
// let the pod get scheduled
time.Sleep(time.Second * 30)
fmt.Println(fmt.Sprintf("\nTime: %v :: Cron iteration passed - job should now be running. Checking status ...", time.Now().Format(timeFormat)))
// get the pod to check what's happening
jobPod, err := getPod(ctx, autoGenCronName)
if err != nil {
return false, err
}
fmt.Println(fmt.Sprintf("Got job: %v :: Phase --> Workload container %v", jobPod.Name, jobPod.Status.Phase))
// nicer check for the phase / status here
if len(jobPod.Status.ContainerStatuses) == 0 || len(jobPod.Status.InitContainerStatuses) == 0 {
fmt.Println("Pod has no status. Check your test setup, it's possible the pod couldn't be scheduled: FAIL")
return false, nil
}
// expect the workload to be running and the init to be terminated. NOTE: This may fail if the workload image is being pulled - TBC
if jobPod.Status.ContainerStatuses[0].State.Running == nil {
fmt.Println("Workload container not running: FAIL")
return false, nil
}
if jobPod.Status.ContainerStatuses[0].State.Terminated != nil {
fmt.Println("Workload container terminated: FAIL")
return false, nil
}
if jobPod.Status.InitContainerStatuses[0].State.Terminated == nil {
fmt.Println("Init container still running: FAIL")
return false, nil
}
fmt.Println(fmt.Sprintf("\n\nWorkload started at: %v", jobPod.Status.ContainerStatuses[0].State.Running.StartedAt))
fmt.Println(fmt.Sprintf("Init container completed at: %v", jobPod.Status.InitContainerStatuses[0].State.Terminated.FinishedAt))
fmt.Println(fmt.Sprintf("Workload started %v seconds after schedule\n\n", jobPod.Status.ContainerStatuses[0].State.Running.StartedAt.Sub(next).Seconds()))
return true, nil
}
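// getPod returns the first pod in the test namespace whose name starts with podPrefix, or a zero-value Pod if none exists yet.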
func getPod(ctx context.Context, podPrefix string) (v1.Pod, error) {
podList := &v1.PodList{}
opts := client.ListOptions{
Namespace: namespace,
}
err := k8sClient.List(ctx, podList, &opts)
fetchedPod := v1.Pod{}
for _, pod := range podList.Items {
if strings.HasPrefix(pod.Name, podPrefix) {
fetchedPod = pod
break
}
}
return fetchedPod, err
}
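// deletePsc best-effort deletes the named PreScaledCronJob so failed test runs don't leave resources behind.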
func deletePsc(jobName string) {
// Delete this psc
ctx := context.Background()
psc := &pscv1alpha1.PreScaledCronJob{}
if err := k8sClient.Get(ctx, types.NamespacedName{Name: jobName, Namespace: namespace}, psc); err != nil {
return
}
if err := k8sClient.Delete(ctx, psc); err != nil {
return
}
}


@ -0,0 +1,360 @@
package controllers
import (
"context"
"math/rand"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
//"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
pscv1alpha1 "cronprimer.local/api/v1alpha1"
)
var namespace = "psc-system"
var namePrefix = "psc-test-local-"
var autogenPrefix = "autogen-"
var _ = Describe("PrescaledCronJob Controller", func() {
const timeout = time.Second * 60
const interval = time.Second * 1
ctx := context.Background()
BeforeEach(func() {
// failed test runs that don't clean up leave resources behind.
})
AfterEach(func() {
})
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.
Context("Cronjob Autogeneration", func() {
It("Should create cronjob correctly", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
autogenName := autogenPrefix + toCreate.Name
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
time.Sleep(time.Second * 5)
fetched := &pscv1alpha1.PreScaledCronJob{}
fetchedAutogenCron := &batchv1beta1.CronJob{}
// check the CRD was created ok
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, fetched)
return err == nil
}, timeout, interval).Should(BeTrue())
// get + compare the autogenerated cronjob
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return err == nil
}, timeout, interval).Should(BeTrue())
// test the cronjob props
Expect(len(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers)).To(Equal(1))
Expect(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers[0].Name).To(Equal(warmupContainerInjectNameUID))
Expect(fetchedAutogenCron.Name).To(Equal(autogenName))
Expect(fetchedAutogenCron.Spec.Schedule).To(Equal("20 * * 10 *"))
Expect(len(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers)).To(Equal(1))
Expect(fetchedAutogenCron.OwnerReferences[0].UID).To(Equal(fetched.UID))
})
})
It("Should update the CRD and cronjob correctly", func() {
toCreate := generatePSCSpec()
autogenName := autogenPrefix + toCreate.Name
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
time.Sleep(time.Second * 5)
original := &pscv1alpha1.PreScaledCronJob{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, original)).Should(Succeed())
original.Spec.CronJob.Spec.Schedule = "30 * * 10 *"
original.Spec.WarmUpTimeMins = 1
Expect(k8sClient.Update(ctx, original)).Should(Succeed())
time.Sleep(time.Second * 10)
fetched := &pscv1alpha1.PreScaledCronJob{}
fetchedAutogenCron := &batchv1beta1.CronJob{}
// check the CRD was updated ok
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, fetched)
return err == nil
}, timeout, interval).Should(BeTrue())
// check the custom CRD schedule has updated
Expect(fetched.Spec.CronJob.Spec.Schedule).To(Equal("30 * * 10 *"))
Expect(fetched.Spec.WarmUpTimeMins).To(Equal(1))
// get + compare the autogenerated cronjob
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return err == nil
}, timeout, interval).Should(BeTrue())
// check the autogenerated cronjob has been updated
Expect(fetchedAutogenCron.Spec.Schedule).To(Equal("29 * * 10 *"))
})
It("Should delete the CRD and autogenerated cronjob", func() {
toCreate := generatePSCSpec()
autogenName := autogenPrefix + toCreate.Name
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
time.Sleep(time.Second * 5)
original := &pscv1alpha1.PreScaledCronJob{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, original)).Should(Succeed())
By("Deleting the prescaled cron job CRD")
Expect(k8sClient.Delete(ctx, original)).Should(Succeed())
time.Sleep(time.Second * 15)
// check the psc has gone
Eventually(func() bool {
fetched := &pscv1alpha1.PreScaledCronJob{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, fetched)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
Eventually(func() bool {
fetchedAutogenCron := &batchv1beta1.CronJob{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
})
It("Should add the warmup initContainer as the first item in initContainers", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
autogenName := autogenPrefix + toCreate.Name
toCreate.Spec.CronJob.Spec.JobTemplate.Spec.Template.Spec.InitContainers =
[]v1.Container{
{
Image: "busybox",
Name: "iatric",
Args: []string{
"/bin/sh",
"-c",
"date",
},
},
}
By("Creating the prescaled cron job CRD")
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
time.Sleep(time.Second * 5)
fetched := &pscv1alpha1.PreScaledCronJob{}
fetchedAutogenCron := &batchv1beta1.CronJob{}
// check the CRD was created ok
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, fetched)
return err == nil
}, timeout, interval).Should(BeTrue())
// get + compare the autogenerated cronjob
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return err == nil
}, timeout, interval).Should(BeTrue())
// test the cronjob props
Expect(len(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers)).To(Equal(2))
Expect(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers[0].Name).To(Equal(warmupContainerInjectNameUID))
Expect(fetchedAutogenCron.Spec.JobTemplate.Spec.Template.Spec.InitContainers[1].Name).To(Equal("iatric"))
Expect(fetchedAutogenCron.Name).To(Equal(autogenName))
Expect(fetchedAutogenCron.Spec.Schedule).To(Equal("20 * * 10 *"))
Expect(fetchedAutogenCron.OwnerReferences[0].UID).To(Equal(fetched.UID))
original := &pscv1alpha1.PreScaledCronJob{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, original)).Should(Succeed())
By("Deleting the prescaled cron job CRD")
Expect(k8sClient.Delete(ctx, original)).Should(Succeed())
time.Sleep(time.Second * 30)
})
var _ = Describe("PrescaledCronJob Controller Unhappy Path", func() {
const timeout = time.Second * 60
const interval = time.Second * 1
ctx := context.Background()
Context("Invalid Cron Schedule", func() {
It("Should create the psc but not a cron", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
autogenName := autogenPrefix + toCreate.Name
toCreate.Spec.CronJob.Spec.Schedule = "bananas"
By("Creating the prescaled cron job CRD")
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
time.Sleep(time.Second * 5)
fetched := &pscv1alpha1.PreScaledCronJob{}
// check the CRD was created and the controller hasn't crashed
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: toCreate.Name, Namespace: namespace}, fetched)
return err == nil
}, timeout, interval).Should(BeTrue())
// should not have a downstream cron
Eventually(func() bool {
fetchedAutogenCron := &batchv1beta1.CronJob{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
})
})
Context("Empty Cron Template", func() {
It("Should reject the call", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
toCreate.Spec.CronJob = batchv1beta1.CronJob{}
By("Creating the prescaled cron job CRD")
Expect(k8sClient.Create(ctx, &toCreate)).ShouldNot(Succeed())
})
})
Context("Duplicate", func() {
It("Should reject the create", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
By("Posting the initial psc")
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
By("Posting the duplicate")
Expect(k8sClient.Create(ctx, &toCreate)).ShouldNot(Succeed())
})
})
Context("Existing cron with same name", func() {
It("Should not overwrite existing cron", func() {
// construct a prescaled cron in code + post to K8s
toCreate := generatePSCSpec()
// post a manual cron object
cron := toCreate.Spec.CronJob.DeepCopy()
autogenName := autogenPrefix + toCreate.Name
cron.Name = autogenName
cron.Namespace = namespace
Expect(k8sClient.Create(ctx, cron)).Should(Succeed())
// now post the psc
Expect(k8sClient.Create(ctx, &toCreate)).Should(Succeed())
// wait a few
time.Sleep(time.Second * 5)
// check the original cronjob has its previous schedule and not been overwritten
fetchedAutogenCron := &batchv1beta1.CronJob{}
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: autogenName, Namespace: namespace}, fetchedAutogenCron)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(fetchedAutogenCron.Spec.Schedule).To(Equal("30 * * 10 *"))
})
})
})
})
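// generatePSCSpec builds a minimal PreScaledCronJob with a 10-minute warmup, a busybox workload and a
// randomised name, used as the starting point for every test case.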
func generatePSCSpec() pscv1alpha1.PreScaledCronJob {
spec := pscv1alpha1.PreScaledCronJobSpec{
WarmUpTimeMins: 10,
CronJob: batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "my-name-will-change",
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "30 * * 10 *",
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "busybox",
Name: "test-busybox",
Args: []string{
"/bin/sh",
"-c",
"date; sleep 90s; date;",
},
// Uncomment below when testing against a real cluster, to force some scaling
//Resources: v1.ResourceRequirements{
// Requests: v1.ResourceList{
// "memory": resource.MustParse("1Gi"),
// "cpu": resource.MustParse("1"),
// },
//},
},
},
RestartPolicy: "OnFailure",
InitContainers: []v1.Container{},
},
},
},
},
},
},
}
toCreate := pscv1alpha1.PreScaledCronJob{
ObjectMeta: metav1.ObjectMeta{
Name: namePrefix + randString(),
Namespace: namespace,
},
Spec: spec,
}
return toCreate
}
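// randString returns a 6-character lowercase string used to keep test resource names unique.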
func randString() string {
rand.Seed(time.Now().UnixNano())
chars := []rune("abcdefghijklmnopqrstuvwxyz")
length := 6
var b strings.Builder
for i := 0; i < length; i++ {
b.WriteRune(chars[rand.Intn(len(chars))])
}
return b.String()
}

259
controllers/structhash.go Normal file

@ -0,0 +1,259 @@
package controllers
// https://raw.githubusercontent.com/cnf/structhash/master/structhash.go
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
)
// Version returns the version of the supplied hash as an integer
// or -1 on failure
func Version(h string) int {
if h == "" {
return -1
}
if h[0] != 'v' {
return -1
}
if spos := strings.IndexRune(h[1:], '_'); spos >= 0 {
n, e := strconv.Atoi(h[1 : spos+1])
if e != nil {
return -1
}
return n
}
return -1
}
// Hash takes a data structure and returns a hash string of that data structure
// at the version asked.
//
// This function uses md5 hashing function and default formatter. See also Dump()
// function.
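//
// Example (hypothetical struct; the version tags behave like the ones in the tests for this file):
//
//	type User struct {
//		Name string `version:"1"`
//		Age  int    `version:"2"`
//	}
//	h, _ := Hash(User{Name: "alice", Age: 30}, 1) // only Name participates at version 1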
func Hash(c interface{}, version int) (string, error) {
return fmt.Sprintf("v%d_%x", version, Md5(c, version)), nil
}
// Dump takes a data structure and returns its byte representation. This can be
// useful if you need to use your own hashing function or formatter.
func Dump(c interface{}, version int) []byte {
return serialize(c, version)
}
// Md5 takes a data structure and returns its md5 hash.
// This is a shorthand for md5.Sum(Dump(c, version)).
func Md5(c interface{}, version int) []byte {
sum := md5.Sum(Dump(c, version))
return sum[:]
}
// Sha1 takes a data structure and returns its sha1 hash.
// This is a shorthand for sha1.Sum(Dump(c, version)).
func Sha1(c interface{}, version int) []byte {
sum := sha1.Sum(Dump(c, version))
return sum[:]
}
type item struct {
name string
value reflect.Value
}
type itemSorter []item
func (s itemSorter) Len() int {
return len(s)
}
func (s itemSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s itemSorter) Less(i, j int) bool {
return s[i].name < s[j].name
}
type tagError string
func (e tagError) Error() string {
return "incorrect tag " + string(e)
}
type structFieldFilter func(reflect.StructField, *item) (bool, error)
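// writeValue serialises a reflect.Value into the buffer in a deterministic, canonical form:
// map entries and struct fields are sorted so that equal data always produces the same bytes.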
func writeValue(buf *bytes.Buffer, val reflect.Value, fltr structFieldFilter) {
switch val.Kind() {
case reflect.String:
buf.WriteByte('"')
buf.WriteString(val.String())
buf.WriteByte('"')
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
buf.WriteString(strconv.FormatInt(val.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
buf.WriteString(strconv.FormatUint(val.Uint(), 10))
case reflect.Float32, reflect.Float64:
buf.WriteString(strconv.FormatFloat(val.Float(), 'E', -1, 64))
case reflect.Bool:
if val.Bool() {
buf.WriteByte('t')
} else {
buf.WriteByte('f')
}
case reflect.Ptr:
if !val.IsNil() || val.Type().Elem().Kind() == reflect.Struct {
writeValue(buf, reflect.Indirect(val), fltr)
} else {
writeValue(buf, reflect.Zero(val.Type().Elem()), fltr)
}
case reflect.Array, reflect.Slice:
buf.WriteByte('[')
len := val.Len()
for i := 0; i < len; i++ {
if i != 0 {
buf.WriteByte(',')
}
writeValue(buf, val.Index(i), fltr)
}
buf.WriteByte(']')
case reflect.Map:
mk := val.MapKeys()
items := make([]item, len(mk))
// Get all values
for i := range items {
items[i].name = formatValue(mk[i], fltr)
items[i].value = val.MapIndex(mk[i])
}
// Sort values by key
sort.Sort(itemSorter(items))
buf.WriteByte('[')
for i := range items {
if i != 0 {
buf.WriteByte(',')
}
buf.WriteString(items[i].name)
buf.WriteByte(':')
writeValue(buf, items[i].value, fltr)
}
buf.WriteByte(']')
case reflect.Struct:
vtype := val.Type()
flen := vtype.NumField()
items := make([]item, 0, flen)
// Get all fields
for i := 0; i < flen; i++ {
field := vtype.Field(i)
it := item{field.Name, val.Field(i)}
if fltr != nil {
ok, err := fltr(field, &it)
if err != nil && strings.Contains(err.Error(), "method:") {
panic(err)
}
if !ok {
continue
}
}
items = append(items, it)
}
// Sort fields by name
sort.Sort(itemSorter(items))
buf.WriteByte('{')
for i := range items {
if i != 0 {
buf.WriteByte(',')
}
buf.WriteString(items[i].name)
buf.WriteByte(':')
writeValue(buf, items[i].value, fltr)
}
buf.WriteByte('}')
case reflect.Interface:
writeValue(buf, reflect.ValueOf(val.Interface()), fltr)
default:
buf.WriteString(val.String())
}
}
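// formatValue renders a single value to a string, quoting strings and otherwise delegating to writeValue.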
func formatValue(val reflect.Value, fltr structFieldFilter) string {
if val.Kind() == reflect.String {
return "\"" + val.String() + "\""
}
var buf bytes.Buffer
writeValue(&buf, val, fltr)
return buf.String()
}
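// filterField decides whether a struct field takes part in the hash for the requested version, honouring
// the `hash`, `version` and `lastversion` struct tags (including the name: and method: modifiers).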
func filterField(f reflect.StructField, i *item, version int) (bool, error) {
var err error
ver := 0
lastver := -1
if str := f.Tag.Get("hash"); str != "" {
if str == "-" {
return false, nil
}
for _, tag := range strings.Split(str, " ") {
args := strings.Split(strings.TrimSpace(tag), ":")
if len(args) != 2 {
return false, tagError(tag)
}
switch args[0] {
case "name":
i.name = args[1]
case "version":
if ver, err = strconv.Atoi(args[1]); err != nil {
return false, tagError(tag)
}
case "lastversion":
if lastver, err = strconv.Atoi(args[1]); err != nil {
return false, tagError(tag)
}
case "method":
property, found := f.Type.MethodByName(strings.TrimSpace(args[1]))
if !found || property.Type.NumOut() != 1 {
return false, tagError(tag)
}
i.value = property.Func.Call([]reflect.Value{i.value})[0]
}
}
} else {
if str := f.Tag.Get("lastversion"); str != "" {
if lastver, err = strconv.Atoi(str); err != nil {
return false, tagError(str)
}
}
if str := f.Tag.Get("version"); str != "" {
if ver, err = strconv.Atoi(str); err != nil {
return false, tagError(str)
}
}
}
if lastver != -1 && lastver < version {
return false, nil
}
if ver > version {
return false, nil
}
return true, nil
}
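// serialize dumps the whole object via writeValue, filtering struct fields by version through filterField.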
func serialize(object interface{}, version int) []byte {
var buf bytes.Buffer
writeValue(&buf, reflect.ValueOf(object),
func(f reflect.StructField, i *item) (bool, error) {
return filterField(f, i, version)
})
return buf.Bytes()
}


@ -0,0 +1,294 @@
package controllers
import (
"fmt"
"testing"
)
type First struct {
Bool bool `version:"1"`
String string `version:"2"`
Int int `version:"1" lastversion:"1"`
Float float64 `version:"1"`
Struct *Second `version:"1"`
Uint uint `version:"1"`
}
type Second struct {
Map map[string]string `version:"1"`
Slice []int `version:"1"`
}
type Tags1 struct {
Int int `hash:"-"`
Str string `hash:"name:Foo version:1 lastversion:2"`
Bar string `hash:"version:1"`
}
type Tags2 struct {
Foo string
Bar string
}
type Tags3 struct {
Bar string
}
type Tags4 struct {
Data1 ambiguousData `hash:"method:Serialize"`
Data2 ambiguousData `hash:"method:Normalize"`
}
type Tags5 struct {
Data1 ambiguousData `hash:"method:UnknownMethod"`
}
type Nils struct {
Str *string
Int *int
Bool *bool
Map map[string]string
Slice []string
}
type unexportedTags struct {
foo string
bar string
aMap map[string]string
}
type interfaceStruct struct {
Name string
Interface1 interface{}
InterfaceIgnore interface{} `hash:"-"`
}
type ambiguousData struct {
Prefix string
Suffix string
}
func (p ambiguousData) Serialize() string {
return p.Prefix + p.Suffix
}
func (p ambiguousData) Normalize() ambiguousData {
return ambiguousData{p.Prefix + p.Suffix, ""}
}
func dataSetup() *First {
tmpmap := make(map[string]string)
tmpmap["foo"] = "bar"
tmpmap["baz"] = "go"
tmpslice := make([]int, 3)
tmpslice[0] = 0
tmpslice[1] = 1
tmpslice[2] = 2
return &First{
Bool: true,
String: "test",
Int: 123456789,
Float: 65.3458,
Struct: &Second{
Map: tmpmap,
Slice: tmpslice,
},
Uint: 1,
}
}
func TestHash(t *testing.T) {
v1Hash := "v1_e8e67581aee36d7237603381a9cbd9fc"
v2Hash := "v2_5e51490d7c24c4b7a9e63c04f55734eb"
data := dataSetup()
v1, err := Hash(data, 1)
if err != nil {
t.Error(err)
}
// fmt.Println(string(Dump(data, 1)))
if v1 != v1Hash {
t.Errorf("%s is not %s", v1, v1Hash)
}
v2, err := Hash(data, 2)
if err != nil {
t.Error(err)
}
// fmt.Println(string(Dump(data, 2)))
if v2 != v2Hash {
t.Errorf("%s is not %s", v2, v2Hash)
}
v1md5 := fmt.Sprintf("v1_%x", Md5(data, 1))
if v1md5 != v1Hash {
t.Errorf("%s is not %s", v1md5, v1Hash[3:])
}
v2md5 := fmt.Sprintf("v2_%x", Md5(data, 2))
if v2md5 != v2Hash {
t.Errorf("%s is not %s", v2md5, v2Hash[3:])
}
}
func TestTags(t *testing.T) {
t1 := Tags1{11, "foo", "bar"}
t1x := Tags1{22, "foo", "bar"}
t2 := Tags2{"foo", "bar"}
t3 := Tags3{"bar"}
t1Dump := string(Dump(t1, 1))
t1xDump := string(Dump(t1x, 1))
if t1Dump != t1xDump {
t.Errorf("%s is not %s", t1Dump, t1xDump)
}
t2Dump := string(Dump(t2, 1))
if t1Dump != t2Dump {
t.Errorf("%s is not %s", t1Dump, t2Dump)
}
t1v3Dump := string(Dump(t1, 3))
t3v3Dump := string(Dump(t3, 3))
if t1v3Dump != t3v3Dump {
t.Errorf("%s is not %s", t1v3Dump, t3v3Dump)
}
}
func TestNils(t *testing.T) {
s1 := Nils{
Str: nil,
Int: nil,
Bool: nil,
Map: nil,
Slice: nil,
}
s2 := Nils{
Str: new(string),
Int: new(int),
Bool: new(bool),
Map: make(map[string]string),
Slice: make([]string, 0),
}
s1Dump := string(Dump(s1, 1))
s2Dump := string(Dump(s2, 1))
if s1Dump != s2Dump {
t.Errorf("%s is not %s", s1Dump, s2Dump)
}
}
func TestUnexportedFields(t *testing.T) {
v1Hash := "v1_750efb7c919caf87f2ab0d119650c87d"
data := unexportedTags{
foo: "foo",
bar: "bar",
aMap: map[string]string{
"key1": "val",
},
}
v1, err := Hash(data, 1)
if err != nil {
t.Error(err)
}
if v1 != v1Hash {
t.Errorf("%s is not %s", v1, v1Hash)
}
v1md5 := fmt.Sprintf("v1_%x", Md5(data, 1))
if v1md5 != v1Hash {
t.Errorf("%s is not %s", v1md5, v1Hash[3:])
}
}
func TestInterfaceField(t *testing.T) {
a := interfaceStruct{
Name: "name",
Interface1: "a",
InterfaceIgnore: "b",
}
b := interfaceStruct{
Name: "name",
Interface1: "b",
InterfaceIgnore: "b",
}
c := interfaceStruct{
Name: "name",
Interface1: "b",
InterfaceIgnore: "c",
}
ha, err := Hash(a, 1)
if err != nil {
t.Error(err)
}
hb, err := Hash(b, 1)
if err != nil {
t.Error(err)
}
hc, err := Hash(c, 1)
if err != nil {
t.Error(err)
}
if ha == hb {
t.Errorf("%s equals %s", ha, hb)
}
if hb != hc {
t.Errorf("%s is not %s", hb, hc)
}
b.Interface1 = map[string]string{"key": "value"}
c.Interface1 = map[string]string{"key": "value"}
hb, err = Hash(b, 1)
if err != nil {
t.Error(err)
}
hc, err = Hash(c, 1)
if err != nil {
t.Error(err)
}
if hb != hc {
t.Errorf("%s is not %s", hb, hc)
}
c.Interface1 = map[string]string{"key1": "value1"}
hc, err = Hash(c, 1)
if err != nil {
t.Error(err)
}
if hb == hc {
t.Errorf("%s equals %s", hb, hc)
}
}
func TestMethod(t *testing.T) {
dump1 := Dump(Tags4{
ambiguousData{"123", "45"},
ambiguousData{"12", "345"},
}, 1)
dump2 := Dump(Tags4{
ambiguousData{"12", "345"},
ambiguousData{"123", "45"},
}, 1)
if string(dump1) != string(dump2) {
t.Errorf("%s not equals %s", dump1, dump2)
}
}
func TestMethodPanic(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Errorf("dumping via incorrect \"method\" tag did not panic")
}
}()
_ = Dump(Tags5{
ambiguousData{"123", "45"},
}, 1)
}

101
controllers/suite_test.go Normal file

@ -0,0 +1,101 @@
package controllers
import (
"flag"
"fmt"
"testing"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
pscv1alpha1 "cronprimer.local/api/v1alpha1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
junitReporter := reporters.NewJUnitReporter(fmt.Sprintf("../TEST-ginkgo-junit_%d.xml", config.GinkgoConfig.ParallelNode))
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{envtest.NewlineReporter{}, junitReporter})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
useCluster := true
By("bootstrapping test environment")
testEnv = &envtest.Environment{
UseExistingCluster: &useCluster,
AttachControlPlaneOutput: true,
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = pscv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
// make the metrics listen address different for each parallel thread to avoid clashes when running with -p
var metricsAddr string
metricsPort := 8090 + config.GinkgoConfig.ParallelNode
flag.StringVar(&metricsAddr, "metrics-addr", fmt.Sprintf(":%d", metricsPort), "The address the metric endpoint binds to.")
flag.Parse()
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
MetricsBindAddress: metricsAddr,
})
Expect(err).ToNot(HaveOccurred())
// Uncomment the block below to run the operator locally and enable breakpoints / debug during tests
/*
err = (&PreScaledCronJobReconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("PrescaledCronJob"),
Recorder: k8sManager.GetEventRecorderFor("prescaledcronjob-controller"),
InitContainerImage: "initcontainer:1",
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
*/
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})

186
controllers/utilities.go Normal file

@ -0,0 +1,186 @@
package controllers
import (
"fmt"
"strconv"
"strings"
"github.com/robfig/cron/v3"
)
// GetPrimerSchedule tries to parse (an optional) primerSchedule and otherwise manually creates the primerSchedule
func GetPrimerSchedule(scheduleSpec string, warmupMinutes int, primerSchedule string) (string, error) {
if primerSchedule != "" {
// parse primer schedule
_, err := cron.ParseStandard(primerSchedule)
if err == nil {
return primerSchedule, err
}
return "", fmt.Errorf("primerSchedule provided is invalid: %v", err)
}
// no pre-defined primer schedule or couldn't parse it, creating it with warmup minutes instead
return CreatePrimerSchedule(scheduleSpec, warmupMinutes)
}
// CreatePrimerSchedule deducts the warmup time from the original cronjob schedule and creates a primed cronjob schedule
func CreatePrimerSchedule(scheduleSpec string, warmupMinutes int) (string, error) {
// bool that will be set to true if the minute expression goes < 0 after deduction
// therefore the hour expression will also need adjustments if not set to *
negativeMinutes := false
// parse schedule
_, err := cron.ParseStandard(scheduleSpec)
if err != nil {
return "", fmt.Errorf("scheduleSpec provided is invalid: %v", err)
}
cronArray := strings.Split(scheduleSpec, " ")
// validate cronjob
if cronArray[0] == "*" {
err = fmt.Errorf("Can't create primer schedule on something that runs every minute")
return "", err
}
// convert cron expressions with step (/) to commas
if strings.Contains(cronArray[0], "/") {
cronArray, err = convertStepCronToCommaCron(cronArray)
if err != nil {
return "", err
}
}
// convert cron expressions with range (-) to commas
if strings.Contains(cronArray[0], "-") {
cronArray, err = convertRangeCronToCommaCron(cronArray)
if err != nil {
return "", err
}
}
if strings.Contains(cronArray[0], ",") {
commaValues := strings.Split(cronArray[0], ",")
for i, s := range commaValues {
commaValues[i], negativeMinutes, err = deductWarmupMinutes(s, warmupMinutes)
if err != nil {
return "", err
}
if negativeMinutes && cronArray[1] != "*" {
return "", fmt.Errorf("Can't adjust hour for minute expression with multiple values")
}
}
cronArray[0] = strings.Join(commaValues, ",")
} else {
cronArray[0], negativeMinutes, err = deductWarmupMinutes(cronArray[0], warmupMinutes)
if err != nil {
return "", err
}
if negativeMinutes && cronArray[1] != "*" {
hourCron, minErr := strconv.Atoi(cronArray[1])
if minErr != nil {
return "", fmt.Errorf("Can't adjust special characters in cron-hour argument")
}
// adjust hour param (if not set to *)
cronArray[1] = strconv.Itoa(hourCron - 1)
// only allow changing a 'negative' hour when the cron expression is not day-specific
if cronArray[1] == "-1" && cronArray[2] == "*" && cronArray[3] == "*" && cronArray[4] == "*" {
cronArray[1] = "23"
} else if cronArray[1] == "-1" {
// cronjobs that run on midnight on a specific day are not supported
return "", fmt.Errorf("Unsupported cron, can't create primer cronjob with this expression")
}
} else if negativeMinutes &&
(cronArray[2] != "*" || cronArray[3] != "*" || cronArray[4] != "*") {
// cronjobs that run on midnight on a specific day are not supported
return "", fmt.Errorf("Unsupported cron, can't create primer cronjob with this expression")
}
}
// parse primer schedule
primerSchedule := strings.Join(cronArray, " ")
_, err = cron.ParseStandard(primerSchedule)
if err != nil {
return "", err
}
return primerSchedule, nil
}
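// convertStepCronToCommaCron expands a step expression in the minute field
// (e.g. 0/15 or */15) into an explicit comma-separated list (e.g. 0,15,30,45).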
func convertStepCronToCommaCron(cronArray []string) ([]string, error) {
splitStepVal := strings.Split(cronArray[0], "/")
// convert */x to 0/x since it's the same but easier to work with
if splitStepVal[0] == "*" {
splitStepVal[0] = "0"
}
startVal, err1 := strconv.Atoi(splitStepVal[0])
stepVal, err2 := strconv.Atoi(splitStepVal[1])
if err1 != nil || err2 != nil {
return nil, fmt.Errorf("Can't break up step values")
}
cronArray[0] = splitStepVal[0] + ","
for startVal+stepVal < 60 {
startVal += stepVal
cronArray[0] += strconv.Itoa(startVal) + ","
}
// remove trailing comma
cronArray[0] = strings.TrimSuffix(cronArray[0], ",")
return cronArray, nil
}
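// convertRangeCronToCommaCron expands a range expression in the minute field
// (e.g. 1-5) into an explicit comma-separated list (e.g. 1,2,3,4,5).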
func convertRangeCronToCommaCron(cronArray []string) ([]string, error) {
rangeVal := strings.Split(cronArray[0], "-")
startVal, err1 := strconv.Atoi(rangeVal[0])
endVal, err2 := strconv.Atoi(rangeVal[1])
if err1 != nil || err2 != nil {
return nil, fmt.Errorf("Can't break up range values")
}
cronArray[0] = rangeVal[0] + ","
for startVal+1 <= endVal {
startVal++
cronArray[0] += strconv.Itoa(startVal) + ","
}
// remove trailing comma
cronArray[0] = strings.TrimSuffix(cronArray[0], ",")
return cronArray, nil
}
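// deductWarmupMinutes subtracts the warmup minutes from a single minute value, wrapping
// around the hour when the result goes negative; the returned bool reports whether a wrap occurred.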
func deductWarmupMinutes(minuteVal string, warmupMinutes int) (string, bool, error) {
negativeMinutes := false
minuteCron, err := strconv.Atoi(minuteVal)
if err != nil {
return "", negativeMinutes, fmt.Errorf("Can't parse minute value to int")
}
minuteVal = strconv.Itoa(minuteCron - warmupMinutes)
// when cronjob-minute param minus warmupTime min is smaller than 0
if (minuteCron - warmupMinutes) < 0 {
// add 60 (so that e.g. -5 becomes 55)
minuteVal = strconv.Itoa(minuteCron - warmupMinutes + 60)
negativeMinutes = true
}
return minuteVal, negativeMinutes, nil
}


@ -0,0 +1,261 @@
package controllers
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
AdmissionAllowed = "AdmissionAllowed"
	AdmissionRejected = "AdmissionRejected"
)
type InputData struct {
Cronjob string
WarmupTime int
}
type Expected struct {
AdmissionAllowed bool
ExpectedCronjob string
}
type TestData struct {
ScenarioName string
ScenarioType string
inputData InputData
expected Expected
}
func assertReviewResult(testData TestData, t *testing.T) {
actualResult, err := CreatePrimerSchedule(testData.inputData.Cronjob, testData.inputData.WarmupTime)
actualAdmissionAllowed := true
if err != nil {
actualAdmissionAllowed = false
}
require.Equal(t, testData.expected.AdmissionAllowed, actualAdmissionAllowed)
require.Equal(t, testData.expected.ExpectedCronjob, actualResult)
}
func TestCreatePrimerSchedule(t *testing.T) {
scenarios := []TestData{
{
ScenarioName: "valid basic cron that runs on specific minute",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "30 * * 10 *",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "25 * * 10 *",
},
},
{
ScenarioName: "valid primer cron that needs to adjust the hours",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "0 0 * * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "50 23 * * *",
},
},
{
ScenarioName: "valid cron with step values",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "0/15 * 1 * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "50,5,20,35 * 1 * *",
},
},
{
ScenarioName: "valid cron with hour ranges",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "30 14-16 * * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "20 14-16 * * *",
},
},
{
ScenarioName: "valid cron with *-defined step values",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "*/30 * 1 * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "50,20 * 1 * *",
},
},
{
ScenarioName: "valid complicated cron with unaffected hour/day params",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "5 * */12 * 1,2",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "0 * */12 * 1,2",
},
},
{
ScenarioName: "valid cron with non-zero step value minutes",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "15/30 5 * * *",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "10,40 5 * * *",
},
},
{
ScenarioName: "valid cron with a minute range",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "15-20 12 * * 5",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "10,11,12,13,14,15 12 * * 5",
},
},
{
ScenarioName: "valid cron with comma values",
ScenarioType: AdmissionAllowed,
inputData: InputData{
Cronjob: "5,12,48,56 * * * 5",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "55,2,38,46 * * * 5",
},
},
{
ScenarioName: "invalid cron (every minute)",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "* 0 * * *",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "invalid cron (6 arguments instead of 5)",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "* 0 * * * *",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "can't use combination of ranges and step values",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "15-17,0/30 * * * *",
WarmupTime: 5,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "can't convert special characters in cron-hour argument",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "0 14-16 * * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "expected cronjob unable to compute due to overlap over multiple days",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "0-15 0 * * *",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "invalid, expected cron needs to change day-of-the-week",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "0 0 * * 5",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "",
},
},
{
ScenarioName: "invalid, expected cron needs to change day-of-the-week (no hour)",
ScenarioType: AdmissionRejected,
inputData: InputData{
Cronjob: "0 * * * 5",
WarmupTime: 10,
},
expected: Expected{
ExpectedCronjob: "",
},
},
}
for _, testData := range scenarios {
switch testData.ScenarioType {
case AdmissionAllowed:
testData.expected.AdmissionAllowed = true
case AdmissionRejected:
testData.expected.AdmissionAllowed = false
}
t.Run(fmt.Sprintf("[%s]:%s", testData.ScenarioName, testData.inputData.Cronjob), func(t *testing.T) {
assertReviewResult(testData, t)
})
}
}
func TestGetPrimerSchedule_ValidPrimerSchedule_Returns_PrimerSchedule(t *testing.T) {
schedule := "30 * 15 * *"
actualResult, err := GetPrimerSchedule("* * * * *", 10, schedule)
if assert.NoError(t, err) {
require.Equal(t, schedule, actualResult)
}
}
func TestGetPrimerSchedule_InvalidPrimerSchedule_Returns_Error(t *testing.T) {
schedule := "wibble"
_, err := GetPrimerSchedule("* * * * *", 10, schedule)
assert.Error(t, err)
}
func TestGetPrimerSchedule_NoPrimerSchedule_InvalidSchedule_Returns_Error(t *testing.T) {
schedule := "wibble"
_, err := GetPrimerSchedule(schedule, 10, "")
assert.Error(t, err)
}

10
debug/debug-endpoint.yaml Normal file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Endpoints
metadata:
name: psc-webhook-service
namespace: psc-system
subsets:
- addresses:
- ip: 18.185.254.87 #0.tcp.eu.ngrok.io
ports:
- port: 13351

16
debug/debug-service.yaml Normal file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: psc-webhook-service
namespace: psc-system
selfLink: /api/v1/namespaces/psc-system/services/psc-webhook-service
spec:
clusterIP: 10.98.155.51
ports:
- port: 443
protocol: TCP
targetPort: 9443
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}


@ -0,0 +1,664 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 14,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(1, sum(rate(prescalecronjoboperator_cronjob_time_to_start_workload_bucket[5m])) by (le))",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Time delay to start workload",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"hiddenSeries": false,
"id": 12,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(1, sum(rate(prescalecronjoboperator_cronjob_time_init_container_ran_bucket[5m])) by (le))",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Init container run duration",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 10,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(1, sum(rate(prescalecronjoboperator_cronjob_time_delay_of_workload_bucket[5m])) by (le))",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Time delay of Workload Bucket",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
"label": "seconds",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"hiddenSeries": false,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pluginVersion": "6.5.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(1, sum(rate(prescalecronjoboperator_cronjob_time_to_schedule_bucket[5m])) by (le)) ",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Time delay to schedule",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"description": "",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 16
},
"hiddenSeries": false,
"id": 2,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": "true",
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(workqueue_work_duration_seconds_bucket{job=\"psc-controller-manager-metrics-service\"}[1m])) by (name, le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "P99-{{name}}",
"refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"psc-controller-manager-metrics-service\"}[1m])) by (name, le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "P50-{{name}}",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Controller - WorkQueue_Queue",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 4,
"w": 4,
"x": 12,
"y": 16
},
"id": 8,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"expr": "count(kube_cronjob_status_active)",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "Amount of Active Cronjobs",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 4,
"w": 4,
"x": 16,
"y": 16
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"pluginVersion": "6.5.2",
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"expr": "count(prescalecronjoboperator_cronjob_time_init_container_ran_count)",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "Amount of Init Containers that completed",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
}
],
"schemaVersion": 21,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "CronPrimer Dashboard",
"uid": "RytDScsWk",
"version": 1
}


@ -0,0 +1,83 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
#
# Argument parsing and validation
#
function show_usage() {
echo "deploy-prometheus.sh (PROMETHEUS_INSTANCE_NAME)"
echo
echo "first argument optionally defines the prometheus instance name"
}
function deploy_dashboard() {
local exists
local DASHBOARD_NAME=$1
local DASHBOARD_SOURCE=$2
exists=$(kubectl get configmap "$DASHBOARD_NAME" >/dev/null 2>&1 ; echo $?)
if [[ $exists -eq "0" ]]; then
echo "$DASHBOARD_NAME exists - deleting..."
kubectl delete configmap "$DASHBOARD_NAME"
fi
echo "Creating $DASHBOARD_NAME..."
kubectl create configmap "$DASHBOARD_NAME" --from-file=./dashboards/"$DASHBOARD_SOURCE"
#Label it for autodiscovery
kubectl label configmap "$DASHBOARD_NAME" grafana_dashboard="1"
echo
}
while [[ $# -gt 1 ]]
do
case "$2" in
*)
echo "Unexpected '$2'"
show_usage
exit 1
;;
esac
done
#
# Main script start
#
# Create/switch to namespace
CURRENT_NAMESPACE=$(kubectl config view --minify --output 'jsonpath={..namespace}')
NAMESPACE="prometheus"
PROMETHEUS_INSTANCE_NAME=${1:-prometheus-operator} # use first argument as instance name, if given
NAMESPACE_EXISTS=$(kubectl get namespaces $NAMESPACE > /dev/null 2>&1 ; echo $?)
if [[ $NAMESPACE_EXISTS = 0 ]]; then
echo "Namespace $NAMESPACE already exists - skipping"
else
echo "Creating $NAMESPACE"
kubectl create namespace $NAMESPACE
fi
echo
echo "Switching to $NAMESPACE namespace"
kubectl config set-context --current --namespace=$NAMESPACE
deploy_dashboard cronprimer-dashboard cronprimer-dash.json
OPERATOR_INSTALLED=$(helm ls -o json | jq '.[] | select(.name=='\"$PROMETHEUS_INSTANCE_NAME\"') | {name:.name} | length')
if [[ $OPERATOR_INSTALLED -eq "1" ]]; then
echo "Prometheus operator already installed"
else
echo "Installing Prometheus operator..."
    # prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false means all serviceMonitors are discovered, not just
# those deployed by the helm chart itself
helm install $PROMETHEUS_INSTANCE_NAME stable/prometheus-operator --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
fi
echo
echo "Switching to back to orginal namespace: $CURRENT_NAMESPACE"
kubectl config set-context --current --namespace=$CURRENT_NAMESPACE
echo
echo "DONE"
echo "Connect to the grafana:"
echo "kubectl port-forward service/$PROMETHEUS_INSTANCE_NAME-grafana 8080:80 -n $NAMESPACE"

65
docs/cronjobs.md Normal file

@ -0,0 +1,65 @@
# Primed Cronjob Schedules
This document provides an overview of the logic that determines primed cronjob schedules based on the schedules of incoming cronjobs. Primed cronjobs are designed to warm up the cluster X minutes before the intended cronjob kicks off, ensuring that all required nodes are pre-warmed and readily available when needed. The challenging part is that incoming cronjob schedules need to be converted to primed cronjob schedules (the original schedule minus X minutes) so that the warmup job is scheduled correctly.
## Cron schedule expressions
A cron schedule consists of 5 fields separated by a space:
```
<minute> <hour> <day-of-month> <month> <day-of-week>
(0-59) (0-23) (1-31) (1-12) (0-6)
```
For standard cron expressions, each field contains either an `*` (every occurrence) or a `number` in the range shown above. For example, the cron expression `0 * * * *` runs every hour at minute 0, while `0 2 * * *` runs only at 2AM every day.
>For more information on non-standard cron expressions and a nice playground, please use [crontab.guru](https://crontab.guru).
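The operator validates these expressions with `cron.ParseStandard` from `github.com/robfig/cron/v3` (see `controllers/utilities.go`); a minimal standalone sketch of that validation:
```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// ParseStandard accepts the classic 5-field format described above.
	if _, err := cron.ParseStandard("0 2 * * *"); err != nil {
		fmt.Println("invalid schedule:", err)
		return
	}
	fmt.Println("valid schedule")
}
```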
## Existing implementation
The existing implementation returns primed schedules for most standard and non-standard cron schedules in the `CreatePrimerSchedule()` function in `controllers/utilities.go`.
### 1. Convert Step and Range expressions to Commas
After initial cron validation, non-standard step and range cron expressions are converted to comma lists in `convertStepCronToCommaCron()` and `convertRangeCronToCommaCron()` respectively. When the original cron contains step values (e.g. `0/30 * * * *`) these are converted to commas (e.g. `0,30 * * * *`) to make it easier to determine the primed cron schedule. Similarly, ranges (e.g. `0-5 * * * *`) are converted to commas (e.g. `0,1,2,3,4,5 * * * *`).
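As a rough illustration, here is a self-contained sketch of the step-to-comma expansion; the function name `expandStepMinutes` is hypothetical, and the production conversion lives in the unexported helpers of `controllers/utilities.go`:
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandStepMinutes expands a step expression in the minute field into a comma list.
func expandStepMinutes(field string) (string, error) {
	parts := strings.Split(field, "/")
	if len(parts) != 2 {
		return "", fmt.Errorf("not a step expression: %q", field)
	}
	if parts[0] == "*" {
		// */x is equivalent to 0/x
		parts[0] = "0"
	}
	start, err1 := strconv.Atoi(parts[0])
	step, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil || step <= 0 {
		return "", fmt.Errorf("can't parse step expression: %q", field)
	}
	minutes := []string{strconv.Itoa(start)}
	for start+step < 60 {
		start += step
		minutes = append(minutes, strconv.Itoa(start))
	}
	return strings.Join(minutes, ","), nil
}

func main() {
	expanded, _ := expandStepMinutes("0/15")
	fmt.Println(expanded) // 0,15,30,45
}
```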
After conversion, the cron expression follows one of the paths below, depending on whether commas are present or not.
#### 2a. Cron expressions without commas
When no commas (and therefore no step/range expressions) are present, the `warmupMinutes` are deducted from the first cron field in `deductWarmupMinutes()`. This function returns a `negativeMinutes` boolean indicating whether the subtraction resulted in a 'negative' minute that needs to be accounted for.
Some examples:
- Incoming cron is `30 * * * *` with `10` warmupMinutes
- Returned cron equals `20 * * * *`
- `negativeMinutes = false` because minutes didn't go negative after deduction
- Incoming cron is `0 * * * *` with `10` warmupMinutes
- Returned cron equals `-10 * * * *`
- Above cron is invalid and 60 minutes need to be added to return: `50 * * * *`
- `negativeMinutes = true` because minutes went negative after deduction
Why do we need the `negativeMinutes` boolean? Because when cronjobs are hour-specific (the 2nd field is not `*`), the hour also needs to be adjusted by `-1`. This scenario is only supported when day, month and day of the week (the last 3 fields) are not specified (i.e. cronjobs ending in `x x * * *`).
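A short usage sketch of the exported helper, using the module path `cronprimer.local` from `go.mod`:
```go
package main

import (
	"fmt"

	"cronprimer.local/controllers"
)

func main() {
	// "0 0 * * *" minus 10 warmup minutes wraps past the top of the hour,
	// so both the minute and the hour fields are adjusted.
	primer, err := controllers.CreatePrimerSchedule("0 0 * * *", 10)
	if err != nil {
		panic(err)
	}
	fmt.Println(primer) // 50 23 * * *
}
```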
#### 2b. Cron expressions with commas
In a similar fashion, `warmupMinutes` are deducted from every comma-separated minute using `deductWarmupMinutes()`. When `negativeMinutes` is `true` for one of these values, this is only supported when hour, day, month and day of the week (the last 4 fields) are not specified (i.e. cronjobs ending in `x * * * *`). Cron expressions with commas are therefore a little less flexible.
### 3. Primer schedule validation
As a last step, the resulting primer cron expression is parsed again to validate it. Theoretically these primed schedules should always be valid, but this extra step catches errors, especially when adding extra logic to this utility function.
## Testing
Several valid and invalid test schedules are defined in `controllers/utilities_test.go` and need to pass for a successful build of the code. New tests can be added by appending an extra entry to the `scenarios` slice in the `TestCreatePrimerSchedule()` function with the following parameters:
```
{
ScenarioName: "detailed name of tested scenario for debugging",
ScenarioType: AdmissionAllowed, // or AdmissionRejected if you expect it to fail
inputData: InputData{
Cronjob: "0 0 * * *", // input cronjob
WarmupTime: 10, // input warmup minutes
},
expected: Expected{
ExpectedCronjob: "50 23 * * *", // expected result or "" (empty) for expected failed result
},
},
```
## Known issues
As mentioned before, not all cron expressions can be converted to valid primed crons, especially for non-standard expressions. Below is a list of known unsupported cron expressions:
- Can't convert special characters in cron-hour field (e.g. `0 14-16 * * *`)
- Can't use a combination of range and step values in the minute field (e.g. `15-17,0/30 * * * *`)
- Can't adjust the hour when a comma or range minute expression wraps past the hour boundary (e.g. `0-15 0 * * *`)
- Can't change day, month or day-of-the-week when negativeMinutes is true and one of these is set (e.g. `0 0 * * 5`)

20
docs/debugging.md Normal file

@ -0,0 +1,20 @@
# Debugging
## Debug locally
To debug in Kind and hit breakpoints on your laptop, follow these steps:
1. Ensure you have the operator [deployed locally](../readme#deploying%20locally).
2. Set a breakpoint in `./controllers/prescaledcronjob_controller.go` and hit `F5`
3. Create a new prescaledcronjob object: `make recreate-sample-psccron`
4. All being well, your breakpoint will be hit.
## Checking the logs
The Operator records debug information in the logs of the controller manager. To view them:
- run `kubectl get pods -A`
- copy the name of your controller manager, for example: `pod/psc-controller-manager-6544fc674f-nl5d2`
- run `kubectl logs <pod name> -n psc-system manager` (so in our example: `kubectl logs pod/psc-controller-manager-6544fc674f-nl5d2 -n psc-system manager`)
## Checking object events
The Operator records events on the `PreScaledCronJob` objects as they occur. To view them:
- run `kubectl describe prescaledcronjobs <your prescaledcronjob name here> -n psc-system`
- you will be shown all events that have taken place related to the `prescaledcronjob` object you created

110
docs/monitoring.md Normal file

@ -0,0 +1,110 @@
# Monitoring the Operator
The Operator provides a mechanism for monitoring its performance and throughput via usage of Prometheus. Prometheus is a monitoring and metric gathering tool for Kubernetes and [information regarding the system can be found here](https://github.com/coreos/prometheus-operator).
This repository provides a way for you to use a new installation of Prometheus as part of the Operator installation, or to use an existing installation.
> *Note: In order to scrape the metrics the Operator provides in an existing Prometheus installation, it may be necessary to install the custom ServiceMonitor provided within this repo.*
## Installing Prometheus
If you are using a brand new cluster and want to enable monitoring we provide a very simple setup process:
### Prerequisites
- Helm (v3+)
- Terminal with kubeconfig pointing to desired K8s cluster
- Optional: Helm installs the operator into the currently active context namespace (by default this is `default`). If you wish to install Prometheus into a specific namespace then you should set up your namespace before running the commands below (`kubectl config set-context --current --namespace=<insert-namespace-name-here>`)
### Install Prometheus-Operator Helm chart
1. Deploy the Prometheus-Operator helm chart:
```bash
make install-prometheus
```
2. Ensure Prometheus installs correctly:
```bash
kubectl get pods -l "release=prometheus-operator" -A
```
3. Verify that you see the following output:
```
NAMESPACE NAME READY STATUS RESTARTS AGE
default prometheus-operator-grafana-74df55f54d-znr7k 2/2 Running 0 2m42s
default prometheus-operator-operator-64f6586685-d54h4 2/2 Running 0 2m42s
default prometheus-operator-prometheus-node-exporter-x29rf 1/1 Running 0 2m42s
```
> **Notes:**
> - By default, using the `make` command installs the Helm chart with the name `prometheus-operator`, as seen above in the prefix of the pod names.
> - The name prefix can be overridden when using the `make` command if required: `make install-prometheus {PROMETHEUS_INSTANCE_NAME}=<some value>`. (This is useful if you want to run multiple Prometheus instances per cluster.)
> - If you override `{PROMETHEUS_INSTANCE_NAME}` you will need to [make changes to the Kustomization scripts](#customizing-installation) and replace the `prometheus-operator` pod label selector in step 2 above.
## Installing the ServiceMonitor
Once your Prometheus instance is up and running correctly, you will have to configure the instance to scrape the metrics from the PreScaledCronJob Operator.
The service monitor is installed automatically during deployment via the `make deploy` and `make deploy-cluster` commands, provided the `[PROMETHEUS]` sections of the `/config/default/kustomization.yaml` file are uncommented.
## Customizing Installation
These steps need to be performed if you provided a custom `{PROMETHEUS_INSTANCE_NAME}` parameter during installation or if you are using an existing Prometheus installation on a cluster:
1. Determine the `serviceMonitorSelector` being used by your Prometheus instance:
```bash
kubectl get prometheus -o custom-columns="NAME:metadata.name,RELEASE:spec.serviceMonitorSelector"
```
> Example:
>
> Executing the command gives:
>```
>NAME RELEASE
>prom-prometheus-operator-prometheus1 map[matchLabels:map[release:wibble]]
>prom-prometheus-operator-prometheus2 map[matchLabels:map[release:wobble]]
>```
>
> I want to use Prometheus instance `prom-prometheus-operator-prometheus2` to scrape my metrics so I note the matchLabel is `release:wobble`
2. Edit `config/prometheus/monitor.yaml` file where indicated to match the matchLabel determined in step 1.
3. Install the service monitor via the deployment scripts `make deploy` or `make deploy-cluster`
## Viewing the metrics
To monitor the metrics you will need to port forward to the Prometheus pod from your terminal:
```bash
kubectl port-forward service/prometheus-operator-prometheus 9090:9090 -n <your prometheus namespace>
```
You can now access the metrics on your machine by opening a browser and navigating to `http://localhost:9090`
> **Notes:**
> - If you changed the name of the Prometheus instance then you need to replace the initial `prometheus-operator` above with your instance name *(you can find this by doing `kubectl get services -A` and looking for `prometheus-operator-prometheus`)*
> - If you are using the dev container the port forward may not work, use the [VSCode temporary port forwarding](https://code.visualstudio.com/docs/remote/containers#_temporarily-forwarding-a-port) to resolve
## Viewing Grafana dashboards
The Prometheus-Operator Helm chart comes with an installation of Grafana by default to allow easy installation and viewing of metrics. To view the dashboard you will need to port forward to the service:
```bash
kubectl port-forward service/prometheus-operator-grafana 8080:80 -n <your prometheus namespace>
```
You can now access the metrics on your machine by opening a browser and navigating to `http://localhost:8080`
> **Notes:**
> - Grafana requires a username and password to access. By default the admin password is set via Helm; [it can be found and overridden via the instructions here](https://github.com/helm/charts/tree/master/stable/prometheus-operator#grafana)
> - If you changed the name of the Prometheus instance then you need to replace `prometheus-operator` above with your instance name *(you can find this by doing `kubectl get services -A` and looking for `-grafana`)*
> - If you are using the dev container the port forward may not work, use the [VSCode temporary port forwarding](https://code.visualstudio.com/docs/remote/containers#_temporarily-forwarding-a-port) to resolve
## Note
The `prescalecronjoboperator_cronjob_time_*` metrics published in [metrics.go](../controllers/metrics.go) are useful for telling how long the `PrescaledCronJob` took to execute.
They are stored as histograms in Prometheus with exponential buckets ranging from 2 seconds up to 1 hour. Once running, it's strongly suggested to tweak these buckets based on the observed delays and scale-up times.
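For illustration, a hedged sketch of how such a histogram could be declared with `client_golang` and registered on the controller-runtime metrics registry; the metric name, help text and bucket parameters here are assumptions, not the operator's actual definitions in `metrics.go`:
```go
// Illustrative only; see controllers/metrics.go for the real definitions.
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

// timeToStartWorkload is a hypothetical histogram with exponential buckets
// covering roughly 2 seconds up to about an hour; tune the start, factor and
// count to match the delays you actually observe.
var timeToStartWorkload = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "prescalecronjoboperator_cronjob_time_to_start_workload",
	Help:    "Seconds between the primed schedule firing and the workload starting.",
	Buckets: prometheus.ExponentialBuckets(2, 2, 12), // 2s, 4s, 8s, ... ~68m
})

func init() {
	// controller-runtime exposes its own registry, which the manager serves
	// on the endpoint configured via MetricsBindAddress.
	ctrlmetrics.Registry.MustRegister(timeToStartWorkload)
}
```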

Binary data
docs/prescaledcron.png Normal file

Binary file not shown (45 KiB).

18
go.mod Normal file

@ -0,0 +1,18 @@
module cronprimer.local
go 1.12
require (
github.com/ReneKroon/ttlcache v1.6.0
github.com/go-logr/logr v0.1.0
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
github.com/prometheus/client_golang v0.9.0
github.com/robfig/cron/v3 v3.0.0
github.com/stretchr/testify v1.3.0
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
sigs.k8s.io/controller-runtime v0.2.2
sigs.k8s.io/controller-tools v0.2.1 // indirect
)

178
go.sum Normal file

@ -0,0 +1,178 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/ReneKroon/ttlcache v1.6.0 h1:aO+GDNVKTQmcuI0H78PXCR9E59JMiGfSXHAkVBUlzbA=
github.com/ReneKroon/ttlcache v1.6.0/go.mod h1:DG6nbhXKUQhrExfwwLuZUdH7UnRDDRA1IW+nBuCssvs=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo=
github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs=
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8=
golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE=
golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22 h1:0efs3hwEZhFKsCoP8l6dDB1AZWMgnEl3yWXWRZTOaEA=
gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo=
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ=
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/controller-runtime v0.2.2 h1:JT/vJJhUjjL9NZNwnm8AXmqCBUXSCFKmTaNjwDi28N0=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-tools v0.2.1 h1:HoCik83vXOpPi7KSJWdPRmiGntyOzK0v0BTV4U+pl8o=
sigs.k8s.io/controller-tools v0.2.1/go.mod h1:cenyhL7t2e7izk/Zy7ZxDqQ9YEj0niU5VDL1PWMgZ5s=
sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs=
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

5
initcontainer/Dockerfile Normal file
Просмотреть файл

@ -0,0 +1,5 @@
FROM python:3
COPY main.py /
COPY requirements.txt /
RUN pip install -r requirements.txt
CMD [ "python", "-u", "main.py" ]

41
initcontainer/main.py Normal file
Просмотреть файл

@ -0,0 +1,41 @@
from croniter import croniter
from datetime import datetime
from kubernetes import client, config
import time
import os


def get_pod_creation_date(podName, podNamespace):
    # Prefer the in-cluster service account; fall back to a local kubeconfig
    # so the script can also be run outside the cluster.
    try:
        config.load_incluster_config()
    except Exception:
        config.load_kube_config()
    v1 = client.CoreV1Api()
    podStatus = v1.read_namespaced_pod_status(name=podName, namespace=podNamespace)
    return podStatus.metadata.creation_timestamp


def wait_on_cron_schedule(creationDate, schedule):
    if schedule:
        if croniter.is_valid(schedule):
            cron = croniter(schedule, creationDate)
            nextdate = cron.get_next(datetime)
            while True:
                now = datetime.now().astimezone()  # needs to be tz-aware to compare
                if now >= nextdate:
                    print("finally reached!")
                    break
                print("current time: " + now.strftime("%m/%d/%Y, %H:%M:%S"))
                print("didn't reach " + nextdate.strftime("%m/%d/%Y, %H:%M:%S"))
                time.sleep(5)
        else:
            print("invalid cron schedule")
    else:
        print("no cron schedule passed via env variables")


if __name__ == '__main__':
    creationDate = get_pod_creation_date(os.environ.get('HOSTNAME'), os.environ.get('NAMESPACE'))
    wait_on_cron_schedule(creationDate, os.environ.get('CRONJOB_SCHEDULE'))
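The wait loop above hinges on croniter resolving the next scheduled run from the pod's creation timestamp. A minimal, illustrative sketch of that calculation (not part of this commit; the creation time below is hypothetical, the schedule matches the test fixtures):

# Illustrative only -- shows how croniter derives the next fire time
# that wait_on_cron_schedule() polls for.
from datetime import datetime, timezone
from croniter import croniter

schedule = "*/1 * * * *"                                          # same schedule as the test fixtures
creation = datetime(2020, 1, 29, 12, 3, 7, tzinfo=timezone.utc)   # hypothetical pod creation time

if croniter.is_valid(schedule):
    nextdate = croniter(schedule, creation).get_next(datetime)
    print("init container would exit at", nextdate.isoformat())   # 2020-01-29T12:04:00+00:00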

2
initcontainer/requirements.txt Normal file
Просмотреть файл

@ -0,0 +1,2 @@
croniter==0.3.31
kubernetes==10.0.1

135
main.go Normal file
Просмотреть файл

@ -0,0 +1,135 @@
package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"time"

	pscv1alpha1 "cronprimer.local/api/v1alpha1"
	"cronprimer.local/controllers"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	// +kubebuilder:scaffold:imports
)

const (
	initContainerEnvVariable = "INIT_CONTAINER_IMAGE"
	defaultContainerImage    = "initcontainer:1"
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
	probeLog = ctrl.Log.WithName("probe")
)

func init() {
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		setupLog.Error(err, "unable to add client go scheme")
		os.Exit(1)
	}

	if err := pscv1alpha1.AddToScheme(scheme); err != nil {
		setupLog.Error(err, "unable to add pscv1alpha1 scheme")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:scheme
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.Parse()

	logger := zap.Logger(true)
	ctrl.SetLogger(logger)

	setupProbes()

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:             scheme,
		MetricsBindAddress: metricsAddr,
		LeaderElection:     enableLeaderElection,
		Port:               9443,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	// Resolve the init container image from the environment, falling back to the default.
	initContainerImage := os.Getenv(initContainerEnvVariable)
	if initContainerImage == "" {
		setupLog.Info(fmt.Sprintf("%s not set, using default", initContainerEnvVariable))
		initContainerImage = defaultContainerImage
	}
	setupLog.Info(fmt.Sprintf("Using image %s for initContainer", initContainerImage))

	if err = (&controllers.PreScaledCronJobReconciler{
		Client:             mgr.GetClient(),
		Log:                ctrl.Log.WithName("controllers").WithName("prescaledcronjob"),
		Recorder:           mgr.GetEventRecorderFor("prescaledcronjob-controller"),
		InitContainerImage: initContainerImage,
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "prescaledcronjob")
		os.Exit(1)
	}

	if err = (&controllers.PodReconciler{
		Client:             mgr.GetClient(),
		Log:                ctrl.Log.WithName("controllers").WithName("pod"),
		Recorder:           mgr.GetEventRecorderFor("pod-controller"),
		InitContainerImage: initContainerImage,
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "pod")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:builder

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}

func setupProbes() {
	setupLog.Info("setting up probes")
	started := time.Now()

	// Readiness probe: reports how long the manager has been up.
	http.HandleFunc("/ready", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		data := (time.Since(started)).String()
		_, err := w.Write([]byte(data))
		if err != nil {
			probeLog.Error(err, "problem in readiness probe")
			return
		}
	})

	// Liveness probe: always answers "ok" while the process is alive.
	http.HandleFunc("/alive", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		_, err := w.Write([]byte("ok"))
		if err != nil {
			probeLog.Error(err, "problem in liveness probe")
			return
		}
	})

	go func() {
		setupLog.Info("probes are starting to listen", "addr", ":8081")
		err := http.ListenAndServe(":8081", nil)
		if err != nil {
			setupLog.Error(err, "problem setting up probes")
		}
	}()
}
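setupProbes registers the /ready and /alive handlers on :8081. A quick, illustrative smoke check of those endpoints against a locally running manager (not part of this commit) could look like:

# Illustrative only -- polls the probe endpoints exposed by setupProbes().
import urllib.request

for path in ("/ready", "/alive"):
    with urllib.request.urlopen("http://localhost:8081" + path, timeout=5) as resp:
        print(path, resp.status, resp.read().decode())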

34
testdata/events/initStartedEvent.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:08Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.initContainers{injected-0d825b4f-07f0-4952-8150-fba894c613b1}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:08Z",
"message": "Started container injected-0d825b4f-07f0-4952-8150-fba894c613b1",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:08Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebb457f0bb",
"namespace": "psc-system",
"resourceVersion": "2656",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebb457f0bb",
"uid": "b451eb09-d73e-4d82-a65b-4adbd96f3546"
},
"reason": "Started",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
}

32
testdata/events/scheduledEvent.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:07Z",
"involvedObject": {
"apiVersion": "v1",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2635",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:07Z",
"message": "Successfully assigned psc-system/autogen-psc-test-local-jbyphf-1580299380-nqj4z to psccontroller-control-plane",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:07Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eb880fbe3f",
"namespace": "psc-system",
"resourceVersion": "2640",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eb880fbe3f",
"uid": "c197694f-7f29-4e04-8ec6-21ad4d9ec45c"
},
"reason": "Scheduled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "default-scheduler"
},
"type": "Normal"
}

32
testdata/events/uninterestingEvent.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T14:38:02Z",
"involvedObject": {
"apiVersion": "psc.cronprimer.local/v1alpha1",
"kind": "PreScaledCronJob",
"name": "psc-test-local-babgvr",
"namespace": "psc-system",
"resourceVersion": "910",
"uid": "522bd349-06ad-4e27-8311-12872ce344a2"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T14:38:02Z",
"message": "Event timeInitContainerRan took 50s on pod autogen-psc-test-local-babgvr-1580308620-d2plh",
"metadata": {
"creationTimestamp": "2020-01-29T14:38:02Z",
"name": "psc-test-local-babgvr.15ee625fd3c8d22e",
"namespace": "psc-system",
"resourceVersion": "12580",
"selfLink": "/api/v1/namespaces/psc-system/events/psc-test-local-babgvr.15ee625fd3c8d22e",
"uid": "59dc0028-9793-4bc0-98dd-5cbc1e8c4b49"
},
"reason": "Metrics",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "pod-controller"
},
"type": "Normal"
}

34
testdata/events/workloadPulledEvent.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:03Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:03Z",
"message": "Pulling image \"busybox\"",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:03Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8aa4180eb",
"namespace": "psc-system",
"resourceVersion": "2751",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8aa4180eb",
"uid": "637edad4-8528-4293-8fc7-5cf96c1a1da3"
},
"reason": "Pulling",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
}

34
testdata/events/workloadStartedEvent.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:05Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:05Z",
"message": "Started container test-busybox",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:05Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9093577a9",
"namespace": "psc-system",
"resourceVersion": "2758",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9093577a9",
"uid": "b958ce5b-69aa-4e37-b1d9-fa85388031d9"
},
"reason": "Started",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
}

64
testdata/examplepod.yaml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,64 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: autogen-psc-test-local-ezkryx-1580302500-
  labels:
    controller-uid: 0883838b-80af-4d42-9897-821f1012c6b5
    job-name: autogen-psc-test-local-ezkryx-1580302500
    primedcron: psc-test-local-ezkryx
  name: autogen-psc-test-local-ezkryx-1580302500-b2wrx
  namespace: psc-system
spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - date; sleep 30s; date;
    image: busybox
    imagePullPolicy: Always
    name: test-busybox
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-q29rf
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  initContainers:
  - env:
    - name: CRONJOB_SCHEDULE
      value: '*/1 * * * *'
    image: initcontainer:1
    imagePullPolicy: IfNotPresent
    name: injected-0d825b4f-07f0-4952-8150-fba894c613b1
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-q29rf
      readOnly: true
  nodeName: psccontroller-control-plane
  priority: 0
  restartPolicy: OnFailure
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-q29rf
    secret:
      defaultMode: 420
      secretName: default-token-q29rf
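This fixture captures a job pod after the controller has injected its warm-up init container. A minimal, illustrative check of the pieces the tests rely on (assuming PyYAML is available and the fixture is read from testdata/examplepod.yaml; not part of this commit):

# Illustrative only -- confirms the example pod carries the injected init
# container with its CRONJOB_SCHEDULE variable.
import yaml

with open("testdata/examplepod.yaml") as f:
    pod = yaml.safe_load(f)

init = pod["spec"]["initContainers"][0]
env = {e["name"]: e["value"] for e in init["env"]}
assert init["name"].startswith("injected-")
assert env["CRONJOB_SCHEDULE"] == "*/1 * * * *"
print("init container", init["name"], "waits on schedule", env["CRONJOB_SCHEDULE"])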

280
testdata/sample-pod-events3.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,280 @@
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:07Z",
"involvedObject": {
"apiVersion": "v1",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2635",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:07Z",
"message": "Successfully assigned psc-system/autogen-psc-test-local-jbyphf-1580299380-nqj4z to psccontroller-control-plane",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:07Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eb880fbe3f",
"namespace": "psc-system",
"resourceVersion": "2640",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eb880fbe3f",
"uid": "c197694f-7f29-4e04-8ec6-21ad4d9ec45c"
},
"reason": "Scheduled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "default-scheduler"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:07Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.initContainers{injected-0d825b4f-07f0-4952-8150-fba894c613b1}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:07Z",
"message": "Container image \"initcontainer:1\" already present on machine",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:07Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eba8ad68b2",
"namespace": "psc-system",
"resourceVersion": "2652",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59eba8ad68b2",
"uid": "098fe463-effb-4b8d-b332-cc45d78c4730"
},
"reason": "Pulled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:08Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.initContainers{injected-0d825b4f-07f0-4952-8150-fba894c613b1}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:08Z",
"message": "Created container injected-0d825b4f-07f0-4952-8150-fba894c613b1",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:08Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebaf6c8627",
"namespace": "psc-system",
"resourceVersion": "2654",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebaf6c8627",
"uid": "dfccd34f-1801-46c1-a72a-914206e8b873"
},
"reason": "Created",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:03:08Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.initContainers{injected-0d825b4f-07f0-4952-8150-fba894c613b1}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:03:08Z",
"message": "Started container injected-0d825b4f-07f0-4952-8150-fba894c613b1",
"metadata": {
"creationTimestamp": "2020-01-29T12:03:08Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebb457f0bb",
"namespace": "psc-system",
"resourceVersion": "2656",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59ebb457f0bb",
"uid": "b451eb09-d73e-4d82-a65b-4adbd96f3546"
},
"reason": "Started",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:03Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:03Z",
"message": "Pulling image \"busybox\"",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:03Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8aa4180eb",
"namespace": "psc-system",
"resourceVersion": "2751",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8aa4180eb",
"uid": "637edad4-8528-4293-8fc7-5cf96c1a1da3"
},
"reason": "Pulling",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:05Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:05Z",
"message": "Successfully pulled image \"busybox\"",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:05Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8fefd90a2",
"namespace": "psc-system",
"resourceVersion": "2756",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f8fefd90a2",
"uid": "2c536ce9-5f6a-4647-8303-ecc9f625a89d"
},
"reason": "Pulled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:05Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:05Z",
"message": "Created container test-busybox",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:05Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9050165ef",
"namespace": "psc-system",
"resourceVersion": "2757",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9050165ef",
"uid": "006829e7-64ff-4867-bbee-6322a04cc6f9"
},
"reason": "Created",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-01-29T12:04:05Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{test-busybox}",
"kind": "Pod",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z",
"namespace": "psc-system",
"resourceVersion": "2638",
"uid": "2cbd8c39-2506-4f2a-98bc-6f5a3098aadc"
},
"kind": "Event",
"lastTimestamp": "2020-01-29T12:04:05Z",
"message": "Started container test-busybox",
"metadata": {
"creationTimestamp": "2020-01-29T12:04:05Z",
"name": "autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9093577a9",
"namespace": "psc-system",
"resourceVersion": "2758",
"selfLink": "/api/v1/namespaces/psc-system/events/autogen-psc-test-local-jbyphf-1580299380-nqj4z.15ee59f9093577a9",
"uid": "b958ce5b-69aa-4e37-b1d9-fa85388031d9"
},
"reason": "Started",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "psccontroller-control-plane"
},
"type": "Normal"
}
],
"kind": "List",
"metadata": {
"resourceVersion": "",
"selfLink": ""
}
}

719
testdata/sample-pod-transition.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,719 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2020-01-28T09:38:08Z",
"generateName": "autogen-psc-test-local-wujutc-1580204280-",
"labels": {
"controller-uid": "92be876f-0d70-4ffe-b387-13a52fd66942",
"job-name": "autogen-psc-test-local-wujutc-1580204280",
"primedcron": "psc-test-local-wujutc"
},
"name": "autogen-psc-test-local-wujutc-1580204280-7fll6",
"namespace": "psc-system",
"ownerReferences": [
{
"apiVersion": "batch/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "Job",
"name": "autogen-psc-test-local-wujutc-1580204280",
"uid": "92be876f-0d70-4ffe-b387-13a52fd66942"
}
],
"resourceVersion": "2077",
"selfLink": "/api/v1/namespaces/psc-system/pods/autogen-psc-test-local-wujutc-1580204280-7fll6",
"uid": "00f12b31-e722-4092-8fb3-2a86368a2db1"
},
"spec": {
"containers": [
{
"args": [
"/bin/sh",
"-c",
"date; sleep 30s; date;"
],
"image": "busybox",
"imagePullPolicy": "Always",
"name": "test-busybox",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"initContainers": [
{
"env": [
{
"name": "CRONJOB_SCHEDULE",
"value": "*/1 * * * *"
}
],
"image": "docker.io/initcontainer:1",
"imagePullPolicy": "IfNotPresent",
"name": "warmup",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"nodeName": "psccontroller-control-plane",
"priority": 0,
"restartPolicy": "OnFailure",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "default-token-hh6wt",
"secret": {
"defaultMode": 420,
"secretName": "default-token-hh6wt"
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"message": "containers with incomplete status: [warmup]",
"reason": "ContainersNotInitialized",
"status": "False",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"message": "containers with unready status: [test-busybox]",
"reason": "ContainersNotReady",
"status": "False",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"message": "containers with unready status: [test-busybox]",
"reason": "ContainersNotReady",
"status": "False",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"image": "busybox",
"imageID": "",
"lastState": {},
"name": "test-busybox",
"ready": false,
"restartCount": 0,
"state": {
"waiting": {
"reason": "PodInitializing"
}
}
}
],
"hostIP": "172.17.0.2",
"initContainerStatuses": [
{
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"image": "docker.io/library/initcontainer:1",
"imageID": "sha256:9a290fea57895f747a9e18570692e8988a35d9eca875487e2fe1f1d8fb1a1c15",
"lastState": {},
"name": "warmup",
"ready": false,
"restartCount": 0,
"state": {
"running": {
"startedAt": "2020-01-28T09:38:08Z"
}
}
}
],
"phase": "Pending",
"podIP": "10.244.0.20",
"qosClass": "BestEffort",
"startTime": "2020-01-28T09:38:08Z"
}
}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2020-01-28T09:38:08Z",
"generateName": "autogen-psc-test-local-wujutc-1580204280-",
"labels": {
"controller-uid": "92be876f-0d70-4ffe-b387-13a52fd66942",
"job-name": "autogen-psc-test-local-wujutc-1580204280",
"primedcron": "psc-test-local-wujutc"
},
"name": "autogen-psc-test-local-wujutc-1580204280-7fll6",
"namespace": "psc-system",
"ownerReferences": [
{
"apiVersion": "batch/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "Job",
"name": "autogen-psc-test-local-wujutc-1580204280",
"uid": "92be876f-0d70-4ffe-b387-13a52fd66942"
}
],
"resourceVersion": "2183",
"selfLink": "/api/v1/namespaces/psc-system/pods/autogen-psc-test-local-wujutc-1580204280-7fll6",
"uid": "00f12b31-e722-4092-8fb3-2a86368a2db1"
},
"spec": {
"containers": [
{
"args": [
"/bin/sh",
"-c",
"date; sleep 30s; date;"
],
"image": "busybox",
"imagePullPolicy": "Always",
"name": "test-busybox",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"initContainers": [
{
"env": [
{
"name": "CRONJOB_SCHEDULE",
"value": "*/1 * * * *"
}
],
"image": "docker.io/initcontainer:1",
"imagePullPolicy": "IfNotPresent",
"name": "warmup",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"nodeName": "psccontroller-control-plane",
"priority": 0,
"restartPolicy": "OnFailure",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "default-token-hh6wt",
"secret": {
"defaultMode": 420,
"secretName": "default-token-hh6wt"
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:05Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"message": "containers with unready status: [test-busybox]",
"reason": "ContainersNotReady",
"status": "False",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"message": "containers with unready status: [test-busybox]",
"reason": "ContainersNotReady",
"status": "False",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"image": "busybox",
"imageID": "",
"lastState": {},
"name": "test-busybox",
"ready": false,
"restartCount": 0,
"state": {
"waiting": {
"reason": "PodInitializing"
}
}
}
],
"hostIP": "172.17.0.2",
"initContainerStatuses": [
{
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"image": "docker.io/library/initcontainer:1",
"imageID": "sha256:9a290fea57895f747a9e18570692e8988a35d9eca875487e2fe1f1d8fb1a1c15",
"lastState": {},
"name": "warmup",
"ready": true,
"restartCount": 0,
"state": {
"terminated": {
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"exitCode": 0,
"finishedAt": "2020-01-28T09:39:04Z",
"reason": "Completed",
"startedAt": "2020-01-28T09:38:08Z"
}
}
}
],
"phase": "Pending",
"podIP": "10.244.0.20",
"qosClass": "BestEffort",
"startTime": "2020-01-28T09:38:08Z"
}
}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2020-01-28T09:38:08Z",
"generateName": "autogen-psc-test-local-wujutc-1580204280-",
"labels": {
"controller-uid": "92be876f-0d70-4ffe-b387-13a52fd66942",
"job-name": "autogen-psc-test-local-wujutc-1580204280",
"primedcron": "psc-test-local-wujutc"
},
"name": "autogen-psc-test-local-wujutc-1580204280-7fll6",
"namespace": "psc-system",
"ownerReferences": [
{
"apiVersion": "batch/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "Job",
"name": "autogen-psc-test-local-wujutc-1580204280",
"uid": "92be876f-0d70-4ffe-b387-13a52fd66942"
}
],
"resourceVersion": "2190",
"selfLink": "/api/v1/namespaces/psc-system/pods/autogen-psc-test-local-wujutc-1580204280-7fll6",
"uid": "00f12b31-e722-4092-8fb3-2a86368a2db1"
},
"spec": {
"containers": [
{
"args": [
"/bin/sh",
"-c",
"date; sleep 30s; date;"
],
"image": "busybox",
"imagePullPolicy": "Always",
"name": "test-busybox",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"initContainers": [
{
"env": [
{
"name": "CRONJOB_SCHEDULE",
"value": "*/1 * * * *"
}
],
"image": "docker.io/initcontainer:1",
"imagePullPolicy": "IfNotPresent",
"name": "warmup",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"nodeName": "psccontroller-control-plane",
"priority": 0,
"restartPolicy": "OnFailure",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "default-token-hh6wt",
"secret": {
"defaultMode": 420,
"secretName": "default-token-hh6wt"
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:05Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:07Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:07Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "containerd://ffe1c6cbbb870ccd68f173a20f3be2477729a93fedbafa303eb7e0383a7b1ad6",
"image": "docker.io/library/busybox:latest",
"imageID": "docker.io/library/busybox@sha256:6915be4043561d64e0ab0f8f098dc2ac48e077fe23f488ac24b665166898115a",
"lastState": {},
"name": "test-busybox",
"ready": true,
"restartCount": 0,
"state": {
"running": {
"startedAt": "2020-01-28T09:39:07Z"
}
}
}
],
"hostIP": "172.17.0.2",
"initContainerStatuses": [
{
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"image": "docker.io/library/initcontainer:1",
"imageID": "sha256:9a290fea57895f747a9e18570692e8988a35d9eca875487e2fe1f1d8fb1a1c15",
"lastState": {},
"name": "warmup",
"ready": true,
"restartCount": 0,
"state": {
"terminated": {
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"exitCode": 0,
"finishedAt": "2020-01-28T09:39:04Z",
"reason": "Completed",
"startedAt": "2020-01-28T09:38:08Z"
}
}
}
],
"phase": "Running",
"podIP": "10.244.0.20",
"qosClass": "BestEffort",
"startTime": "2020-01-28T09:38:08Z"
}
}
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2020-01-28T09:38:08Z",
"generateName": "autogen-psc-test-local-wujutc-1580204280-",
"labels": {
"controller-uid": "92be876f-0d70-4ffe-b387-13a52fd66942",
"job-name": "autogen-psc-test-local-wujutc-1580204280",
"primedcron": "psc-test-local-wujutc"
},
"name": "autogen-psc-test-local-wujutc-1580204280-7fll6",
"namespace": "psc-system",
"ownerReferences": [
{
"apiVersion": "batch/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "Job",
"name": "autogen-psc-test-local-wujutc-1580204280",
"uid": "92be876f-0d70-4ffe-b387-13a52fd66942"
}
],
"resourceVersion": "2256",
"selfLink": "/api/v1/namespaces/psc-system/pods/autogen-psc-test-local-wujutc-1580204280-7fll6",
"uid": "00f12b31-e722-4092-8fb3-2a86368a2db1"
},
"spec": {
"containers": [
{
"args": [
"/bin/sh",
"-c",
"date; sleep 30s; date;"
],
"image": "busybox",
"imagePullPolicy": "Always",
"name": "test-busybox",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"initContainers": [
{
"env": [
{
"name": "CRONJOB_SCHEDULE",
"value": "*/1 * * * *"
}
],
"image": "docker.io/initcontainer:1",
"imagePullPolicy": "IfNotPresent",
"name": "warmup",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-hh6wt",
"readOnly": true
}
]
}
],
"nodeName": "psccontroller-control-plane",
"priority": 0,
"restartPolicy": "OnFailure",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "default-token-hh6wt",
"secret": {
"defaultMode": 420,
"secretName": "default-token-hh6wt"
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:05Z",
"reason": "PodCompleted",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:37Z",
"reason": "PodCompleted",
"status": "False",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:39:37Z",
"reason": "PodCompleted",
"status": "False",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2020-01-28T09:38:08Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "containerd://ffe1c6cbbb870ccd68f173a20f3be2477729a93fedbafa303eb7e0383a7b1ad6",
"image": "docker.io/library/busybox:latest",
"imageID": "docker.io/library/busybox@sha256:6915be4043561d64e0ab0f8f098dc2ac48e077fe23f488ac24b665166898115a",
"lastState": {},
"name": "test-busybox",
"ready": false,
"restartCount": 0,
"state": {
"terminated": {
"containerID": "containerd://ffe1c6cbbb870ccd68f173a20f3be2477729a93fedbafa303eb7e0383a7b1ad6",
"exitCode": 0,
"finishedAt": "2020-01-28T09:39:37Z",
"reason": "Completed",
"startedAt": "2020-01-28T09:39:07Z"
}
}
}
],
"hostIP": "172.17.0.2",
"initContainerStatuses": [
{
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"image": "docker.io/library/initcontainer:1",
"imageID": "sha256:9a290fea57895f747a9e18570692e8988a35d9eca875487e2fe1f1d8fb1a1c15",
"lastState": {},
"name": "warmup",
"ready": true,
"restartCount": 0,
"state": {
"terminated": {
"containerID": "containerd://89d2ee8fab9150cacafc771044d2a559b3d2adee33ae7d70464db1912685d012",
"exitCode": 0,
"finishedAt": "2020-01-28T09:39:04Z",
"reason": "Completed",
"startedAt": "2020-01-28T09:38:08Z"
}
}
}
],
"phase": "Succeeded",
"podIP": "10.244.0.20",
"qosClass": "BestEffort",
"startTime": "2020-01-28T09:38:08Z"
}
}