Update azure-pipelines.yml to fix build failure (#307)

* Update azure-pipelines.yml to fix build failure

Not sure if this will solve the issue; can anyone chime in?

Build Error: go: cannot find GOROOT directory: /usr/local/go1.13

https://tpark.visualstudio.com/fabrikate/_build/results?buildId=5235&view=logs&j=12f1170f-54f2-53f3-20dd-22fc7dff55f9&s=d654deb9-056d-50a2-1717-90c08683d50a&t=f8ed7bd8-2a7f-56f6-9385-7fc29a8b5b7b&l=12

* Change to Azure DevOps Go 1.11+

https://docs.microsoft.com/en-us/azure/devops/pipelines/ecosystems/go?view=azure-devops&tabs=go-current#set-up-go

* Update azure-pipelines.yml with Go 1.11+

* Update azure-pipelines.yml

* Fix linting errors in find.go

* Replace efk with jaeger to fix TestGenerateJSON

* Limit processor count to 1 in go test

* Properly install jaeger to helm_repos/jaeger

* Revert "Limit processor count to 1 in go test"

This reverts commit af85ec69ab.
Andrew Doing 2020-09-02 14:49:43 -07:00 committed by GitHub
Parent 19220cbade
Commit 7d61cd5c83
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
194 changed files with 3774 additions and 6709 deletions

View file

@@ -10,25 +10,22 @@ pool:
vmImage: "Ubuntu-16.04"
variables:
GOBIN: "$(GOPATH)/bin" # Go binaries path
GOROOT: "/usr/local/go1.13" # Go installation path
GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path
modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module's code
GO111MODULE: "on"
linterTimeout: "5m"
steps:
# https://docs.microsoft.com/en-us/azure/devops/pipelines/tasks/tool/go-tool
- task: GoTool@0
inputs:
version: "1.15"
- script: |
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
go version
displayName: "Set up Go workspace"
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.30.0
./bin/golangci-lint run --timeout $(linterTimeout)
displayName: "Lint"
- script: |
scripts/build clean
displayName: "Clean"
- script: |
HELM_URL=https://get.helm.sh
@@ -38,22 +35,8 @@ steps:
PATH=`pwd`/linux-amd64/:$PATH
displayName: "Install helm"
- script: |
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.25.0
displayName: Install linter
- script: |
scripts/build clean
go mod download
workingDirectory: "$(modulePath)"
displayName: "Get Go dependencies"
- script: |
golangci-lint run --timeout $(linterTimeout)
workingDirectory: "$(modulePath)"
displayName: "Lint"
- script: |
go test -v -race ./...
workingDirectory: "$(modulePath)"
displayName: "Test"
- task: Go@0
displayName: Test
inputs:
command: test
arguments: -v -race ./...

View file

@@ -5,31 +5,32 @@ import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
"github.com/google/go-github/v28/github"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
// FindComponent finds fabrikate components in the fabrikate-definitions repository that are related to the given keyword.
func FindComponent(keyword string)(error){
func FindComponent(keyword string) error {
client := github.NewClient(nil)
ctx := context.Background()
query := keyword + "+repo:microsoft/fabrikate-definitions"
results, _, err := client.Search.Code(ctx, query, nil)
if (err != nil || results.CodeResults == nil){
if err != nil || results.CodeResults == nil {
return err
}
components := GetFabrikateComponents(results.CodeResults)
fmt.Println(fmt.Sprintf("Search results for '%s':", keyword))
if (len(components) == 0){
fmt.Printf("Search results for '%s':\n", keyword)
if len(components) == 0 {
log.Info(fmt.Sprintf("No components were found for '%s'", keyword))
} else{
} else {
for _, component := range components {
fmt.Println(component)
}
@@ -39,19 +40,19 @@ func FindComponent(keyword string)(error){
}
// GetFabrikateComponents returns a unique list of fabrikate components from a github search result
func GetFabrikateComponents(codeResults []github.CodeResult) ([]string){
if (codeResults == nil){
func GetFabrikateComponents(codeResults []github.CodeResult) []string {
if codeResults == nil {
return []string{}
}
components := []string{}
uniqueComponents := map[string]bool{}
uniqueComponents := map[string]bool{}
for _, result := range codeResults{
for _, result := range codeResults {
path := *result.Path
if(!strings.HasPrefix(path, "definitions")){
if !strings.HasPrefix(path, "definitions") {
continue
}
@@ -64,7 +65,7 @@ func GetFabrikateComponents(codeResults []github.CodeResult) ([]string){
}
}
return components;
return components
}
var findCmd = &cobra.Command{
@@ -75,8 +76,8 @@ Eg.
$ fab find prometheus
Finds fabrikate components that are related to 'prometheus'.
`,
RunE: func(cmd *cobra.Command, args []string) (err error){
RunE: func(cmd *cobra.Command, args []string) (err error) {
if len(args) != 1 {
return errors.New("'find' takes one argument")
}

View file

@@ -22,14 +22,11 @@ func TestGenerateJSON(t *testing.T) {
assert.Nil(t, err)
expectedLengths := map[string]int{
"elasticsearch": 14477,
"elasticsearch-curator": 2390,
"fluentd-elasticsearch": 20230,
"kibana": 1590,
"jaeger": 26916,
"static": 188,
}
assert.Equal(t, 8, len(components))
assert.Equal(t, 4, len(components))
checkComponentLengthsAgainstExpected(t, components, expectedLengths)
}

testdata/generate/infra/component.json (vendored)

View file

@@ -2,13 +2,9 @@
"name": "infra",
"subcomponents": [
{
"name": "efk",
"method": "git",
"source": "https://github.com/timfpark/fabrikate-elasticsearch-fluentd-kibana"
},
{
"name": "static",
"source": "./static"
"name": "fabrikate-jaeger",
"type": "component",
"source": "./fabrikate-jaeger"
}
]
}

View file

@@ -1,35 +0,0 @@
{
"name": "elasticsearch-fluentd-kibana",
"generator": "static",
"path": "./manifests",
"subcomponents": [
{
"name": "elasticsearch",
"generator": "helm",
"source": "https://github.com/helm/charts",
"method": "git",
"path": "stable/elasticsearch"
},
{
"name": "elasticsearch-curator",
"generator": "helm",
"source": "https://github.com/helm/charts",
"method": "git",
"path": "stable/elasticsearch-curator"
},
{
"name": "fluentd-elasticsearch",
"generator": "helm",
"source": "https://github.com/helm/charts",
"method": "git",
"path": "stable/fluentd-elasticsearch"
},
{
"name": "kibana",
"generator": "helm",
"source": "https://github.com/helm/charts",
"method": "git",
"path": "stable/kibana"
}
]
}

View file

@@ -1,43 +0,0 @@
{
"config": {},
"subcomponents": {
"elasticsearch": {
"namespace": "elasticsearch",
"injectNamespace": true,
"config": {}
},
"elasticsearch-curator": {
"namespace": "elasticsearch",
"injectNamespace": true,
"config": {
"config": {
"elasticsearch": {
"hosts": [
"elasticsearch-client.elasticsearch.svc.cluster.local"
]
}
}
}
},
"fluentd-elasticsearch": {
"namespace": "fluentd",
"injectNamespace": true,
"config": {
"elasticsearch": {
"host": "elasticsearch-client.elasticsearch.svc.cluster.local"
}
}
},
"kibana": {
"namespace": "kibana",
"injectNamespace": true,
"config": {
"files": {
"kibana.yml": {
"elasticsearch.url": "http://elasticsearch-client.elasticsearch.svc.cluster.local:9200"
}
}
}
}
}
}

View file

@@ -1,49 +0,0 @@
version: 2
jobs:
lint-scripts:
docker:
- image: koalaman/shellcheck-alpine
steps:
- checkout
- run:
name: lint
command: |
shellcheck -x test/build.sh
shellcheck -x test/e2e.sh
shellcheck -x test/helm-test-e2e.sh
shellcheck -x test/repo-sync.sh
lint-charts:
docker:
- image: gcr.io/kubernetes-charts-ci/test-image:v3.1.0
steps:
- checkout
- run:
name: lint
command: |
git remote add k8s https://github.com/helm/charts
git fetch k8s master
ct lint --config test/ct.yaml
sync:
docker:
- image: google/cloud-sdk
steps:
- checkout
- run:
name: sync
command: test/repo-sync.sh
workflows:
version: 2
lint:
jobs:
- lint-scripts
- lint-charts
sync:
triggers:
- schedule:
cron: "23,53 * * * *"
filters:
branches:
only:
- master
jobs:
- sync

View file

@@ -1,38 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions. It's helpful to search the existing GitHub issues first. It's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of-->
**Is this a request for help?**:
---
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can. If you leave out
information, we can't help you as well.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Version of Helm and Kubernetes**:
**Which chart**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:

View file

@@ -1,40 +0,0 @@
<!--
Thank you for contributing to helm/charts. Before you submit this PR we'd like to
make sure you are aware of our technical requirements and best practices:
* https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements
* https://github.com/helm/helm/tree/master/docs/chart_best_practices
For a quick overview across what we will look at reviewing your PR, please read
our review guidelines:
* https://github.com/helm/charts/blob/master/REVIEW_GUIDELINES.md
Following our best practices right from the start will accelerate the review process and
help get your PR merged quicker.
When updates to your PR are requested, please add new commits and do not squash the
history. This will make it easier to identify new changes. The PR will be squashed
anyways when it is merged. Thanks.
For fast feedback, please @-mention maintainers that are listed in the Chart.yaml file.
Please make sure you test your changes before you push them. Once pushed, a CircleCI
will run across your changes and do some initial checks and linting. These checks run
very quickly. Please check the results. We would like these checks to pass before we
even continue reviewing your changes.
-->
#### What this PR does / why we need it:
#### Which issue this PR fixes
*(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*
- fixes #
#### Special notes for your reviewer:
#### Checklist
[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.]
- [ ] [DCO](https://www.helm.sh/blog/helm-dco/index.html) signed
- [ ] Chart Version bumped
- [ ] Variables are documented in the README.md

View file

@@ -1,16 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- lifecycle/frozen
staleLabel: lifecycle/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Any further update will
cause the issue/pull request to no longer be considered stale. Thank you for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue is being automatically closed due to inactivity.

View file

@@ -1,37 +0,0 @@
# General files for the project
pkg/*
*.pyc
bin/*
.project
/.bin
/_test/secrets/*.json
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
.DS_Store
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Chart dependencies
**/charts/*.tgz
.history

View file

@@ -1,136 +0,0 @@
# Contributing Guidelines
The Kubernetes Charts project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.
## Sign Your Work
The sign-off is a simple line at the end of the explanation for a commit. All
commits need to be signed. Your signature certifies that you wrote the patch or
otherwise have the right to contribute the material. The rules are pretty simple,
if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@example.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
Note: If your git config information is set properly then viewing the
`git log` information for your commit will look something like this:
```
Author: Joe Smith <joe.smith@example.com>
Date: Thu Feb 2 11:41:15 2018 -0800
Update README
Signed-off-by: Joe Smith <joe.smith@example.com>
```
Notice the `Author` and `Signed-off-by` lines match. If they don't,
your PR will be rejected by the automated DCO check.
### Reporting a Bug in Helm
This repository is used by Chart developers for maintaining the official charts for Kubernetes Helm. If your issue is in the Helm tool itself, please use the issue tracker in the [helm/helm](https://github.com/helm/helm) repository.
## How to Contribute a Chart
1. Fork this repository, develop and test your Chart. Remember to sign off your commits as described in the "Sign Your Work" chapter.
1. Choose the correct folder for your chart based on the information in the [Repository Structure](README.md#repository-structure) section
1. Ensure your Chart follows the [technical](#technical-requirements) and [documentation](#documentation-requirements) guidelines, described below.
1. Submit a pull request.
***NOTE***: In order to make testing and merging of PRs easier, please submit changes to multiple charts in separate PRs.
### Technical requirements
* All Chart dependencies should also be submitted independently
* Must pass the linter (`helm lint`)
* Must successfully launch with default values (`helm install .`)
* All pods go to the running state (or NOTES.txt provides further instructions if a required value is missing e.g. [minecraft](https://github.com/helm/charts/blob/master/stable/minecraft/templates/NOTES.txt#L3))
* All services have at least one endpoint
* Must include source GitHub repositories for images used in the Chart
* Images should not have any major security vulnerabilities
* Must be up-to-date with the latest stable Helm/Kubernetes features
* Use Deployments in favor of ReplicationControllers
* Should follow Kubernetes best practices
* Include Health Checks wherever practical
* Allow configurable [resource requests and limits](http://kubernetes.io/docs/user-guide/compute-resources/#resource-requests-and-limits-of-pod-and-container)
* Provide a method for data persistence (if applicable)
* Support application upgrades
* Allow customization of the application configuration
* Provide a secure default configuration
* Do not leverage alpha features of Kubernetes
* Includes a [NOTES.txt](https://github.com/helm/helm/blob/master/docs/charts.md#chart-license-readme-and-notes) explaining how to use the application after install
* Follows [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices)
(especially for [labels](https://github.com/helm/helm/blob/master/docs/chart_best_practices/labels.md)
and [values](https://github.com/helm/helm/blob/master/docs/chart_best_practices/values.md))
### Documentation requirements
* Must include an in-depth `README.md`, including:
* Short description of the Chart
* Any prerequisites or requirements
* Customization: explaining options in `values.yaml` and their defaults
* Must include a short `NOTES.txt`, including:
* Any relevant post-installation information for the Chart
* Instructions on how to access the application or service provided by the Chart
### Merge approval and release process
A Kubernetes Charts maintainer will review the Chart submission, and start a validation job in the CI to verify the technical requirements of the Chart. A maintainer may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one maintainer signs off with an LGTM.
Once the Chart has been merged, the release job will automatically run in the CI to package and release the Chart in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/).
## Support Channels
Whether you are a user or contributor, official support channels include:
- GitHub issues: https://github.com/helm/charts/issues
- Slack: Helm Users - #Helm-users room in the [Kubernetes Slack](http://slack.kubernetes.io/)
- Slack: Helm Developers - #Helm-dev room in the [Kubernetes Slack](http://slack.kubernetes.io/)
Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.

View file

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,14 +0,0 @@
approvers:
- lachie83
- mgoodness
- prydonius
- sameersbn
- seanknox
- viglesiasce
- foxish
- unguiculus
- scottrigby
- mattfarina
- davidkarlsen
- paulczar
- cpanato

View file

@@ -1,49 +0,0 @@
# Processes
This document outlines processes and procedures for some common tasks in the charts repository.
## Deprecating A Chart
When a chart is no longer maintained it can be [deprecated](https://en.wikipedia.org/wiki/Deprecation). Once a chart is deprecated the expectation is the chart will see no further development. The chart and its versions will still be accessible, though tools such as [monocular](https://github.com/kubernetes-helm/monocular) and [Kubeapps Hub](https://hub.kubeapps.com/) will no longer list the chart.
To deprecate a chart perform the following:
1. Increment the chart `version` in the `Chart.yaml` file. This is required as all charts are immutable.
1. Add a property to the `Chart.yaml` file of `deprecated: true` at the top level of the YAML structure.
1. Above the deprecated property add a comment noting that the chart is deprecated and linking to the deprecation policy.
1. Remove any maintainers from the chart as the chart is no longer maintained.
1. Prefix the description with "DEPRECATED"
1. Update READMEs and NOTES.txt to note that the chart is deprecated
For example, a `Chart.yaml` could start like:
```yaml
name: foo
# The foo chart is deprecated and no longer maintained. For details on deprecation,
# including how to un-deprecate a chart, see the PROCESSES.md file.
deprecated: true
description: DEPRECATED foo bar baz qux...
```
## Un-deprecating A Chart
When new maintainers are interested in bringing a chart out of deprecation with
new features or new support, that can be an option. To un-deprecate a chart:
1. Update the maintainers on the chart if any are listed. The previous maintainers should not be expected to maintain the chart unless they explicitly decide to do so.
1. If there is an OWNERS file in the chart, it should be updated with the new reviewers and approvers.
1. The deprecated property from the `Chart.yaml` file should be removed along with any associated comment.
1. The chart `version` needs to be incremented accordingly. If the same functionality is kept the version can be a patch increase. Otherwise the minor or major version needs to be incremented. For more detail on changing the version number see the [semver specification](http://semver.org).
## Promoting A Chart From Incubator To Stable
When promoting a chart from incubator to stable there are several steps that need to be carried out.
1. Prior to promoting the chart verify that it does not depend on any other incubator charts. Stable charts cannot depend on incubator charts.
1. The chart should be copied, not moved, from the incubator directory to the stable directory.
1. The chart in the incubator directory should be deprecated according to the [deprecation process](#deprecating-a-chart) described above with a comment noting that the chart has been promoted to stable.
1. The version of the chart in the stable directory should be updated so that any documentation or other details points to stable rather than incubator. The chart `version` will, also, need to be incremented.
## Reviewing A Pull Request
There are two parts to reviewing a pull request in the process to do so and the guidelines to follow. Both of those are outlined in the [Review Guidelines](REVIEW_GUIDELINES.md).

View file

@@ -1,101 +0,0 @@
# Helm Charts
Use this repository to submit official Charts for Helm. Charts are curated application definitions for Helm. For more information about installing and using Helm, see its
[README.md](https://github.com/helm/helm/tree/master/README.md). To get a quick introduction to Charts see this [chart document](https://github.com/helm/helm/blob/master/docs/charts.md).
## Where to find us
For general Helm Chart discussions join the Helm Charts (#charts) room in the [Kubernetes Slack](http://slack.kubernetes.io/).
For issues and support for Helm and Charts see [Support Channels](CONTRIBUTING.md#support-channels).
## How do I install these charts?
Just `helm install stable/<chart>`. This is the default repository for Helm which is located at https://kubernetes-charts.storage.googleapis.com/ and is installed by default.
For more information on using Helm, refer to the [Helm's documentation](https://github.com/kubernetes/helm#docs).
## How do I enable the Incubator repository?
To add the Incubator charts for your local client, run `helm repo add`:
```
$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
"incubator" has been added to your repositories
```
You can then run `helm search incubator` to see the charts.
## Chart Format
Take a look at the [alpine example chart](https://github.com/helm/helm/tree/master/docs/examples/alpine) and the [nginx example chart](https://github.com/helm/helm/tree/master/docs/examples/nginx) for reference when you're writing your first few charts.
Before contributing a Chart, become familiar with the format. Note that the project is still under active development and the format may still evolve a bit.
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/) (the Chart Repository).
The Charts in the `stable/` directory in the master branch of this repository match the latest packaged Chart in the Chart Repository, though there may be previous versions of a Chart available in that Chart Repository.
The purpose of this repository is to provide a place for maintaining and contributing official Charts, with CI processes in place for managing the releasing of Charts into the Chart Repository.
The Charts in this repository are organized into two folders:
* stable
* incubator
Stable Charts meet the criteria in the [technical requirements](CONTRIBUTING.md#technical-requirements).
Incubator Charts are those that do not meet these criteria. Having the incubator folder allows charts to be shared and improved on until they are ready to be moved into the stable folder. The charts in the `incubator/` directory can be found in the [`gs://kubernetes-charts-incubator` Google Storage Bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts-incubator).
In order to get a Chart from incubator to stable, Chart maintainers should open a pull request that moves the chart folder.
## Contributing a Chart
We'd love for you to contribute a Chart that provides a useful application or service for Kubernetes. Please read our [Contribution Guide](CONTRIBUTING.md) for more information on how you can contribute Charts.
Note: We use the same [workflow](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#workflow),
[License](LICENSE) and [Contributor License Agreement](CONTRIBUTING.md) as the main Kubernetes repository.
## Owning and Maintaining A Chart
Individual charts can be maintained by one or more users of GitHub. When someone maintains a chart they have the access to merge changes to that chart. To have merge access to a chart someone needs to:
1. Be listed on the chart, in the `Chart.yaml` file, as a maintainer. If you need sponsors and have contributed to the chart, please reach out to the existing maintainers, or if you are having trouble connecting with them, please reach out to one of the [OWNERS](OWNERS) of the charts repository.
1. Be invited (and accept your invite) as a read-only collaborator on [this repo](https://github.com/helm/charts). This is required for @k8s-ci-robot [PR comment interaction](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md).
1. An OWNERS file needs to be added to a chart. That OWNERS file should list the maintainers' GitHub login names for both the reviewers and approvers sections. For an example see the [Drupal chart](stable/drupal/OWNERS). The `OWNERS` file should also be appended to the `.helmignore` file.
Once these three steps are done a chart approver can merge pull requests following the directions in the [REVIEW_GUIDELINES.md](REVIEW_GUIDELINES.md) file.
## Trusted Collaborator
The `pull-charts-e2e` test run, that installs a chart to test it, is required before a pull request can be merged. These tests run automatically for members of the Helm Org and for chart [repository collaborators](https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/). For regular contributors who are trusted, in a manner similar to Kubernetes community members, we have trusted collaborators. These individuals can have their tests run automatically as well as mark other pull requests as ok to test by adding a comment of `/ok-to-test` on pull requests.
There are two paths to becoming a trusted collaborator. One only needs follow one of them.
1. If you are a Kubernetes GitHub org member and have your Kubernetes org membership public you can become a trusted collaborator for Helm Charts
2. Get sponsorship from one of the Charts Maintainers listed in the OWNERS file at the root of this repository
The process to get added is:
* File an issue asking to be a trusted collaborator
* A Helm Chart Maintainer can then add the user as a read only collaborator to the repository
## Review Process
For information related to the review procedure used by the Chart repository maintainers, see [Merge approval and release process](CONTRIBUTING.md#merge-approval-and-release-process).
### Stale Pull Requests and Issues
Pull Requests and Issues that have no activity for 30 days automatically become stale. After 30 days of being stale, without activity, they become rotten. Pull Requests and Issues can rot for 30 days and then they are automatically closed. This is the standard stale process handling for all repositories on the Kubernetes GitHub organization.
## Supported Kubernetes Versions
This chart repository supports the latest and previous minor versions of Kubernetes. For example, if the latest minor release of Kubernetes is 1.8 then 1.7 and 1.8 are supported. Charts may still work on previous versions of Kubernetes even though they are outside the target supported window.
To provide that support the API versions of objects should be those that work for both the latest minor release and the previous one.
## Status of the Project
This project is still under active development, so you might run into [issues](https://github.com/helm/charts/issues). If you do, please don't be shy about letting us know, or better yet, contribute a fix or feature.

View file

@@ -1,340 +0,0 @@
# Chart Review Guidelines
Anyone is welcome to review pull requests. Besides our [technical requirements](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements) and [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices), here's an overview of process and review guidelines.
## Process
The process to get a pull request merged is fairly simple. First, all required tests need to pass and the contributor needs to have a signed [DCO](https://www.helm.sh/blog/helm-dco/index.html). See [Charts Testing](https://github.com/helm/charts/blob/master/test/README.md) for details on our CI system and how you can provide custom values for testing. If there is a problem with some part of the test, such as a timeout issue, please contact one of the charts repository maintainers by commenting `cc @helm/charts-maintainers`.
The charts repository uses the OWNERS files to provide merge access. If a chart has an OWNERS file, an approver listed in that file can approve the pull request. If the chart does not have an OWNERS file, an approver in the OWNERS file at the root of the repository can approve the pull request.
To approve the pull request, an approver needs to leave a comment of `/lgtm` on the pull request. Once this is in place some tags (`lgtm` and `approved`) will be added to the pull request and a bot will come along and perform the merge.
Note, if a reviewer who is not an approver in an OWNERS file leaves a comment of `/lgtm` a `lgtm` label will be added but a merge will not happen.
## Immutability
Chart releases must be immutable. Any change to a chart warrants a chart version bump even if it is only changes to the documentation.
## Chart Metadata
The `Chart.yaml` should be as complete as possible. The following fields are mandatory:
* name (same as chart's directory)
* home
* version
* appVersion
* description
* maintainers (name should be Github username)
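A minimal sketch of a `Chart.yaml` carrying the mandatory fields (every value below is an illustrative placeholder, not taken from a real chart):
```yaml
# Hypothetical example; substitute your chart's real data.
name: myapp                     # must match the chart's directory name
home: https://github.com/example/myapp
version: 1.2.3                  # chart version, bumped on every change
appVersion: "4.5.6"             # version of the application being packaged
description: A Helm chart that deploys myapp on Kubernetes
maintainers:
  - name: example-github-user   # GitHub username
```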
## Dependencies
Stable charts should not depend on charts in incubator.
## Names and Labels
### Metadata
Resources and labels should follow some conventions. The standard resource metadata (`metadata.labels` and `spec.template.metadata.labels`) should be this:
```yaml
name: {{ include "myapp.fullname" . }}
labels:
  app.kubernetes.io/name: {{ include "myapp.name" . }}
  app.kubernetes.io/instance: {{ .Release.Name }}
  app.kubernetes.io/managed-by: {{ .Release.Service }}
  helm.sh/chart: {{ include "myapp.chart" . }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added (e.g. `app.kubernetes.io/component: server`). The resource name should get the component as suffix (e.g. `name: {{ include "myapp.fullname" . }}-server`).
Note that templates have to be namespaced. With Helm 2.7+, `helm create` does this out-of-the-box. The `app.kubernetes.io/name` label should use the `name` template, not `fullname` as is still the case with older charts.
### Deployments, StatefulSets, DaemonSets Selectors
`spec.selector.matchLabels` must be specified and should follow some conventions. The standard selector should be this:
```yaml
selector:
  matchLabels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, a `component` label should be added to the selector (see above).
`spec.selector.matchLabels` defined in `Deployments`/`StatefulSets`/`DaemonSets` `>=v1/beta2` **must not** contain `helm.sh/chart` label or any label containing a version of the chart, because the selector is immutable.
The chart label string contains the version, so if it is specified, whenever the Chart.yaml version changes, Helm's attempt to change this immutable field would cause the upgrade to fail.
#### Fixing Selectors
##### For Deployments, StatefulSets, DaemonSets apps/v1beta1 or extensions/v1beta1
- If it does not specify `spec.selector.matchLabels`, set it
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump patch version of the Chart
##### For Deployments, StatefulSets, DaemonSets >=apps/v1beta2
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump major version of the Chart as it is a breaking change
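Concretely, the removal step in both lists above amounts to dropping the chart label from the otherwise standard selector; a hedged before/after sketch:
```yaml
# before: helm.sh/chart in matchLabels breaks upgrades once the chart version changes
selector:
  matchLabels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    helm.sh/chart: {{ include "myapp.chart" . }}

# after: only the stable labels remain in the immutable selector
selector:
  matchLabels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
```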
### Service Selectors
Label selectors for services must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels.
```yaml
selector:
  app.kubernetes.io/name: {{ include "myapp.name" . }}
  app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### Persistence Labels
### StatefulSet
In case of a `Statefulset`, `spec.volumeClaimTemplates.metadata.labels` must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels, and **must not** contain `helm.sh/chart` label or any label containing a version of the chart, because `spec.volumeClaimTemplates` is immutable.
```yaml
labels:
  app.kubernetes.io/name: {{ include "myapp.name" . }}
  app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### PersistentVolumeClaim
In the case of a `PersistentVolumeClaim`, unless there are special needs, `matchLabels` should not be specified,
because it would prevent automatic `PersistentVolume` provisioning.
## Formatting
* YAML files should be indented with two spaces.
* List indentation style should be consistent.
* There should be a single space after `{{` and before `}}`.
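For instance, a small sketch applying these rules (two-space indentation, consistent list style, single spaces inside template braces):
```yaml
labels:
  app.kubernetes.io/name: {{ include "myapp.name" . }}   # single space after {{ and before }}
ports:
  - containerPort: 8080                                   # list items indented consistently
```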
## Configuration
* Docker images should be configurable. Image tags should use stable versions.
```yaml
image:
  repository: myapp
  tag: 1.2.3
  pullPolicy: IfNotPresent
```
* The use of the `default` function should be avoided if possible in favor of defaults in `values.yaml`.
* It is usually best to not specify defaults for resources and to just provide sensible values that are commented out as a recommendation, especially when resources are rather high. This makes it easier to test charts on small clusters or Minikube. Setting resources should generally be a conscious choice of the user.
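A hedged sketch of a `values.yaml` fragment that keeps resources commented out as a recommendation (the figures are illustrative only, following the pattern used by `helm create`):
```yaml
resources: {}
  # We usually recommend not to specify default resources and to leave this as
  # a conscious choice for the user; the commented values are only a hint.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
```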
## Persistence
* Persistence should be enabled by default
* PVCs should support specifying an existing claim
* Storage class should be empty by default so that the default storage class is used
* All options should be shown in README.md
* Example persistence section in values.yaml:
```yaml
persistence:
  enabled: true
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: ""
  accessMode: ReadWriteOnce
  size: 10Gi
  # existingClaim: ""
```
* Example pod spec within a deployment:
```yaml
volumes:
  - name: data
{{- if .Values.persistence.enabled }}
    persistentVolumeClaim:
      claimName: {{ .Values.persistence.existingClaim | default (include "myapp.fullname" .) }}
{{- else }}
    emptyDir: {}
{{- end -}}
```
* Example pvc.yaml:
```yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    helm.sh/chart: {{ include "myapp.chart" . }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
```
## AutoScaling / HorizontalPodAutoscaler
* Autoscaling should be disabled by default
* All options should be shown in README.md
* Example autoscaling section in values.yaml:
```yaml
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 50
  targetMemoryUtilizationPercentage: 50
```
* Example hpa.yaml:
```yaml
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    helm.sh/chart: {{ include "myapp.chart" . }}
    app.kubernetes.io/component: "{{ .Values.name }}"
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "myapp.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    - type: Resource
      resource:
        name: cpu
        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
```
## Ingress
* See the [Ingress resource documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for a broader concept overview
* Ingress should be disabled by default
* Example ingress section in values.yaml:
```yaml
ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.test
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.test
```
* Example ingress.yaml:
```yaml
{{- if .Values.ingress.enabled -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    helm.sh/chart: {{ include "myapp.chart" . }}
  {{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $.Values.ingress.path }}
            backend:
              serviceName: {{ include "myapp.fullname" $ }}
              servicePort: http
    {{- end }}
{{- end }}
```
* Example prepend logic for getting an application URL in NOTES.txt:
```
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
```
## Documentation
`README.md` and `NOTES.txt` are mandatory. `README.md` should contain a table listing all configuration options. `NOTES.txt` should provide accurate and useful information on how the chart can be used/accessed.
## Compatibility
We officially support compatibility with the current and the previous minor version of Kubernetes. Generated resources should use the latest possible API versions compatible with these versions. For extended backwards compatibility, conditional logic based on capabilities may be used (see [built-in objects](https://github.com/helm/helm/blob/master/docs/chart_template_guide/builtin_objects.md)).
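A minimal sketch of such capability-based logic, assuming a Deployment template (which API groups to probe depends on the chart):
```yaml
{{- if .Capabilities.APIVersions.Has "apps/v1" }}
apiVersion: apps/v1
{{- else }}
apiVersion: apps/v1beta2
{{- end }}
kind: Deployment
```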
## Kubernetes Native Workloads
While reviewing Charts that contain workloads such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) the below points should be considered. These are to be seen as best practices rather than strict enforcement.
1. Any workloads that are stateless and long running (servers) in nature are to be created as Deployments. Deployments in turn create ReplicaSets.
2. Unless there is a compelling reason, ReplicaSets or ReplicationControllers should be avoided as workload types.
3. Workloads that are stateful in nature such as databases, key-value stores, message queues, in-memory caches are to be created as StatefulSets
4. It is recommended that Deployments and StatefulSets configure their workloads with a [Pod Disruption Budget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) for high availability (see the sketch at the end of this section).
5. For workloads such as databases, KV stores, etc., that replicate data, it is recommended to configure interpod anti-affinity.
6. It is recommended to have a default workload update strategy configured that is suitable for this chart.
7. Batch workloads are to be created using Jobs.
8. It is best to always create workloads with the latest supported [api version](https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/) as older versions are either deprecated or soon to be deprecated.
9. It is generally not advisable to provide hard [resource limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) to workloads; leave them configurable unless the workload requires such a quantity as a bare minimum to function.
10. As much as possible, complex pre-app setups should be configured using [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
More [configuration](https://kubernetes.io/docs/concepts/configuration/overview/) best practices.
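Item 4 above recommends a Pod Disruption Budget; a minimal sketch, assuming the `myapp` naming helpers used in the earlier examples:
```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  minAvailable: 1   # illustrative; expose this through values.yaml in a real chart
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "myapp.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
```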

View file

@@ -1,3 +0,0 @@
# Community Code of Conduct
Helm follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View file

@@ -1,18 +0,0 @@
apiVersion: v1
appVersion: "5.5.4"
description: A Helm chart for Elasticsearch Curator
name: elasticsearch-curator
version: 1.1.0
home: https://github.com/elastic/curator
keywords:
- curator
- elasticsearch
- elasticsearch-curator
sources:
- https://github.com/kubernetes/charts/elasticsearch-curator
- https://github.com/pires/docker-elasticsearch-curator
maintainers:
- name: tmestdagh
email: mestdagh.tom@gmail.com
- name: gianrubio
email: gianrubio@gmail.com

View file

@@ -1,6 +0,0 @@
approvers:
- tmestdagh
- gianrubio
reviewers:
- tmestdagh
- gianrubio

View file

@@ -1,51 +0,0 @@
# Elasticsearch Curator Helm Chart
This directory contains a Kubernetes chart to deploy the [Elasticsearch Curator](https://github.com/elastic/curator).
## Prerequisites Details
* Elasticsearch
* The `elasticsearch-curator` cron job requires [K8s CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) support:
> You need a working Kubernetes cluster at version >= 1.8 (for CronJob). For previous versions of cluster (< 1.8) you need to explicitly enable `batch/v2alpha1` API by passing `--runtime-config=batch/v2alpha1=true` to the API server ([see Turn on or off an API version for your cluster for more](https://kubernetes.io/docs/admin/cluster-management/#turn-on-or-off-an-api-version-for-your-cluster)).
## Chart Details
This chart will do the following:
* Create a CronJob which runs the Curator
## Installing the Chart
To install the chart, use the following:
```console
$ helm install stable/elasticsearch-curator
```
## Configuration
The following table lists the configurable parameters of the elasticsearch-curator chart and
their default values.
| Parameter | Description | Default |
| :----------------------------------- | :---------------------------------------------------- | :------------------------------------------- |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.repository` | Container image to use | `quay.io/pires/docker-elasticsearch-curator` |
| `image.tag` | Container image tag to deploy | `5.5.4` |
| `hooks` | Whether to run job on selected hooks | `{ "install": false, "upgrade": false }` |
| `cronjob.schedule` | Schedule for the CronJob | `0 1 * * *` |
| `cronjob.annotations` | Annotations to add to the cronjob | {} |
| `cronjob.concurrencyPolicy` | `Allow|Forbid|Replace` concurrent jobs | `nil` |
| `cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `nil` |
| `cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `nil` |
| `pod.annotations` | Annotations to add to the pod | {} |
| `configMaps.action_file_yml` | Contents of the Curator action_file.yml | See values.yaml |
| `configMaps.config_yml` | Contents of the Curator config.yml (overrides config) | See values.yaml |
| `resources` | Resource requests and limits | {} |
| `priorityClassName` | priorityClassName | `nil` |
| `extraVolumeMounts` | Mount extra volume(s), | |
| `extraVolumes` | Extra volumes | |
Specify each parameter using the `--set key=value[,key=value]` argument to
`helm install`.

View file

@@ -1,6 +0,0 @@
A CronJob will run with schedule {{ .Values.cronjob.schedule }}.
The Jobs will not be removed automagically when deleting this Helm chart.
To remove these jobs, run the following:
kubectl -n {{ .Release.Namespace }} delete job -l app={{ template "elasticsearch-curator.name" . }},release={{ .Release.Name }}


@ -1,44 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the appropriate apiVersion for cronjob APIs.
*/}}
{{- define "cronjob.apiVersion" -}}
{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "batch/v2alpha1" }}
{{- else if semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "batch/v1beta1" }}
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "elasticsearch-curator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "elasticsearch-curator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "elasticsearch-curator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "elasticsearch-curator.fullname" . }}-config
labels:
app: {{ template "elasticsearch-curator.name" . }}
chart: {{ template "elasticsearch-curator.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
action_file.yml: {{ required "A valid .Values.configMaps.action_file_yml entry is required!" (toYaml .Values.configMaps.action_file_yml | indent 2) }}
config.yml: {{ required "A valid .Values.configMaps.config_yml entry is required!" (toYaml .Values.configMaps.config_yml | indent 2) }}


@ -1,77 +0,0 @@
apiVersion: {{ template "cronjob.apiVersion" . }}
kind: CronJob
metadata:
name: {{ template "elasticsearch-curator.fullname" . }}
labels:
app: {{ template "elasticsearch-curator.name" . }}
chart: {{ template "elasticsearch-curator.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.cronjob.annotations }}
annotations:
{{ toYaml .Values.cronjob.annotations | indent 4 }}
{{- end }}
spec:
schedule: "{{ .Values.cronjob.schedule }}"
{{- with .Values.cronjob.concurrencyPolicy }}
concurrencyPolicy: {{ . }}
{{- end }}
{{- with .Values.cronjob.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ . }}
{{- end }}
{{- with .Values.cronjob.successfulJobsHistoryLimit }}
successfulJobsHistoryLimit: {{ . }}
{{- end }}
jobTemplate:
metadata:
labels:
app: {{ template "elasticsearch-curator.name" . }}
release: {{ .Release.Name }}
spec:
template:
metadata:
labels:
app: {{ template "elasticsearch-curator.name" . }}
release: {{ .Release.Name }}
{{- if .Values.pod.annotations }}
annotations:
{{ toYaml .Values.pod.annotations | indent 12 }}
{{- end }}
spec:
volumes:
- name: config-volume
configMap:
name: {{ template "elasticsearch-curator.fullname" . }}-config
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 12 }}
{{- end }}
restartPolicy: Never
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: config-volume
mountPath: /etc/es-curator
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 16 }}
{{- end }}
command: [ "curator" ]
args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
resources:
{{ toYaml .Values.resources | indent 16 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 12 }}
{{- end }}


@ -1,69 +0,0 @@
{{- range $kind, $enabled := .Values.hooks }}
{{ if $enabled }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "elasticsearch-curator.fullname" $ }}-on-{{ $kind }}
labels:
app: {{ template "elasticsearch-curator.name" $ }}
chart: {{ template "elasticsearch-curator.chart" $ }}
release: {{ $.Release.Name }}
heritage: {{ $.Release.Service }}
annotations:
"helm.sh/hook": post-{{ $kind }}
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": before-hook-creation
{{- if $.Values.cronjob.annotations }}
{{ toYaml $.Values.cronjob.annotations | indent 4 }}
{{- end }}
spec:
template:
metadata:
labels:
app: {{ template "elasticsearch-curator.name" $ }}
release: {{ $.Release.Name }}
{{- if $.Values.pod.annotations }}
annotations:
{{ toYaml $.Values.pod.annotations | indent 8 }}
{{- end }}
spec:
volumes:
- name: config-volume
configMap:
name: {{ template "elasticsearch-curator.fullname" $ }}-config
{{- if $.Values.extraVolumes }}
{{ toYaml $.Values.extraVolumes | indent 8 }}
{{- end }}
restartPolicy: Never
{{- if $.Values.priorityClassName }}
priorityClassName: "{{ $.Values.priorityClassName }}"
{{- end }}
containers:
- name: {{ $.Chart.Name }}
image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}"
imagePullPolicy: {{ $.Values.image.pullPolicy }}
volumeMounts:
- name: config-volume
mountPath: /etc/es-curator
{{- if $.Values.extraVolumeMounts }}
{{ toYaml $.Values.extraVolumeMounts | indent 12 }}
{{- end }}
command: [ "curator" ]
args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
resources:
{{ toYaml $.Values.resources | indent 12 }}
{{- with $.Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with $.Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with $.Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- end -}}
{{ end }}


@ -1,96 +0,0 @@
# Default values for elasticsearch-curator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
cronjob:
# At 01:00 every day
schedule: "0 1 * * *"
annotations: {}
concurrencyPolicy: ""
failedJobsHistoryLimit: ""
successfulJobsHistoryLimit: ""
pod:
annotations: {}
image:
repository: quay.io/pires/docker-elasticsearch-curator
tag: 5.5.4
pullPolicy: IfNotPresent
hooks:
install: false
upgrade: false
configMaps:
# Delete indices older than 7 days
action_file_yml: |-
---
actions:
1:
action: delete_indices
description: "Clean up ES by deleting old indices"
options:
timeout_override:
continue_if_exception: False
disable_action: False
ignore_empty_list: True
filters:
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 7
field:
stats_result:
epoch:
exclude: False
# Having config_yml WILL override the other config
config_yml: |-
---
client:
hosts:
- CHANGEME.host
port: 9200
# url_prefix:
# use_ssl: True
# certificate:
# client_cert:
# client_key:
# ssl_no_validate: True
# http_auth:
# timeout: 30
# master_only: False
# logging:
# loglevel: INFO
# logfile:
# logformat: default
# blacklist: ['elasticsearch', 'urllib3']
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chances that charts run in environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
priorityClassName: ""
# extraVolumes and extraVolumeMounts allow you to mount other volumes
# Example use case: mount SSL certificates when Elasticsearch has TLS enabled
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true


@ -1,49 +0,0 @@
version: 2
jobs:
lint-scripts:
docker:
- image: koalaman/shellcheck-alpine
steps:
- checkout
- run:
name: lint
command: |
shellcheck -x test/build.sh
shellcheck -x test/e2e.sh
shellcheck -x test/helm-test-e2e.sh
shellcheck -x test/repo-sync.sh
lint-charts:
docker:
- image: gcr.io/kubernetes-charts-ci/test-image:v3.1.0
steps:
- checkout
- run:
name: lint
command: |
git remote add k8s https://github.com/helm/charts
git fetch k8s master
ct lint --config test/ct.yaml
sync:
docker:
- image: google/cloud-sdk
steps:
- checkout
- run:
name: sync
command: test/repo-sync.sh
workflows:
version: 2
lint:
jobs:
- lint-scripts
- lint-charts
sync:
triggers:
- schedule:
cron: "23,53 * * * *"
filters:
branches:
only:
- master
jobs:
- sync


@ -1,38 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions. It's helpful to search the existing GitHub issues first. It's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of-->
**Is this a request for help?**:
---
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can. If you leave out
information, we can't help you as effectively.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Version of Helm and Kubernetes**:
**Which chart**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:


@ -1,40 +0,0 @@
<!--
Thank you for contributing to helm/charts. Before you submit this PR we'd like to
make sure you are aware of our technical requirements and best practices:
* https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements
* https://github.com/helm/helm/tree/master/docs/chart_best_practices
For a quick overview across what we will look at reviewing your PR, please read
our review guidelines:
* https://github.com/helm/charts/blob/master/REVIEW_GUIDELINES.md
Following our best practices right from the start will accelerate the review process and
help get your PR merged more quickly.
When updates to your PR are requested, please add new commits and do not squash the
history. This will make it easier to identify new changes. The PR will be squashed
anyway when it is merged. Thanks.
For fast feedback, please @-mention maintainers that are listed in the Chart.yaml file.
Please make sure you test your changes before you push them. Once pushed, a CircleCI
build will run across your changes and do some initial checks and linting. These checks run
very quickly. Please check the results. We would like these checks to pass before we
even continue reviewing your changes.
-->
#### What this PR does / why we need it:
#### Which issue this PR fixes
*(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*
- fixes #
#### Special notes for your reviewer:
#### Checklist
[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.]
- [ ] [DCO](https://www.helm.sh/blog/helm-dco/index.html) signed
- [ ] Chart Version bumped
- [ ] Variables are documented in the README.md


@ -1,16 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- lifecycle/frozen
staleLabel: lifecycle/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Any further update will
cause the issue/pull request to no longer be considered stale. Thank you for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue is being automatically closed due to inactivity.


@ -1,37 +0,0 @@
# General files for the project
pkg/*
*.pyc
bin/*
.project
/.bin
/_test/secrets/*.json
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
.DS_Store
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Chart dependencies
**/charts/*.tgz
.history


@ -1,136 +0,0 @@
# Contributing Guidelines
The Kubernetes Charts project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.
## Sign Your Work
The sign-off is a simple line at the end of the explanation for a commit. All
commits need to be signed. Your signature certifies that you wrote the patch or
otherwise have the right to contribute the material. The rules are pretty simple:
if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@example.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
Note: If your git config information is set properly then viewing the
`git log` information for your commit will look something like this:
```
Author: Joe Smith <joe.smith@example.com>
Date: Thu Feb 2 11:41:15 2018 -0800
Update README
Signed-off-by: Joe Smith <joe.smith@example.com>
```
Notice the `Author` and `Signed-off-by` lines match. If they don't,
your PR will be rejected by the automated DCO check.
### Reporting a Bug in Helm
This repository is used by Chart developers for maintaining the official charts for Kubernetes Helm. If your issue is in the Helm tool itself, please use the issue tracker in the [helm/helm](https://github.com/helm/helm) repository.
## How to Contribute a Chart
1. Fork this repository, develop and test your Chart. Remember to sign off your commits as described in the "Sign Your Work" chapter.
1. Choose the correct folder for your chart based on the information in the [Repository Structure](README.md#repository-structure) section
1. Ensure your Chart follows the [technical](#technical-requirements) and [documentation](#documentation-requirements) guidelines, described below.
1. Submit a pull request.
***NOTE***: In order to make testing and merging of PRs easier, please submit changes to multiple charts in separate PRs.
### Technical requirements
* All Chart dependencies should also be submitted independently
* Must pass the linter (`helm lint`)
* Must successfully launch with default values (`helm install .`)
* All pods go to the running state (or NOTES.txt provides further instructions if a required value is missing e.g. [minecraft](https://github.com/helm/charts/blob/master/stable/minecraft/templates/NOTES.txt#L3))
* All services have at least one endpoint
* Must include source GitHub repositories for images used in the Chart
* Images should not have any major security vulnerabilities
* Must be up-to-date with the latest stable Helm/Kubernetes features
* Use Deployments in favor of ReplicationControllers
* Should follow Kubernetes best practices
* Include Health Checks wherever practical
* Allow configurable [resource requests and limits](http://kubernetes.io/docs/user-guide/compute-resources/#resource-requests-and-limits-of-pod-and-container)
* Provide a method for data persistence (if applicable)
* Support application upgrades
* Allow customization of the application configuration
* Provide a secure default configuration
* Do not leverage alpha features of Kubernetes
* Includes a [NOTES.txt](https://github.com/helm/helm/blob/master/docs/charts.md#chart-license-readme-and-notes) explaining how to use the application after install
* Follows [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices)
(especially for [labels](https://github.com/helm/helm/blob/master/docs/chart_best_practices/labels.md)
and [values](https://github.com/helm/helm/blob/master/docs/chart_best_practices/values.md))
### Documentation requirements
* Must include an in-depth `README.md`, including:
* Short description of the Chart
* Any prerequisites or requirements
* Customization: explaining options in `values.yaml` and their defaults
* Must include a short `NOTES.txt`, including:
* Any relevant post-installation information for the Chart
* Instructions on how to access the application or service provided by the Chart
### Merge approval and release process
A Kubernetes Charts maintainer will review the Chart submission, and start a validation job in the CI to verify the technical requirements of the Chart. A maintainer may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one maintainer signs off with an LGTM.
Once the Chart has been merged, the release job will automatically run in the CI to package and release the Chart in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/).
## Support Channels
Whether you are a user or contributor, official support channels include:
- GitHub issues: https://github.com/helm/charts/issues
- Slack: Helm Users - #Helm-users room in the [Kubernetes Slack](http://slack.kubernetes.io/)
- Slack: Helm Developers - #Helm-dev room in the [Kubernetes Slack](http://slack.kubernetes.io/)
Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,14 +0,0 @@
approvers:
- lachie83
- mgoodness
- prydonius
- sameersbn
- seanknox
- viglesiasce
- foxish
- unguiculus
- scottrigby
- mattfarina
- davidkarlsen
- paulczar
- cpanato


@ -1,49 +0,0 @@
# Processes
This document outlines processes and procedures for some common tasks in the charts repository.
## Deprecating A Chart
When a chart is no longer maintained it can be [deprecated](https://en.wikipedia.org/wiki/Deprecation). Once a chart is deprecated the expectation is the chart will see no further development. The chart and its versions will still be accessible, though tools such as [monocular](https://github.com/kubernetes-helm/monocular) and [Kubeapps Hub](https://hub.kubeapps.com/) will no longer list the chart.
To deprecate a chart perform the following:
1. Increment the chart `version` in the `Chart.yaml` file. This is required as all charts are immutable.
1. Add a property to the `Chart.yaml` file of `deprecated: true` at the top level of the YAML structure.
1. Above the deprecated property add a comment noting that the chart is deprecated and linking to the deprecation policy.
1. Remove any maintainers from the chart as the chart is no longer maintained.
1. Prefix the description with "DEPRECATED"
1. Update READMEs and NOTES.txt to note that the chart is deprecated
For example, A `Chart.yaml` could start like:
```yaml
name: foo
# The foo chart is deprecated and no longer maintained. For details on deprecation,
# including how to un-deprecate a chart, see the PROCESSES.md file.
deprecated: true
description: DEPRECATED foo bar baz qux...
```
## Un-deprecating A Chart
When new maintainers are interested in bringing a chart out of deprecation with
new features or new support, that can be an option. To un-deprecate a chart:
1. Update the maintainers on the chart if any are listed. The previous maintainers should not be expected to maintain the chart unless they explicitly decide to do so.
1. If there is an OWNERS file in the chart, it should be updated with the new reviewers and approvers.
1. The `deprecated` property should be removed from the `Chart.yaml` file along with any associated comment.
1. The chart `version` needs to be incremented accordingly. If the same functionality is kept the version can be a patch increase. Otherwise the minor or major version needs to be incremented. For more detail on changing the version number see the [semver specification](http://semver.org).
## Promoting A Chart From Incubator To Stable
When promoting a chart from incubator to stable there are several steps that need to be carried out.
1. Prior to promoting the chart verify that it does not depend on any other incubator charts. Stable charts cannot depend on incubator charts.
1. The chart should be copied, not moved, from the incubator directory to the stable directory.
1. The chart in the incubator directory should be deprecated according to the [deprecation process](#deprecating-a-chart) described above with a comment noting that the chart has been promoted to stable.
1. The version of the chart in the stable directory should be updated so that any documentation or other details point to stable rather than incubator. The chart `version` will also need to be incremented.
## Reviewing A Pull Request
There are two parts to reviewing a pull request: the process to do so and the guidelines to follow. Both of those are outlined in the [Review Guidelines](REVIEW_GUIDELINES.md).


@ -1,101 +0,0 @@
# Helm Charts
Use this repository to submit official Charts for Helm. Charts are curated application definitions for Helm. For more information about installing and using Helm, see its
[README.md](https://github.com/helm/helm/tree/master/README.md). To get a quick introduction to Charts see this [chart document](https://github.com/helm/helm/blob/master/docs/charts.md).
## Where to find us
For general Helm Chart discussions, join the Helm Charts (#charts) room in the [Kubernetes Slack](http://slack.kubernetes.io/).
For issues and support for Helm and Charts see [Support Channels](CONTRIBUTING.md#support-channels).
## How do I install these charts?
Just `helm install stable/<chart>`. This is the default repository for Helm, which is located at https://kubernetes-charts.storage.googleapis.com/ and is installed by default.
For more information on using Helm, refer to [Helm's documentation](https://github.com/kubernetes/helm#docs).
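A concrete sketch of the command above, using one of the charts removed in this diff (the release name is illustrative, and `--name` is the Helm 2 flag for naming a release):
```console
$ helm install --name my-release stable/elasticsearch-curator
```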
## How do I enable the Incubator repository?
To add the Incubator charts for your local client, run `helm repo add`:
```
$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
"incubator" has been added to your repositories
```
You can then run `helm search incubator` to see the charts.
## Chart Format
Take a look at the [alpine example chart](https://github.com/helm/helm/tree/master/docs/examples/alpine) and the [nginx example chart](https://github.com/helm/helm/tree/master/docs/examples/nginx) for reference when you're writing your first few charts.
Before contributing a Chart, become familiar with the format. Note that the project is still under active development and the format may still evolve a bit.
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/) (the Chart Repository).
The Charts in the `stable/` directory in the master branch of this repository match the latest packaged Chart in the Chart Repository, though there may be previous versions of a Chart available in that Chart Repository.
The purpose of this repository is to provide a place for maintaining and contributing official Charts, with CI processes in place for managing the releasing of Charts into the Chart Repository.
The Charts in this repository are organized into two folders:
* stable
* incubator
Stable Charts meet the criteria in the [technical requirements](CONTRIBUTING.md#technical-requirements).
Incubator Charts are those that do not meet these criteria. Having the incubator folder allows charts to be shared and improved on until they are ready to be moved into the stable folder. The charts in the `incubator/` directory can be found in the [`gs://kubernetes-charts-incubator` Google Storage Bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts-incubator).
In order to get a Chart from incubator to stable, Chart maintainers should open a pull request that moves the chart folder.
## Contributing a Chart
We'd love for you to contribute a Chart that provides a useful application or service for Kubernetes. Please read our [Contribution Guide](CONTRIBUTING.md) for more information on how you can contribute Charts.
Note: We use the same [workflow](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#workflow),
[License](LICENSE) and [Contributor License Agreement](CONTRIBUTING.md) as the main Kubernetes repository.
## Owning and Maintaining A Chart
Individual charts can be maintained by one or more users of GitHub. When someone maintains a chart, they have access to merge changes to that chart. To have merge access to a chart, someone needs to:
1. Be listed on the chart, in the `Chart.yaml` file, as a maintainer. If you need sponsors and have contributed to the chart, please reach out to the existing maintainers, or if you are having trouble connecting with them, please reach out to one of the [OWNERS](OWNERS) of the charts repository.
1. Be invited (and accept your invite) as a read-only collaborator on [this repo](https://github.com/helm/charts). This is required for @k8s-ci-robot [PR comment interaction](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md).
1. An OWNERS file needs to be added to a chart. That OWNERS file should list the maintainers' GitHub login names for both the reviewers and approvers sections. For an example see the [Drupal chart](stable/drupal/OWNERS). The `OWNERS` file should also be appended to the `.helmignore` file.
Once these three steps are done a chart approver can merge pull requests following the directions in the [REVIEW_GUIDELINES.md](REVIEW_GUIDELINES.md) file.
## Trusted Collaborator
The `pull-charts-e2e` test run, that installs a chart to test it, is required before a pull request can be merged. These tests run automatically for members of the Helm Org and for chart [repository collaborators](https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/). For regular contributors who are trusted, in a manner similar to Kubernetes community members, we have trusted collaborators. These individuals can have their tests run automatically as well as mark other pull requests as ok to test by adding a comment of `/ok-to-test` on pull requests.
There are two paths to becoming a trusted collaborator. One only needs to follow one of them.
1. If you are a Kubernetes GitHub org member and your Kubernetes org membership is public, you can become a trusted collaborator for Helm Charts.
2. Get sponsorship from one of the Charts Maintainers listed in the OWNERS file at the root of this repository
The process to get added is:
* File an issue asking to be a trusted collaborator
* A Helm Chart Maintainer can then add the user as a read only collaborator to the repository
## Review Process
For information related to the review procedure used by the Chart repository maintainers, see [Merge approval and release process](CONTRIBUTING.md#merge-approval-and-release-process).
### Stale Pull Requests and Issues
Pull Requests and Issues that have no activity for 30 days automatically become stale. After 30 days of being stale, without activity, they become rotten. Pull Requests and Issues can rot for 30 days and then they are automatically closed. This is the standard stale process handling for all repositories on the Kubernetes GitHub organization.
## Supported Kubernetes Versions
This chart repository supports the latest and previous minor versions of Kubernetes. For example, if the latest minor release of Kubernetes is 1.8, then 1.7 and 1.8 are supported. Charts may still work on previous versions of Kubernetes even though they are outside the target supported window.
To provide that support the API versions of objects should be those that work for both the latest minor release and the previous one.
## Status of the Project
This project is still under active development, so you might run into [issues](https://github.com/helm/charts/issues). If you do, please don't be shy about letting us know, or better yet, contribute a fix or feature.


@ -1,340 +0,0 @@
# Chart Review Guidelines
Anyone is welcome to review pull requests. Besides our [technical requirements](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements) and [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices), here's an overview of process and review guidelines.
## Process
The process to get a pull request merged is fairly simple. First, all required tests need to pass and the contributor needs to have a signed [DCO](https://www.helm.sh/blog/helm-dco/index.html). See [Charts Testing](https://github.com/helm/charts/blob/master/test/README.md) for details on our CI system and how you can provide custom values for testing. If there is a problem with some part of the test, such as a timeout issue, please contact one of the charts repository maintainers by commenting `cc @helm/charts-maintainers`.
The charts repository uses the OWNERS files to provide merge access. If a chart has an OWNERS file, an approver listed in that file can approve the pull request. If the chart does not have an OWNERS file, an approver in the OWNERS file at the root of the repository can approve the pull request.
To approve the pull request, an approver needs to leave a comment of `/lgtm` on the pull request. Once this is in place some tags (`lgtm` and `approved`) will be added to the pull request and a bot will come along and perform the merge.
Note: if a reviewer who is not an approver in an OWNERS file leaves a comment of `/lgtm`, an `lgtm` label will be added but a merge will not happen.
## Immutability
Chart releases must be immutable. Any change to a chart warrants a chart version bump even if it is only changes to the documentation.
## Chart Metadata
The `Chart.yaml` should be as complete as possible. The following fields are mandatory:
* name (same as chart's directory)
* home
* version
* appVersion
* description
* maintainers (name should be the GitHub username)
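A minimal sketch of a `Chart.yaml` meeting these requirements (every value below is a placeholder):
```yaml
name: myapp
home: https://github.com/example/myapp
version: 1.2.3
appVersion: "4.5.6"
description: A Helm chart for myapp
maintainers:
  - name: mygithubuser
    email: user@example.com
```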
## Dependencies
Stable charts should not depend on charts in incubator.
## Names and Labels
### Metadata
Resources and labels should follow some conventions. The standard resource metadata (`metadata.labels` and `spec.template.metadata.labels`) should be this:
```yaml
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added (e.g. `app.kubernetes.io/component: server`). The resource name should get the component as a suffix (e.g. `name: {{ include "myapp.fullname" . }}-server`).
Note that templates have to be namespaced. With Helm 2.7+, `helm create` does this out-of-the-box. The `app.kubernetes.io/name` label should use the `name` template, not `fullname` as is still the case with older charts.
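Putting the standard metadata above together with a component label, a hypothetical `server` component might carry metadata like this:
```yaml
name: {{ include "myapp.fullname" . }}-server
labels:
  app.kubernetes.io/name: {{ include "myapp.name" . }}
  app.kubernetes.io/instance: {{ .Release.Name }}
  app.kubernetes.io/managed-by: {{ .Release.Service }}
  helm.sh/chart: {{ include "myapp.chart" . }}
  app.kubernetes.io/component: server
```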
### Deployments, StatefulSets, DaemonSets Selectors
`spec.selector.matchLabels` must be specified and should follow some conventions. The standard selector should be this:
```yaml
selector:
matchLabels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, a `component` label should be added to the selector (see above).
`spec.selector.matchLabels` defined in `Deployments`/`StatefulSets`/`DaemonSets` `>=v1/beta2` **must not** contain the `helm.sh/chart` label or any label containing a version of the chart, because the selector is immutable.
The chart label string contains the version, so if it is specified, whenever the Chart.yaml version changes, Helm's attempt to change this immutable field would cause the upgrade to fail.
#### Fixing Selectors
##### For Deployments, StatefulSets, DaemonSets apps/v1beta1 or extensions/v1beta1
- If it does not specify `spec.selector.matchLabels`, set it
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump patch version of the Chart
##### For Deployments, StatefulSets, DaemonSets >=apps/v1beta2
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump major version of the Chart as it is a breaking change
### Service Selectors
Label selectors for services must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels.
```yaml
selector:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### Persistence Labels
### StatefulSet
In case of a `StatefulSet`, `spec.volumeClaimTemplates.metadata.labels` must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels, and **must not** contain the `helm.sh/chart` label or any label containing a version of the chart, because `spec.volumeClaimTemplates` is immutable.
```yaml
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### PersistentVolumeClaim
In case of a `PersistentVolumeClaim`, unless there are special needs, `matchLabels` should not be specified
because it would prevent automatic `PersistentVolume` provisioning.
## Formatting
* YAML files should be indented with two spaces.
* List indentation style should be consistent.
* There should be a single space after `{{` and before `}}`.
## Configuration
* Docker images should be configurable. Image tags should use stable versions.
```yaml
image:
repository: myapp
tag: 1.2.3
pullPolicy: IfNotPresent
```
* The use of the `default` function should be avoided if possible in favor of defaults in `values.yaml`.
* It is usually best to not specify defaults for resources and to just provide sensible values that are commented out as a recommendation, especially when resources are rather high. This makes it easier to test charts on small clusters or Minikube. Setting resources should generally be a conscious choice of the user.
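To illustrate the point about the `default` function above, a hedged sketch (the `image.tag` field and its value are hypothetical): rather than burying a fallback in the template, declare the default in `values.yaml` and reference it directly.
```yaml
# Discouraged: the fallback value lives in the template
tag: {{ .Values.image.tag | default "1.2.3" }}

# Preferred: values.yaml declares image.tag: 1.2.3 and the template just references it
tag: {{ .Values.image.tag }}
```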
## Persistence
* Persistence should be enabled by default
* PVCs should support specifying an existing claim
* Storage class should be empty by default so that the default storage class is used
* All options should be shown in README.md
* Example persistence section in values.yaml:
```yaml
persistence:
enabled: true
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 10Gi
# existingClaim: ""
```
* Example pod spec within a deployment:
```yaml
volumes:
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "myapp.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
```
* Example pvc.yaml:
```yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
```
## AutoScaling / HorizontalPodAutoscaler
* Autoscaling should be disabled by default
* All options should be shown in README.md
* Example autoscaling section in values.yaml:
```yaml
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
```
* Example hpa.yaml:
```yaml
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
app.kubernetes.io/component: "{{ .Values.name }}"
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "myapp.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
```
## Ingress
* See the [Ingress resource documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for a broader concept overview
* Ingress should be disabled by default
* Example ingress section in values.yaml:
```yaml
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.test
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.test
```
* Example ingress.yaml:
```yaml
{{- if .Values.ingress.enabled -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ .Values.ingress.path }}
backend:
serviceName: {{ include "myapp.fullname" . }}
servicePort: http
{{- end }}
{{- end }}
```
* Example prepend logic for getting an application URL in NOTES.txt:
```
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- end }}
```
## Documentation
`README.md` and `NOTES.txt` are mandatory. `README.md` should contain a table listing all configuration options. `NOTES.txt` should provide accurate and useful information on how the chart can be used/accessed.
## Compatibility
We officially support compatibility with the current and the previous minor version of Kubernetes. Generated resources should use the latest possible API versions compatible with these versions. For extended backwards compatibility conditional logic based on capabilities may be used (see [built-in objects](https://github.com/helm/helm/blob/master/docs/chart_template_guide/builtin_objects.md)).
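A hedged sketch of such capability-based logic, modelled on the `cronjob.apiVersion` helper in the deleted curator chart earlier in this diff (the helper name, the Deployment API versions, and the 1.9 boundary are assumptions):
```yaml
{{/* Pick the Deployment apiVersion supported by the target cluster. */}}
{{- define "myapp.deployment.apiVersion" -}}
{{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "apps/v1" -}}
{{- else -}}
{{- print "apps/v1beta2" -}}
{{- end -}}
{{- end -}}
```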
## Kubernetes Native Workloads
While reviewing Charts that contain workloads such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) the below points should be considered. These are to be seen as best practices rather than strict enforcement.
1. Any workloads that are stateless and long-running (servers) in nature are to be created as Deployments. Deployments in turn create ReplicaSets.
2. Unless there is a compelling reason, ReplicaSets or ReplicationControllers should be avoided as workload types.
3. Workloads that are stateful in nature, such as databases, key-value stores, message queues, and in-memory caches, are to be created as StatefulSets.
4. It is recommended that Deployments and StatefulSets configure their workloads with a [Pod Disruption Budget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) for high availability (see the sketch after this list).
5. For workloads such as databases, KV stores, etc., that replicate data, it is recommended to configure inter-pod anti-affinity.
6. It is recommended to have a default workload update strategy configured that is suitable for this chart.
7. Batch workloads are to be created using Jobs.
8. It is best to always create workloads with the latest supported [API version](https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/), as older versions are either deprecated or soon to be deprecated.
9. It is generally advisable not to hard-code [resource limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) for workloads but to leave them configurable, unless the workload requires a certain bare minimum to function.
10. As much as possible, complex pre-app setup should be configured using [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
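For item 4 above, a minimal PodDisruptionBudget sketch (the template helper names and the `minAvailable` value are illustrative assumptions, following the label conventions described earlier):
```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "myapp.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
```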
More [configuration](https://kubernetes.io/docs/concepts/configuration/overview/) best practices.


@ -1,3 +0,0 @@
# Community Code of Conduct
Helm follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).


@ -1,10 +0,0 @@
approvers:
- simonswine
- icereval
- rendhalver
- desaintmartin
reviewers:
- simonswine
- icereval
- rendhalver
- desaintmartin


@ -1,49 +0,0 @@
version: 2
jobs:
lint-scripts:
docker:
- image: koalaman/shellcheck-alpine
steps:
- checkout
- run:
name: lint
command: |
shellcheck -x test/build.sh
shellcheck -x test/e2e.sh
shellcheck -x test/helm-test-e2e.sh
shellcheck -x test/repo-sync.sh
lint-charts:
docker:
- image: gcr.io/kubernetes-charts-ci/test-image:v3.1.0
steps:
- checkout
- run:
name: lint
command: |
git remote add k8s https://github.com/helm/charts
git fetch k8s master
ct lint --config test/ct.yaml
sync:
docker:
- image: google/cloud-sdk
steps:
- checkout
- run:
name: sync
command: test/repo-sync.sh
workflows:
version: 2
lint:
jobs:
- lint-scripts
- lint-charts
sync:
triggers:
- schedule:
cron: "23,53 * * * *"
filters:
branches:
only:
- master
jobs:
- sync


@ -1,38 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions. It's helpful to search the existing GitHub issues first. It's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of-->
**Is this a request for help?**:
---
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can. If you leave out
information, we can't help you as effectively.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Version of Helm and Kubernetes**:
**Which chart**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:


@ -1,40 +0,0 @@
<!--
Thank you for contributing to helm/charts. Before you submit this PR we'd like to
make sure you are aware of our technical requirements and best practices:
* https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements
* https://github.com/helm/helm/tree/master/docs/chart_best_practices
For a quick overview of what we will look at when reviewing your PR, please read
our review guidelines:
* https://github.com/helm/charts/blob/master/REVIEW_GUIDELINES.md
Following our best practices right from the start will accelerate the review process and
help get your PR merged quicker.
When updates to your PR are requested, please add new commits and do not squash the
history. This will make it easier to identify new changes. The PR will be squashed
anyway when it is merged. Thanks.
For fast feedback, please @-mention maintainers that are listed in the Chart.yaml file.
Please make sure you test your changes before you push them. Once pushed, CircleCI
will run across your changes and do some initial checks and linting. These checks run
very quickly. Please check the results. We would like these checks to pass before we
even continue reviewing your changes.
-->
#### What this PR does / why we need it:
#### Which issue this PR fixes
*(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*
- fixes #
#### Special notes for your reviewer:
#### Checklist
[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.]
- [ ] [DCO](https://www.helm.sh/blog/helm-dco/index.html) signed
- [ ] Chart Version bumped
- [ ] Variables are documented in the README.md


@ -1,16 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- lifecycle/frozen
staleLabel: lifecycle/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Any further update will
cause the issue/pull request to no longer be considered stale. Thank you for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue is being automatically closed due to inactivity.


@ -1,37 +0,0 @@
# General files for the project
pkg/*
*.pyc
bin/*
.project
/.bin
/_test/secrets/*.json
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
.DS_Store
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Chart dependencies
**/charts/*.tgz
.history


@ -1,136 +0,0 @@
# Contributing Guidelines
The Kubernetes Charts project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.
## Sign Your Work
The sign-off is a simple line at the end of the explanation for a commit. All
commits need to be signed. Your signature certifies that you wrote the patch or
otherwise have the right to contribute the material. The rules are pretty simple:
if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@example.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
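For example, using the sample identity from above (substitute your own name and email):
```
$ git config user.name "Joe Smith"
$ git config user.email joe.smith@example.com
$ git commit -s -m "Update README"
```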
Note: If your git config information is set properly then viewing the
`git log` information for your commit will look something like this:
```
Author: Joe Smith <joe.smith@example.com>
Date: Thu Feb 2 11:41:15 2018 -0800
Update README
Signed-off-by: Joe Smith <joe.smith@example.com>
```
Notice the `Author` and `Signed-off-by` lines match. If they don't,
your PR will be rejected by the automated DCO check.
### Reporting a Bug in Helm
This repository is used by Chart developers for maintaining the official charts for Kubernetes Helm. If your issue is in the Helm tool itself, please use the issue tracker in the [helm/helm](https://github.com/helm/helm) repository.
## How to Contribute a Chart
1. Fork this repository, develop and test your Chart. Remember to sign off your commits as described in the "Sign Your Work" chapter.
1. Choose the correct folder for your chart based on the information in the [Repository Structure](README.md#repository-structure) section
1. Ensure your Chart follows the [technical](#technical-requirements) and [documentation](#documentation-requirements) guidelines, described below.
1. Submit a pull request.
***NOTE***: In order to make testing and merging of PRs easier, please submit changes to multiple charts in separate PRs.
### Technical requirements
* All Chart dependencies should also be submitted independently
* Must pass the linter (`helm lint`)
* Must successfully launch with default values (`helm install .`)
* All pods go to the running state (or NOTES.txt provides further instructions if a required value is missing e.g. [minecraft](https://github.com/helm/charts/blob/master/stable/minecraft/templates/NOTES.txt#L3))
* All services have at least one endpoint
* Must include source GitHub repositories for images used in the Chart
* Images should not have any major security vulnerabilities
* Must be up-to-date with the latest stable Helm/Kubernetes features
* Use Deployments in favor of ReplicationControllers
* Should follow Kubernetes best practices
* Include Health Checks wherever practical
* Allow configurable [resource requests and limits](http://kubernetes.io/docs/user-guide/compute-resources/#resource-requests-and-limits-of-pod-and-container)
* Provide a method for data persistence (if applicable)
* Support application upgrades
* Allow customization of the application configuration
* Provide a secure default configuration
* Do not leverage alpha features of Kubernetes
* Includes a [NOTES.txt](https://github.com/helm/helm/blob/master/docs/charts.md#chart-license-readme-and-notes) explaining how to use the application after install
* Follows [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices)
(especially for [labels](https://github.com/helm/helm/blob/master/docs/chart_best_practices/labels.md)
and [values](https://github.com/helm/helm/blob/master/docs/chart_best_practices/values.md))
### Documentation requirements
* Must include an in-depth `README.md`, including:
* Short description of the Chart
* Any prerequisites or requirements
* Customization: explaining options in `values.yaml` and their defaults
* Must include a short `NOTES.txt`, including:
* Any relevant post-installation information for the Chart
* Instructions on how to access the application or service provided by the Chart
### Merge approval and release process
A Kubernetes Charts maintainer will review the Chart submission, and start a validation job in the CI to verify the technical requirements of the Chart. A maintainer may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one maintainer signs off with an LGTM.
Once the Chart has been merged, the release job will automatically run in the CI to package and release the Chart in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/).
## Support Channels
Whether you are a user or contributor, official support channels include:
- GitHub issues: https://github.com/helm/charts/issues
- Slack: Helm Users - #Helm-users room in the [Kubernetes Slack](http://slack.kubernetes.io/)
- Slack: Helm Developers - #Helm-dev room in the [Kubernetes Slack](http://slack.kubernetes.io/)
Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,14 +0,0 @@
approvers:
- lachie83
- mgoodness
- prydonius
- sameersbn
- seanknox
- viglesiasce
- foxish
- unguiculus
- scottrigby
- mattfarina
- davidkarlsen
- paulczar
- cpanato


@ -1,49 +0,0 @@
# Processes
This document outlines processes and procedures for some common tasks in the charts repository.
## Deprecating A Chart
When a chart is no longer maintained, it can be [deprecated](https://en.wikipedia.org/wiki/Deprecation). Once a chart is deprecated, the expectation is that the chart will see no further development. The chart and its versions will still be accessible, though tools such as [monocular](https://github.com/kubernetes-helm/monocular) and [Kubeapps Hub](https://hub.kubeapps.com/) will no longer list the chart.
To deprecate a chart perform the following:
1. Increment the chart `version` in the `Chart.yaml` file. This is required as all charts are immutable.
1. Add a property to the `Chart.yaml` file of `deprecated: true` at the top level of the YAML structure.
1. Above the deprecated property add a comment noting that the chart is deprecated and linking to the deprecation policy.
1. Remove any maintainers from the chart as the chart is no longer maintained.
1. Prefix the description with "DEPRECATED"
1. Update READMEs and NOTES.txt to note that the chart is deprecated
For example, A `Chart.yaml` could start like:
```yaml
name: foo
# The foo chart is deprecated and no longer maintained. For details on deprecation,
# including how to un-deprecate a chart, see the PROCESSES.md file.
deprecated: true
description: DEPRECATED foo bar baz qux...
```
## Un-deprecating A Chart
When new maintainers are interested in bringing a chart out of deprecation with
new features or new support, that can be an option. To un-deprecate a chart:
1. Update the maintainers on the chart if any are listed. The previous maintainers should not be expected to maintain the chart unless they explicitly decide to do so.
1. If there is an OWNERS file in the chart, it should be updated with the new reviewers and approvers.
1. The deprecated property from the `Chart.yaml` file should be removed along with any associated comment.
1. The chart `version` needs to be incremented accordingly. If the same functionality is kept the version can be a patch increase. Otherwise the minor or major version needs to be incremented. For more detail on changing the version number see the [semver specification](http://semver.org).
## Promoting A Chart From Incubator To Stable
When promoting a chart from incubator to stable there are several steps that need to be carried out.
1. Prior to promoting the chart verify that it does not depend on any other incubator charts. Stable charts cannot depend on incubator charts.
1. The chart should be copied, not moved, from the incubator directory to the stable directory.
1. The chart in the incubator directory should be deprecated according to the [deprecation process](#deprecating-a-chart) described above with a comment noting that the chart has been promoted to stable.
1. The version of the chart in the stable directory should be updated so that any documentation or other details point to stable rather than incubator. The chart `version` will also need to be incremented.
## Reviewing A Pull Request
There are two parts to reviewing a pull request: the process to do so and the guidelines to follow. Both are outlined in the [Review Guidelines](REVIEW_GUIDELINES.md).


@ -1,101 +0,0 @@
# Helm Charts
Use this repository to submit official Charts for Helm. Charts are curated application definitions for Helm. For more information about installing and using Helm, see its
[README.md](https://github.com/helm/helm/tree/master/README.md). To get a quick introduction to Charts see this [chart document](https://github.com/helm/helm/blob/master/docs/charts.md).
## Where to find us
For general Helm Chart discussions join the Helm Charts (#charts) room in the [Kubernetes Slack](http://slack.kubernetes.io/).
For issues and support for Helm and Charts see [Support Channels](CONTRIBUTING.md#support-channels).
## How do I install these charts?
Just `helm install stable/<chart>`. This is the default repository for Helm which is located at https://kubernetes-charts.storage.googleapis.com/ and is installed by default.
For more information on using Helm, refer to the [Helm's documentation](https://github.com/kubernetes/helm#docs).
## How do I enable the Incubator repository?
To add the Incubator charts for your local client, run `helm repo add`:
```
$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
"incubator" has been added to your repositories
```
You can then run `helm search incubator` to see the charts.
## Chart Format
Take a look at the [alpine example chart](https://github.com/helm/helm/tree/master/docs/examples/alpine) and the [nginx example chart](https://github.com/helm/helm/tree/master/docs/examples/nginx) for reference when you're writing your first few charts.
Before contributing a Chart, become familiar with the format. Note that the project is still under active development and the format may still evolve a bit.
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/) (the Chart Repository).
The Charts in the `stable/` directory in the master branch of this repository match the latest packaged Chart in the Chart Repository, though there may be previous versions of a Chart available in that Chart Repository.
The purpose of this repository is to provide a place for maintaining and contributing official Charts, with CI processes in place for managing the releasing of Charts into the Chart Repository.
The Charts in this repository are organized into two folders:
* stable
* incubator
Stable Charts meet the criteria in the [technical requirements](CONTRIBUTING.md#technical-requirements).
Incubator Charts are those that do not meet these criteria. Having the incubator folder allows charts to be shared and improved on until they are ready to be moved into the stable folder. The charts in the `incubator/` directory can be found in the [`gs://kubernetes-charts-incubator` Google Storage Bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts-incubator).
In order to get a Chart from incubator to stable, Chart maintainers should open a pull request that moves the chart folder.
## Contributing a Chart
We'd love for you to contribute a Chart that provides a useful application or service for Kubernetes. Please read our [Contribution Guide](CONTRIBUTING.md) for more information on how you can contribute Charts.
Note: We use the same [workflow](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#workflow),
[License](LICENSE) and [Contributor License Agreement](CONTRIBUTING.md) as the main Kubernetes repository.
## Owning and Maintaining A Chart
Individual charts can be maintained by one or more users of GitHub. When someone maintains a chart they have access to merge changes to that chart. To have merge access to a chart someone needs to:
1. Be listed on the chart, in the `Chart.yaml` file, as a maintainer. If you need sponsors and have contributed to the chart, please reach out to the existing maintainers, or if you are having trouble connecting with them, please reach out to one of the [OWNERS](OWNERS) of the charts repository.
1. Be invited (and accept your invite) as a read-only collaborator on [this repo](https://github.com/helm/charts). This is required for @k8s-ci-robot [PR comment interaction](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md).
1. An OWNERS file needs to be added to a chart. That OWNERS file should list the maintainers' GitHub login names for both the reviewers and approvers sections. For an example see the [Drupal chart](stable/drupal/OWNERS). The `OWNERS` file should also be appended to the `.helmignore` file.
Once these three steps are done a chart approver can merge pull requests following the directions in the [REVIEW_GUIDELINES.md](REVIEW_GUIDELINES.md) file.
## Trusted Collaborator
The `pull-charts-e2e` test run, that installs a chart to test it, is required before a pull request can be merged. These tests run automatically for members of the Helm Org and for chart [repository collaborators](https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/). For regular contributors who are trusted, in a manner similar to Kubernetes community members, we have trusted collaborators. These individuals can have their tests run automatically as well as mark other pull requests as ok to test by adding a comment of `/ok-to-test` on pull requests.
There are two paths to becoming a trusted collaborator. One only needs to follow one of them.
1. If you are a Kubernetes GitHub org member and have your Kubernetes org membership public you can become a trusted collaborator for Helm Charts
2. Get sponsorship from one of the Charts Maintainers listed in the OWNERS file at the root of this repository
The process to get added is:
* File an issue asking to be a trusted collaborator
* A Helm Chart Maintainer can then add the user as a read only collaborator to the repository
## Review Process
For information related to the review procedure used by the Chart repository maintainers, see [Merge approval and release process](CONTRIBUTING.md#merge-approval-and-release-process).
### Stale Pull Requests and Issues
Pull Requests and Issues that have no activity for 30 days automatically become stale. After 30 days of being stale, without activity, they become rotten. Pull Requests and Issues can rot for 30 days and then they are automatically closed. This is the standard stale process handling for all repositories on the Kubernetes GitHub organization.
## Supported Kubernetes Versions
This chart repository supports the latest and previous minor versions of Kubernetes. For example, if the latest minor release of Kubernetes is 1.8 then 1.7 and 1.8 are supported. Charts may still work on previous versions of Kubernetes even though they are outside the target supported window.
To provide that support the API versions of objects should be those that work for both the latest minor release and the previous one.
## Status of the Project
This project is still under active development, so you might run into [issues](https://github.com/helm/charts/issues). If you do, please don't be shy about letting us know, or better yet, contribute a fix or feature.


@ -1,340 +0,0 @@
# Chart Review Guidelines
Anyone is welcome to review pull requests. Besides our [technical requirements](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements) and [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices), here's an overview of process and review guidelines.
## Process
The process to get a pull request merged is fairly simple. First, all required tests need to pass and the contributor needs to have a signed [DCO](https://www.helm.sh/blog/helm-dco/index.html). See [Charts Testing](https://github.com/helm/charts/blob/master/test/README.md) for details on our CI system and how you can provide custom values for testing. If there is a problem with some part of the test, such as a timeout issue, please contact one of the charts repository maintainers by commenting `cc @helm/charts-maintainers`.
The charts repository uses the OWNERS files to provide merge access. If a chart has an OWNERS file, an approver listed in that file can approve the pull request. If the chart does not have an OWNERS file, an approver in the OWNERS file at the root of the repository can approve the pull request.
To approve the pull request, an approver needs to leave a comment of `/lgtm` on the pull request. Once this is in place some tags (`lgtm` and `approved`) will be added to the pull request and a bot will come along and perform the merge.
Note: if a reviewer who is not an approver in an OWNERS file leaves a comment of `/lgtm`, an `lgtm` label will be added, but a merge will not happen.
## Immutability
Chart releases must be immutable. Any change to a chart warrants a chart version bump even if it is only changes to the documentation.
## Chart Metadata
The `Chart.yaml` should be as complete as possible. The following fields are mandatory (a minimal example follows the list):
* name (same as chart's directory)
* home
* version
* appVersion
* description
* maintainers (name should be Github username)
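A minimal example covering just these mandatory fields (all values are illustrative):
```yaml
name: myapp
home: https://github.com/example/myapp
version: 0.1.0
appVersion: 1.2.3
description: A Helm chart that deploys myapp
maintainers:
  - name: joesmith
```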
## Dependencies
Stable charts should not depend on charts in incubator.
## Names and Labels
### Metadata
Resources and labels should follow some conventions. The standard resource metadata (`metadata.labels` and `spec.template.metadata.labels`) should be this:
```yaml
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added (e.g. `app.kubernetes.io/component: server`). The resource name should get the component as a suffix (e.g. `name: {{ include "myapp.fullname" . }}-server`).
Note that templates have to be namespaced. With Helm 2.7+, `helm create` does this out-of-the-box. The `app.kubernetes.io/name` label should use the `name` template, not `fullname` as is still the case with older charts.
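For reference, the namespaced helper templates used throughout these examples (`myapp.name`, `myapp.fullname`, `myapp.chart`) typically live in `templates/_helpers.tpl`. A simplified sketch, omitting the override and truncation edge cases that `helm create` normally generates, might look something like this:
```yaml
{{/* templates/_helpers.tpl (simplified sketch) */}}
{{- define "myapp.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "myapp.fullname" -}}
{{- printf "%s-%s" .Release.Name (include "myapp.name" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "myapp.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
```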
### Deployments, StatefulSets, DaemonSets Selectors
`spec.selector.matchLabels` must be specified and should follow some conventions. The standard selector should be this:
```yaml
selector:
matchLabels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, a `component` label should be added to the selector (see above).
`spec.selector.matchLabels` defined in `Deployments`/`StatefulSets`/`DaemonSets` `>=v1/beta2` **must not** contain `helm.sh/chart` label or any label containing a version of the chart, because the selector is immutable.
The chart label string contains the version, so if it is specified, whenever the Chart.yaml version changes, Helm's attempt to change this immutable field would cause the upgrade to fail.
#### Fixing Selectors
##### For Deployments, StatefulSets, DaemonSets apps/v1beta1 or extensions/v1beta1
- If it does not specify `spec.selector.matchLabels`, set it
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump patch version of the Chart
##### For Deployments, StatefulSets, DaemonSets >=apps/v1beta2
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump major version of the Chart as it is a breaking change
### Service Selectors
Label selectors for services must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels.
```yaml
selector:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### Persistence Labels
### StatefulSet
In case of a `Statefulset`, `spec.volumeClaimTemplates.metadata.labels` must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels, and **must not** contain `helm.sh/chart` label or any label containing a version of the chart, because `spec.volumeClaimTemplates` is immutable.
```yaml
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### PersistentVolumeClaim
In the case of a `PersistentVolumeClaim`, unless there are special needs, `matchLabels` should not be specified,
because it would prevent automatic `PersistentVolume` provisioning.
## Formatting
* YAML files should be indented with two spaces.
* List indentation style should be consistent.
* There should be a single space after `{{` and before `}}`.
## Configuration
* Docker images should be configurable. Image tags should use stable versions.
```yaml
image:
repository: myapp
tag: 1.2.3
pullPolicy: IfNotPresent
```
* The use of the `default` function should be avoided if possible in favor of defaults in `values.yaml`.
* It is usually best not to specify defaults for resources and to instead provide sensible values that are commented out as a recommendation, especially when the resource requirements are rather high (see the sketch below). This makes it easier to test charts on small clusters or Minikube. Setting resources should generally be a conscious choice of the user.
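An illustrative `values.yaml` snippet of this pattern (the numbers are placeholders, not recommendations):
```yaml
resources: {}
  # We prefer not to set default resources and to leave this as a conscious
  # choice for the user. The commented-out values below serve only as a hint.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
```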
## Persistence
* Persistence should be enabled by default
* PVCs should support specifying an existing claim
* Storage class should be empty by default so that the default storage class is used
* All options should be shown in README.md
* Example persistence section in values.yaml:
```yaml
persistence:
enabled: true
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 10Gi
# existingClaim: ""
```
* Example pod spec within a deployment:
```yaml
volumes:
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "myapp.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
```
* Example pvc.yaml:
```yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
```
## AutoScaling / HorizontalPodAutoscaler
* Autoscaling should be disabled by default
* All options should be shown in README.md
* Example autoscaling section in values.yaml:
```yaml
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
```
* Example hpa.yaml:
```yaml
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
app.kubernetes.io/component: "{{ .Values.name }}"
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "myapp.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
```
## Ingress
* See the [Ingress resource documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for a broader concept overview
* Ingress should be disabled by default
* Example ingress section in values.yaml:
```yaml
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.test
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.test
```
* Example ingress.yaml:
```yaml
{{- if .Values.ingress.enabled -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $.Values.ingress.path }}
backend:
serviceName: {{ include "myapp.fullname" $ }}
servicePort: http
{{- end }}
{{- end }}
```
* Example prepend logic for getting an application URL in NOTES.txt:
```
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
```
## Documentation
`README.md` and `NOTES.txt` are mandatory. `README.md` should contain a table listing all configuration options. `NOTES.txt` should provide accurate and useful information on how the chart can be used/accessed.
## Compatibility
We officially support compatibility with the current and the previous minor version of Kubernetes. Generated resources should use the latest possible API versions compatible with these versions. For extended backwards compatibility conditional logic based on capabilities may be used (see [built-in objects](https://github.com/helm/helm/blob/master/docs/chart_template_guide/builtin_objects.md)).
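For example, capability-based conditional logic might look like the following sketch (the API groups shown are just examples):
```yaml
{{- if .Capabilities.APIVersions.Has "apps/v1" }}
apiVersion: apps/v1
{{- else }}
apiVersion: apps/v1beta2
{{- end }}
kind: Deployment
```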
## Kubernetes Native Workloads
While reviewing Charts that contain workloads such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) the below points should be considered. These are to be seen as best practices rather than strict enforcement.
1. Any workload that is stateless and long-running (servers) in nature is to be created as a Deployment. Deployments in turn create ReplicaSets.
2. Unless there is a compelling reason, ReplicaSets or ReplicationControllers should be avoided as workload types.
3. Workloads that are stateful in nature, such as databases, key-value stores, message queues, and in-memory caches, are to be created as StatefulSets.
4. It is recommended that Deployments and StatefulSets configure their workloads with a [Pod Disruption Budget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) for high availability.
5. For workloads such as databases, KV stores, etc., that replicate data, it is recommended to configure inter-pod anti-affinity.
6. It is recommended to have a default workload update strategy configured that is suitable for this chart.
7. Batch workloads are to be created using Jobs.
8. It is best to always create workloads with the latest supported [api version](https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/), as older versions are either deprecated or soon to be deprecated.
9. It is generally advisable not to hard-code [resource limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) for workloads; leave them configurable unless the workload requires a bare minimum to function.
10. As much as possible, complex pre-app setup should be configured using [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) (a brief sketch follows this list).
More [configuration](https://kubernetes.io/docs/concepts/configuration/overview/) best practices can be found in the Kubernetes documentation.
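For example, a pre-app setup step expressed as an init container might look like the following sketch; the image, command, and `.Values.database.host` key are hypothetical placeholders:
```yaml
spec:
  initContainers:
    # Hypothetical example: block application start until the database answers.
    - name: wait-for-database
      image: busybox:1.28
      command: ['sh', '-c', 'until nc -z {{ .Values.database.host }} 5432; do sleep 2; done']
  containers:
    - name: {{ include "myapp.fullname" . }}
      image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
```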


@ -1,3 +0,0 @@
# Community Code of Conduct
Helm follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).


@ -1,19 +0,0 @@
name: fluentd-elasticsearch
version: 2.0.7
appVersion: 2.3.2
home: https://www.fluentd.org/
description: DEPRECATED! - A Fluentd Helm chart for Kubernetes with Elasticsearch output
icon: https://raw.githubusercontent.com/fluent/fluentd-docs/master/public/logo/Fluentd_square.png
keywords:
- fluentd
- elasticsearch
- multiline
- detect-exceptions
- logging
sources:
- https://github.com/kubernetes/charts/stable/fluentd-elasticsearch
- https://github.com/fluent/fluentd-kubernetes-daemonset
- https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions
- https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-elasticsearch/fluentd-es-image
engine: gotpl
deprecated: true


@ -1,6 +0,0 @@
approvers:
- axdotl
- monotek
reviewers:
- axdotl
- monotek


@ -1,102 +0,0 @@
# DEPRECATED - Fluentd Elasticsearch
This chart is deprecated as we move to our own repo (https://kiwigrid.github.io), which will be published on hub.helm.sh soon.
The chart source can be found here: https://github.com/kiwigrid/helm-charts/tree/master/charts/fluentd-elasticsearch
* Installs [Fluentd](https://www.fluentd.org/) log forwarder.
## TL;DR;
```console
$ helm install stable/fluentd-elasticsearch
```
## Introduction
This chart bootstraps a [Fluentd](https://www.fluentd.org/) daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
It's meant to be a drop-in replacement for fluentd-gcp on GKE, which sends logs to Google's Stackdriver service, but it can also be used in other places where logging to Elasticsearch is required.
The Docker image used also contains Google's detect-exceptions plugin (for Java multiline stacktraces), the Prometheus exporter, the Kubernetes metadata filter, and Systemd plugins.
## Prerequisites
- Kubernetes 1.8+ with Beta APIs enabled
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release stable/fluentd-elasticsearch
```
The command deploys fluentd-elasticsearch on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Fluentd elasticsearch chart and their default values.
| Parameter | Description | Default |
| ---------------------------------- | ------------------------------------------ | ---------------------------------------------------------- |
| `annotations` | Optional daemonset annotations | `NULL` |
| `podAnnotations` | Optional daemonset's pods annotations | `NULL` |
| `configMaps` | Fluentd configmaps | `default conf files` |
| `elasticsearch.host` | Elasticsearch Host | `elasticsearch-client` |
| `elasticsearch.port` | Elasticsearch Port | `9200` |
| `elasticsearch.logstash_prefix` | Elasticsearch Logstash prefix | `logstash` |
| `elasticsearch.buffer_chunk_limit` | Elasticsearch buffer chunk limit | `2M` |
| `elasticsearch.buffer_queue_limit` | Elasticsearch buffer queue limit | `8` |
| `elasticsearch.scheme` | Elasticsearch scheme setting | `http` |
| `env` | List of environment variables that are added to the fluentd pods | `{}` |
| `secret` | List of environment variables that are set from secrets and added to the fluentd pods | `[]` |
| `extraVolumeMounts` | Mount an extra volume, required to mount ssl certificates when elasticsearch has tls enabled | |
| `extraVolumes` | Extra volumes | |
| `image.repository` | Image | `gcr.io/google-containers/fluentd-elasticsearch` |
| `image.tag` | Image tag | `v2.3.2` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `livenessProbe.enabled` | Whether to enable livenessProbe | `true` |
| `nodeSelector` | Optional daemonset nodeSelector | `{}` |
| `podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` |
| `podSecurityPolicy.enabled` | Specify if a pod security policy must be created | `false` |
| `rbac.create` | RBAC | `true` |
| `resources.limits.cpu` | CPU limit | `100m` |
| `resources.limits.memory` | Memory limit | `500Mi` |
| `resources.requests.cpu` | CPU request | `100m` |
| `resources.requests.memory` | Memory request | `200Mi` |
| `service` | Service definition | `{}` |
| `service.type` | Service type (ClusterIP/NodePort) | Not Set |
| `service.ports` | List of service ports dict [{name:...}...] | Not Set |
| `service.ports[].name` | One of service ports name | Not Set |
| `service.ports[].port` | Service port | Not Set |
| `service.ports[].nodePort` | NodePort port (when service.type is NodePort) | Not Set |
| `service.ports[].protocol` | Service protocol(optional, can be TCP/UDP) | Not Set |
| `serviceAccount.create` | Specifies whether a service account should be created. | `true` |
| `serviceAccount.name` | Name of the service account. | |
| `tolerations` | Optional daemonset tolerations | `{}` |
| `updateStrategy` | Optional daemonset update strategy | `type: RollingUpdate` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install --name my-release \
    --set elasticsearch.host=elasticsearch-logging,elasticsearch.port=9200 \
    stable/fluentd-elasticsearch
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install --name my-release -f values.yaml stable/fluentd-elasticsearch
```


@ -1,24 +0,0 @@
1. To verify that Fluentd has started, run:
kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ include "fluentd-elasticsearch.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
THIS APPLICATION CAPTURES ALL CONSOLE OUTPUT AND FORWARDS IT TO elasticsearch. Anything that might be identifying,
including things like IP addresses, container images, and object names will NOT be anonymized.
{{- if .Values.service }}
2. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "fluentd-elasticsearch.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "fluentd-elasticsearch.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "fluentd-elasticsearch.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluentd-elasticsearch.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
{{- end }}


@ -1,23 +0,0 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "namespaces"
- "pods"
verbs:
- "get"
- "watch"
- "list"
{{- end -}}


@ -1,21 +0,0 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: {{ template "fluentd-elasticsearch.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "fluentd-elasticsearch.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}


@ -1,16 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
data:
{{- range $key, $value := .Values.configMaps }}
{{ $key }}: |-
{{ $value | indent 4 }}
{{- end }}


@ -1,165 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
{{- if .Values.annotations }}
annotations:
{{ toYaml .Values.annotations | indent 4 }}
{{- end }}
spec:
updateStrategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "fluentd-elasticsearch.fullname" . }}
containers:
- name: {{ include "fluentd-elasticsearch.fullname" . }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
- name: OUTPUT_HOST
value: {{ .Values.elasticsearch.host | quote }}
- name: OUTPUT_PORT
value: {{ .Values.elasticsearch.port | quote }}
- name: LOGSTASH_PREFIX
value: {{ .Values.elasticsearch.logstash_prefix | quote }}
- name: OUTPUT_SCHEME
value: {{ .Values.elasticsearch.scheme | quote }}
- name: OUTPUT_SSL_VERSION
value: {{ .Values.elasticsearch.ssl_version | quote }}
- name: OUTPUT_BUFFER_CHUNK_LIMIT
value: {{ .Values.elasticsearch.buffer_chunk_limit | quote }}
- name: OUTPUT_BUFFER_QUEUE_LIMIT
value: {{ .Values.elasticsearch.buffer_queue_limit | quote }}
{{- if .Values.env }}
{{- range $key, $value := .Values.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.secret }}
{{- range $key, $value := .Values.secret }}
- name: {{ .name }}
valueFrom:
secretKeyRef:
name: {{ $value.secret_name }}
key: {{ $value.secret_key | quote }}
{{- end }}
{{- end }}
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
{{ toYaml .Values.resources | indent 10 }}
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: libsystemddir
mountPath: /host/lib
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 8 }}
{{- end }}
ports:
{{- range $port := .Values.service.ports }}
- name: {{ $port.name }}
containerPort: {{ $port.port }}
{{- if $port.protocol }}
protocol: {{ $port.protocol }}
{{- end }}
{{- end }}
{{- if .Values.livenessProbe.enabled }}
# Liveness probe is aimed to help in situations where fluentd
# silently hangs for no apparent reason until manual restart.
# The idea of this probe is that if fluentd is not queueing or
# flushing chunks for 5 minutes, something is not right. If
# you want to change the fluentd configuration, reducing amount of
# logs fluentd collects, consider changing the threshold or turning
# liveness probe off completely.
livenessProbe:
initialDelaySeconds: 600
periodSeconds: 60
exec:
command:
- '/bin/sh'
- '-c'
- >
LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
STUCK_THRESHOLD_SECONDS=${STUCK_THRESHOLD_SECONDS:-900};
if [ ! -e /var/log/fluentd-buffers ];
then
exit 1;
fi;
touch -d "${STUCK_THRESHOLD_SECONDS} seconds ago" /tmp/marker-stuck;
if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-stuck -print -quit)" ]];
then
rm -rf /var/log/fluentd-buffers;
exit 1;
fi;
touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
if [[ -z "$(find /var/log/fluentd-buffers -type f -newer /tmp/marker-liveness -print -quit)" ]];
then
exit 1;
fi;
{{- end }}
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
# It is needed to copy systemd library to decompress journals
- name: libsystemddir
hostPath:
path: /usr/lib64
- name: config-volume
configMap:
name: {{ include "fluentd-elasticsearch.fullname" . }}
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 6 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 6 }}
{{- end }}


@ -1,52 +0,0 @@
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
{{- if .Values.podSecurityPolicy.annotations }}
{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: true
volumes:
- 'configMap'
- 'emptyDir'
- 'hostPath'
- 'secret'
allowedHostPaths:
- pathPrefix: /var/log
readOnly: false
- pathPrefix: /var/lib/docker/containers
readOnly: true
- pathPrefix: /usr/lib64
readOnly: true
hostNetwork: false
hostPID: false
hostIPC: false
runAsUser:
rule: 'RunAsAny'
runAsGroup:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
hostPorts:
- min: 1
max: 65535
{{- end }}


@ -1,19 +0,0 @@
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ template "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "fluentd-elasticsearch.fullname" . }}
{{- end }}


@ -1,22 +0,0 @@
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ template "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
kind: Role
name: {{ template "fluentd-elasticsearch.fullname" . }}
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ template "fluentd-elasticsearch.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
{{- end -}}


@ -1,30 +0,0 @@
{{- if .Values.service }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluentd-elasticsearch.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
type: {{ .Values.service.type }}
ports:
{{- range $port := .Values.service.ports }}
- name: {{ $port.name }}
port: {{ $port.port }}
targetPort: {{ $port.port }}
{{- if $port.nodePort }}
nodePort: {{ $port.nodePort }}
{{- end }}
{{- if $port.protocol }}
protocol: {{ $port.protocol }}
{{- end }}
{{- end }}
selector:
app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}


@ -1,491 +0,0 @@
image:
repository: gcr.io/google-containers/fluentd-elasticsearch
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
tag: v2.3.2
pullPolicy: IfNotPresent
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 200Mi
elasticsearch:
host: 'elasticsearch-client'
port: 9200
scheme: 'http'
ssl_version: TLSv1_2
buffer_chunk_limit: 2M
buffer_queue_limit: 8
logstash_prefix: 'logstash'
# If you want to add custom environment variables, use the env dict
# You can then reference these in your config file e.g.:
# user "#{ENV['OUTPUT_USER']}"
env:
# OUTPUT_USER: my_user
# If you want to add custom environment variables from secrets, use the secret list
secret:
# - name: ELASTICSEARCH_PASSWORD
# secret_name: elasticsearch
# secret_key: password
rbac:
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Specify if a Pod Security Policy for node-exporter must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
enabled: false
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
livenessProbe:
enabled: true
annotations: {}
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "24231"
## DaemonSet update strategy
## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
updateStrategy:
type: RollingUpdate
tolerations: {}
# - key: node-role.kubernetes.io/master
# operator: Exists
# effect: NoSchedule
nodeSelector: {}
service: {}
# type: ClusterIP
# ports:
# - name: "monitor-agent"
# port: 24231
configMaps:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record looks like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in several Docker container IDs).
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/fluentd-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
format json
read_from_head true
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
<source>
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
<source>
@id journald-docker
@type systemd
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
</source>
<source>
@id journald-kubelet
@type systemd
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id journald-node-problem-detector
@type systemd
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>
<source>
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
@type kubernetes_metadata
</filter>
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
type_name _doc
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
scheme "#{ENV['OUTPUT_SCHEME']}"
ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
logstash_format true
logstash_prefix "#{ENV['LOGSTASH_PREFIX']}"
reconnect_on_error true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action block
</buffer>
</match>
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true


@ -1,49 +0,0 @@
version: 2
jobs:
lint-scripts:
docker:
- image: koalaman/shellcheck-alpine
steps:
- checkout
- run:
name: lint
command: |
shellcheck -x test/build.sh
shellcheck -x test/e2e.sh
shellcheck -x test/helm-test-e2e.sh
shellcheck -x test/repo-sync.sh
lint-charts:
docker:
- image: gcr.io/kubernetes-charts-ci/test-image:v3.1.0
steps:
- checkout
- run:
name: lint
command: |
git remote add k8s https://github.com/helm/charts
git fetch k8s master
ct lint --config test/ct.yaml
sync:
docker:
- image: google/cloud-sdk
steps:
- checkout
- run:
name: sync
command: test/repo-sync.sh
workflows:
version: 2
lint:
jobs:
- lint-scripts
- lint-charts
sync:
triggers:
- schedule:
cron: "23,53 * * * *"
filters:
branches:
only:
- master
jobs:
- sync


@ -1,38 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions. It's helpful to search the existing GitHub issues first. It's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of-->
**Is this a request for help?**:
---
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can. If you leave out
information, we won't be able to help you as effectively.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->
**Version of Helm and Kubernetes**:
**Which chart**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:


@ -1,40 +0,0 @@
<!--
Thank you for contributing to helm/charts. Before you submit this PR we'd like to
make sure you are aware of our technical requirements and best practices:
* https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements
* https://github.com/helm/helm/tree/master/docs/chart_best_practices
For a quick overview across what we will look at reviewing your PR, please read
our review guidelines:
* https://github.com/helm/charts/blob/master/REVIEW_GUIDELINES.md
Following our best practices right from the start will accelerate the review process and
help get your PR merged quicker.
When updates to your PR are requested, please add new commits and do not squash the
history. This will make it easier to identify new changes. The PR will be squashed
anyway when it is merged. Thanks.
For fast feedback, please @-mention maintainers that are listed in the Chart.yaml file.
Please make sure you test your changes before you push them. Once pushed, a CircleCI
job will run against your changes and do some initial checks and linting. These checks run
very quickly. Please check the results. We would like these checks to pass before we
even continue reviewing your changes.
-->
#### What this PR does / why we need it:
#### Which issue this PR fixes
*(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*
- fixes #
#### Special notes for your reviewer:
#### Checklist
[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.]
- [ ] [DCO](https://www.helm.sh/blog/helm-dco/index.html) signed
- [ ] Chart Version bumped
- [ ] Variables are documented in the README.md


@ -1,16 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- lifecycle/frozen
staleLabel: lifecycle/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Any further update will
cause the issue/pull request to no longer be considered stale. Thank you for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue is being automatically closed due to inactivity.


@ -1,37 +0,0 @@
# General files for the project
pkg/*
*.pyc
bin/*
.project
/.bin
/_test/secrets/*.json
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
.DS_Store
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Chart dependencies
**/charts/*.tgz
.history


@ -1,136 +0,0 @@
# Contributing Guidelines
The Kubernetes Charts project accepts contributions via GitHub pull requests. This document outlines the process to help get your contribution accepted.
## Sign Your Work
The sign-off is a simple line at the end of the explanation for a commit. All
commits need to be signed. Your signature certifies that you wrote the patch or
otherwise have the right to contribute the material. The rules are pretty simple:
if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@example.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
Note: If your git config information is set properly then viewing the
`git log` information for your commit will look something like this:
```
Author: Joe Smith <joe.smith@example.com>
Date: Thu Feb 2 11:41:15 2018 -0800
Update README
Signed-off-by: Joe Smith <joe.smith@example.com>
```
Notice the `Author` and `Signed-off-by` lines match. If they don't match,
your PR will be rejected by the automated DCO check.
### Reporting a Bug in Helm
This repository is used by Chart developers for maintaining the official charts for Kubernetes Helm. If your issue is in the Helm tool itself, please use the issue tracker in the [helm/helm](https://github.com/helm/helm) repository.
## How to Contribute a Chart
1. Fork this repository, develop and test your Chart. Remember to sign off your commits as described in the "Sign Your Work" chapter.
1. Choose the correct folder for your chart based on the information in the [Repository Structure](README.md#repository-structure) section
1. Ensure your Chart follows the [technical](#technical-requirements) and [documentation](#documentation-requirements) guidelines, described below.
1. Submit a pull request.
***NOTE***: In order to make testing and merging of PRs easier, please submit changes to multiple charts in separate PRs.
### Technical requirements
* All Chart dependencies should also be submitted independently
* Must pass the linter (`helm lint`)
* Must successfully launch with default values (`helm install .`)
* All pods go to the running state (or NOTES.txt provides further instructions if a required value is missing e.g. [minecraft](https://github.com/helm/charts/blob/master/stable/minecraft/templates/NOTES.txt#L3))
* All services have at least one endpoint
* Must include source GitHub repositories for images used in the Chart
* Images should not have any major security vulnerabilities
* Must be up-to-date with the latest stable Helm/Kubernetes features
* Use Deployments in favor of ReplicationControllers
* Should follow Kubernetes best practices
* Include Health Checks wherever practical
* Allow configurable [resource requests and limits](http://kubernetes.io/docs/user-guide/compute-resources/#resource-requests-and-limits-of-pod-and-container)
* Provide a method for data persistence (if applicable)
* Support application upgrades
* Allow customization of the application configuration
* Provide a secure default configuration
* Do not leverage alpha features of Kubernetes
* Includes a [NOTES.txt](https://github.com/helm/helm/blob/master/docs/charts.md#chart-license-readme-and-notes) explaining how to use the application after install
* Follows [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices)
(especially for [labels](https://github.com/helm/helm/blob/master/docs/chart_best_practices/labels.md)
and [values](https://github.com/helm/helm/blob/master/docs/chart_best_practices/values.md))
### Documentation requirements
* Must include an in-depth `README.md`, including:
* Short description of the Chart
* Any prerequisites or requirements
* Customization: explaining options in `values.yaml` and their defaults
* Must include a short `NOTES.txt`, including:
* Any relevant post-installation information for the Chart
* Instructions on how to access the application or service provided by the Chart
### Merge approval and release process
A Kubernetes Charts maintainer will review the Chart submission, and start a validation job in the CI to verify the technical requirements of the Chart. A maintainer may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one maintainer signs off with an LGTM.
Once the Chart has been merged, the release job will automatically run in the CI to package and release the Chart in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/).
## Support Channels
Whether you are a user or contributor, official support channels include:
- GitHub issues: https://github.com/helm/charts/issues
- Slack: Helm Users - #Helm-users room in the [Kubernetes Slack](http://slack.kubernetes.io/)
- Slack: Helm Developers - #Helm-dev room in the [Kubernetes Slack](http://slack.kubernetes.io/)
Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,14 +0,0 @@
approvers:
- lachie83
- mgoodness
- prydonius
- sameersbn
- seanknox
- viglesiasce
- foxish
- unguiculus
- scottrigby
- mattfarina
- davidkarlsen
- paulczar
- cpanato


@ -1,49 +0,0 @@
# Processes
This document outlines processes and procedures for some common tasks in the charts repository.
## Deprecating A Chart
When a chart is no longer maintained it can be [deprecated](https://en.wikipedia.org/wiki/Deprecation). Once a chart is deprecated the expectation is the chart will see no further development. The chart and its versions will still be accessible, though tools such as [monocular](https://github.com/kubernetes-helm/monocular) and [Kubeapps Hub](https://hub.kubeapps.com/) will no longer list the chart.
To deprecate a chart perform the following:
1. Increment the chart `version` in the `Chart.yaml` file. This is required as all charts are immutable.
1. Add a property to the `Chart.yaml` file of `deprecated: true` at the top level of the YAML structure.
1. Above the deprecated property add a comment noting that the chart is deprecated and linking to the deprecation policy.
1. Remove any maintainers from the chart as the chart is no longer maintained.
1. Prefix the description with "DEPRECATED"
1. Update READMEs and NOTES.txt to note that the chart is deprecated
For example, a `Chart.yaml` could start like:
```yaml
name: foo
# The foo chart is deprecated and no longer maintained. For details on deprecation,
# including how to un-deprecate a chart, see the PROCESSES.md file.
deprecated: true
description: DEPRECATED foo bar baz qux...
```
## Un-deprecating A Chart
When new maintainers are interested in bringing a chart out of deprecation with
new features or new support, that can be an option. To un-deprecate a chart:
1. Update the maintainers on the chart if any are listed. The previous maintainers should not be expected to maintain the chart unless they explicitly decide to do so.
1. If there is an OWNERS file in the chart, it should be updated with the new reviewers and approvers.
1. The deprecated property from the `Chart.yaml` file should be removed along with any associated comment.
1. The chart `version` needs to be incremented accordingly. If the same functionality is kept the version can be a patch increase. Otherwise the minor or major version needs to be incremented. For more detail on changing the version number see the [semver specification](http://semver.org).
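For instance, continuing the `foo` example above, an un-deprecated `Chart.yaml` might look like the following sketch (the version number and maintainer login are illustrative only):
```yaml
name: foo
description: foo bar baz qux...
# the "deprecated: true" property and its comment have been removed
version: 1.1.0            # bumped from the deprecated 1.0.x release
maintainers:
  - name: newmaintainer   # placeholder GitHub login
```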
## Promoting A Chart From Incubator To Stable
When promoting a chart from incubator to stable there are several steps that need to be carried out.
1. Prior to promoting the chart verify that it does not depend on any other incubator charts. Stable charts cannot depend on incubator charts.
1. The chart should be copied, not moved, from the incubator directory to the stable directory.
1. The chart in the incubator directory should be deprecated according to the [deprecation process](#deprecating-a-chart) described above with a comment noting that the chart has been promoted to stable.
1. The chart in the stable directory should be updated so that any documentation or other details point to stable rather than incubator. The chart `version` will also need to be incremented.
## Reviewing A Pull Request
There are two parts to reviewing a pull request: the process to do so and the guidelines to follow. Both are outlined in the [Review Guidelines](REVIEW_GUIDELINES.md).


@ -1,101 +0,0 @@
# Helm Charts
Use this repository to submit official Charts for Helm. Charts are curated application definitions for Helm. For more information about installing and using Helm, see its
[README.md](https://github.com/helm/helm/tree/master/README.md). To get a quick introduction to Charts see this [chart document](https://github.com/helm/helm/blob/master/docs/charts.md).
## Where to find us
For general Helm Chart discussions, join the Helm Charts (#charts) room in the [Kubernetes Slack](http://slack.kubernetes.io/).
For issues and support for Helm and Charts see [Support Channels](CONTRIBUTING.md#support-channels).
## How do I install these charts?
Just `helm install stable/<chart>`. This is the default repository for Helm which is located at https://kubernetes-charts.storage.googleapis.com/ and is installed by default.
For more information on using Helm, refer to [Helm's documentation](https://github.com/kubernetes/helm#docs).
## How do I enable the Incubator repository?
To add the Incubator charts for your local client, run `helm repo add`:
```
$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
"incubator" has been added to your repositories
```
You can then run `helm search incubator` to see the charts.
## Chart Format
Take a look at the [alpine example chart](https://github.com/helm/helm/tree/master/docs/examples/alpine) and the [nginx example chart](https://github.com/helm/helm/tree/master/docs/examples/nginx) for reference when you're writing your first few charts.
Before contributing a Chart, become familiar with the format. Note that the project is still under active development and the format may still evolve a bit.
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released in the [`gs://kubernetes-charts` Google Storage bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts/) (the Chart Repository).
The Charts in the `stable/` directory in the master branch of this repository match the latest packaged Chart in the Chart Repository, though there may be previous versions of a Chart available in that Chart Repository.
The purpose of this repository is to provide a place for maintaining and contributing official Charts, with CI processes in place for managing the releasing of Charts into the Chart Repository.
The Charts in this repository are organized into two folders:
* stable
* incubator
Stable Charts meet the criteria in the [technical requirements](CONTRIBUTING.md#technical-requirements).
Incubator Charts are those that do not meet these criteria. Having the incubator folder allows charts to be shared and improved on until they are ready to be moved into the stable folder. The charts in the `incubator/` directory can be found in the [`gs://kubernetes-charts-incubator` Google Storage Bucket](https://console.cloud.google.com/storage/browser/kubernetes-charts-incubator).
In order to get a Chart from incubator to stable, Chart maintainers should open a pull request that moves the chart folder.
## Contributing a Chart
We'd love for you to contribute a Chart that provides a useful application or service for Kubernetes. Please read our [Contribution Guide](CONTRIBUTING.md) for more information on how you can contribute Charts.
Note: We use the same [workflow](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#workflow),
[License](LICENSE) and [Contributor License Agreement](CONTRIBUTING.md) as the main Kubernetes repository.
## Owning and Maintaining A Chart
Individual charts can be maintained by one or more GitHub users. When someone maintains a chart they have access to merge changes to that chart. To have merge access to a chart someone needs to:
1. Be listed on the chart, in the `Chart.yaml` file, as a maintainer. If you need sponsors and have contributed to the chart, please reach out to the existing maintainers, or if you are having trouble connecting with them, please reach out to one of the [OWNERS](OWNERS) of the charts repository.
1. Be invited (and accept your invite) as a read-only collaborator on [this repo](https://github.com/helm/charts). This is required for @k8s-ci-robot [PR comment interaction](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md).
1. An OWNERS file needs to be added to a chart. That OWNERS file should list the maintainers' GitHub login names for both the reviewers and approvers sections. For an example see the [Drupal chart](stable/drupal/OWNERS) or the sketch below. The `OWNERS` file should also be appended to the `.helmignore` file.
Once these three steps are done a chart approver can merge pull requests following the directions in the [REVIEW_GUIDELINES.md](REVIEW_GUIDELINES.md) file.
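As a rough illustration, an `OWNERS` file is a small YAML document; the login names below are placeholders, not real maintainers:
```yaml
approvers:
  - examplemaintainer
reviewers:
  - examplemaintainer
  - anothercontributor
```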
## Trusted Collaborator
The `pull-charts-e2e` test run, that installs a chart to test it, is required before a pull request can be merged. These tests run automatically for members of the Helm Org and for chart [repository collaborators](https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/). For regular contributors who are trusted, in a manner similar to Kubernetes community members, we have trusted collaborators. These individuals can have their tests run automatically as well as mark other pull requests as ok to test by adding a comment of `/ok-to-test` on pull requests.
There are two paths to becoming a trusted collaborator. One only needs to follow one of them.
1. If you are a Kubernetes GitHub org member and have your Kubernetes org membership public you can become a trusted collaborator for Helm Charts
2. Get sponsorship from one of the Charts Maintainers listed in the OWNERS file at the root of this repository
The process to get added is:
* File an issue asking to be a trusted collaborator
* A Helm Chart Maintainer can then add the user as a read only collaborator to the repository
## Review Process
For information related to the review procedure used by the Chart repository maintainers, see [Merge approval and release process](CONTRIBUTING.md#merge-approval-and-release-process).
### Stale Pull Requests and Issues
Pull Requests and Issues that have no activity for 30 days automatically become stale. After 30 days of being stale, without activity, they become rotten. Pull Requests and Issues can rot for 30 days and then they are automatically closed. This is the standard stale process handling for all repositories on the Kubernetes GitHub organization.
## Supported Kubernetes Versions
This chart repository supports the latest and previous minor versions of Kubernetes. For example, if the latest minor release of Kubernetes is 1.8, then 1.7 and 1.8 are supported. Charts may still work on previous versions of Kubernetes even though they are outside the target supported window.
To provide that support the API versions of objects should be those that work for both the latest minor release and the previous one.
## Status of the Project
This project is still under active development, so you might run into [issues](https://github.com/helm/charts/issues). If you do, please don't be shy about letting us know, or better yet, contribute a fix or feature.


@ -1,340 +0,0 @@
# Chart Review Guidelines
Anyone is welcome to review pull requests. Besides our [technical requirements](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#technical-requirements) and [best practices](https://github.com/helm/helm/tree/master/docs/chart_best_practices), here's an overview of process and review guidelines.
## Process
The process to get a pull request merged is fairly simple. First, all required tests need to pass and the contributor needs to have a signed [DCO](https://www.helm.sh/blog/helm-dco/index.html). See [Charts Testing](https://github.com/helm/charts/blob/master/test/README.md) for details on our CI system and how you can provide custom values for testing. If there is a problem with some part of the test, such as a timeout issue, please contact one of the charts repository maintainers by commenting `cc @helm/charts-maintainers`.
The charts repository uses the OWNERS files to provide merge access. If a chart has an OWNERS file, an approver listed in that file can approve the pull request. If the chart does not have an OWNERS file, an approver in the OWNERS file at the root of the repository can approve the pull request.
To approve the pull request, an approver needs to leave a comment of `/lgtm` on the pull request. Once this is in place some tags (`lgtm` and `approved`) will be added to the pull request and a bot will come along and perform the merge.
Note: if a reviewer who is not an approver in an OWNERS file leaves a comment of `/lgtm`, an `lgtm` label will be added but a merge will not happen.
## Immutability
Chart releases must be immutable. Any change to a chart warrants a chart version bump, even if it is only a documentation change.
## Chart Metadata
The `Chart.yaml` should be as complete as possible. The following fields are mandatory (a minimal example follows the list):
* name (same as chart's directory)
* home
* version
* appVersion
* description
* maintainers (name should be Github username)
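A hypothetical `Chart.yaml` carrying just these mandatory fields might look like this (all values are illustrative only):
```yaml
name: myapp                    # must match the chart's directory name
home: https://example.com/myapp
version: 1.2.3
appVersion: 4.5.6
description: An example application chart
maintainers:
  - name: examplemaintainer    # GitHub username
```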
## Dependencies
Stable charts should not depend on charts in incubator.
## Names and Labels
### Metadata
Resources and labels should follow some conventions. The standard resource metadata (`metadata.labels` and `spec.template.metadata.labels`) should be this:
```yaml
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added (e.g. `app.kubernetes.io/component: server`). The resource name should get the component as a suffix (e.g. `name: {{ include "myapp.fullname" . }}-server`).
Note that templates have to be namespaced. With Helm 2.7+, `helm create` does this out-of-the-box. The `app.kubernetes.io/name` label should use the `name` template, not `fullname` (which older charts still use).
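For reference, namespaced templates are typically defined in `_helpers.tpl`; the following is a simplified sketch of what `helm create` generates for a hypothetical `myapp` chart (the real generated helpers also handle `fullnameOverride` and name-length edge cases):
```yaml
{{- define "myapp.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "myapp.fullname" -}}
{{- printf "%s-%s" .Release.Name (include "myapp.name" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "myapp.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
```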
### Deployments, StatefulSets, DaemonSets Selectors
`spec.selector.matchLabels` must be specified and should follow some conventions. The standard selector should be this:
```yaml
selector:
matchLabels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, a `component` label should be added to the selector (see above).
`spec.selector.matchLabels` defined in `Deployments`/`StatefulSets`/`DaemonSets` of `apps/v1beta2` or newer **must not** contain the `helm.sh/chart` label or any label containing a version of the chart, because the selector is immutable.
The chart label string contains the version, so if it is specified, whenever the Chart.yaml version changes, Helm's attempt to change this immutable field would cause the upgrade to fail.
#### Fixing Selectors
##### For Deployments, StatefulSets, DaemonSets apps/v1beta1 or extensions/v1beta1
- If it does not specify `spec.selector.matchLabels`, set it
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump patch version of the Chart
##### For Deployments, StatefulSets, DaemonSets >=apps/v1beta2
- Remove `helm.sh/chart` label in `spec.selector.matchLabels` if it exists
- Bump major version of the Chart as it is a breaking change
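In either case, the resulting selector should end up containing only the release-stable labels, roughly like this sketch:
```yaml
selector:
  matchLabels:
    # helm.sh/chart: {{ include "myapp.chart" . }}   # removed: contains the chart version
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
```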
### Service Selectors
Label selectors for services must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels.
```yaml
selector:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### Persistence Labels
### StatefulSet
In case of a `Statefulset`, `spec.volumeClaimTemplates.metadata.labels` must have both `app.kubernetes.io/name` and `app.kubernetes.io/instance` labels, and **must not** contain `helm.sh/chart` label or any label containing a version of the chart, because `spec.volumeClaimTemplates` is immutable.
```yaml
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
```
If a chart has multiple components, an `app.kubernetes.io/component` label should be added to the selector (see above).
### PersistentVolumeClaim
In the case of a `PersistentVolumeClaim`, `matchLabels` should not be specified unless there is a special need, because it would prevent automatic `PersistentVolume` provisioning.
## Formatting
* YAML files should be indented with two spaces.
* List indentation style should be consistent.
* There should be a single space after `{{` and before `}}`.
## Configuration
* Docker images should be configurable. Image tags should use stable versions.
```yaml
image:
repository: myapp
tag: 1.2.3
pullPolicy: IfNotPresent
```
* The use of the `default` function should be avoided if possible in favor of defaults in `values.yaml`.
* It is usually best to not specify defaults for resources and to just provide sensible values that are commented out as a recommendation, especially when resources are rather high. This makes it easier to test charts on small clusters or Minikube. Setting resources should generally be a conscious choice of the user.
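To illustrate the note on the `default` function above, a hypothetical image tag is better defaulted in `values.yaml` than inside the template:
```yaml
# values.yaml -- preferred: the default is visible and documented in one place
image:
  repository: myapp
  tag: 1.2.3

# templates/deployment.yaml -- discouraged: the default is hidden in the template
#   image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default "1.2.3" }}"
```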
## Persistence
* Persistence should be enabled by default
* PVCs should support specifying an existing claim
* Storage class should be empty by default so that the default storage class is used
* All options should be shown in README.md
* Example persistence section in values.yaml:
```yaml
persistence:
enabled: true
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 10Gi
# existingClaim: ""
```
* Example pod spec within a deployment:
```yaml
volumes:
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "myapp.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
```
* Example pvc.yaml:
```yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
```
## AutoScaling / HorizontalPodAutoscaler
* Autoscaling should be disabled by default
* All options should be shown in README.md
* Example autoscaling section in values.yaml:
```yaml
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
```
* Example hpa.yaml:
```yaml
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "myapp.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
app.kubernetes.io/component: "{{ .Values.name }}"
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "myapp.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
```
## Ingress
* See the [Ingress resource documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for a broader concept overview
* Ingress should be disabled by default
* Example ingress section in values.yaml:
```yaml
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.test
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.test
```
* Example ingress.yaml:
```yaml
{{- if .Values.ingress.enabled -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ include "myapp.fullname" }}
labels:
app.kubernetes.io/name: {{ include "myapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "myapp.chart" . }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
          - path: {{ $.Values.ingress.path }}
            backend:
              serviceName: {{ include "myapp.fullname" $ }}
servicePort: http
{{- end }}
{{- end }}
```
* Example prepend logic for getting an application URL in NOTES.txt:
```
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
```
## Documentation
`README.md` and `NOTES.txt` are mandatory. `README.md` should contain a table listing all configuration options. `NOTES.txt` should provide accurate and useful information on how the chart can be used and accessed.
## Compatibility
We officially support compatibility with the current and the previous minor version of Kubernetes. Generated resources should use the latest possible API versions compatible with these versions. For extended backwards compatibility, conditional logic based on capabilities may be used (see [built-in objects](https://github.com/helm/helm/blob/master/docs/chart_template_guide/builtin_objects.md)).
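For instance, a chart that has to work on clusters both with and without `apps/v1` could switch the apiVersion on the cluster's advertised capabilities (a sketch, not a requirement):
```yaml
{{- if .Capabilities.APIVersions.Has "apps/v1" }}
apiVersion: apps/v1
{{- else }}
apiVersion: apps/v1beta2
{{- end }}
kind: Deployment
```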
## Kubernetes Native Workloads
While reviewing Charts that contain workloads such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/), the points below should be considered. These are to be seen as best practices rather than strict enforcement.
1. Any workloads that are stateless and long-running (servers) in nature are to be created as Deployments. Deployments in turn create ReplicaSets.
2. Unless there is a compelling reason, ReplicaSets or ReplicationControllers should be avoided as workload types.
3. Workloads that are stateful in nature, such as databases, key-value stores, message queues, and in-memory caches, are to be created as StatefulSets.
4. It is recommended that Deployments and StatefulSets configure their workloads with a [Pod Disruption Budget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) for high availability.
5. For workloads such as databases, KV stores, etc., that replicate data, it is recommended to configure interpod anti-affinity.
6. It is recommended to have a default workload update strategy configured that is suitable for this chart.
7. Batch workloads are to be created using Jobs.
8. It is best to always create workloads with the latest supported [API version](https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/), as older versions are either deprecated or soon to be deprecated.
9. It is generally advisable not to hard-code [resource limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) for workloads but to leave them configurable, unless the workload requires such a quantity as a bare minimum to function.
10. As much as possible, complex pre-app setups should be configured using [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
More [configuration](https://kubernetes.io/docs/concepts/configuration/overview/) best practices can be found in the Kubernetes documentation.
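As a sketch of point 4 above, a minimal `PodDisruptionBudget` template for the hypothetical `myapp` chart could look like this (the `podDisruptionBudget` values keys are illustrative, not part of any existing chart):
```yaml
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ include "myapp.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "myapp.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "myapp.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
```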


@ -1,3 +0,0 @@
# Community Code of Conduct
Helm follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).


@ -1,15 +0,0 @@
name: kibana
version: 1.1.2
appVersion: 6.5.4
description: Kibana is an open source data visualization plugin for Elasticsearch
icon: https://raw.githubusercontent.com/elastic/kibana/master/src/ui/public/icons/kibana-color.svg
keywords:
- elasticsearch
- kibana
maintainers:
- name: compleatang
email: casey@monax.io
sources:
- https://github.com/elastic/kibana
engine: gotpl
home: https://www.elastic.co/products/kibana


@ -1,6 +0,0 @@
approvers:
- compleatang
- monotek
reviewers:
- compleatang
- monotek


@ -1,128 +0,0 @@
# kibana
[kibana](https://github.com/elastic/kibana) is your window into the Elastic Stack. Specifically, it's an open source (Apache Licensed), browser-based analytics and search dashboard for Elasticsearch.
## TL;DR;
```console
$ helm install stable/kibana
```
## Introduction
This chart bootstraps a kibana deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install stable/kibana --name my-release
```
The command deploys kibana on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
NOTE: We have noticed that low resource constraints given to the chart plus plugins are likely not going to work well.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the kibana chart and their default values.
| Parameter | Description | Default |
|-----------------------------------------------|--------------------------------------------|----------------------------------------|
| `affinity` | node/pod affinities | None |
| `env` | Environment variables to configure Kibana | `{}` |
| `files` | Kibana configuration files | None |
| `livenessProbe.enabled` | livenessProbe to be enabled? | `false` |
| `livenessProbe.initialDelaySeconds` | number of seconds | 30 |
| `livenessProbe.timeoutSeconds` | number of seconds | 10 |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.repository` | Image repository | `docker.elastic.co/kibana/kibana-oss` |
| `image.tag` | Image tag | `6.5.4` |
| `image.pullSecrets` | Specify image pull secrets | `nil` |
| `commandline.args` | add additional commandline args | `nil` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | None: |
| `ingress.hosts` | Ingress accepted hostnames | None: |
| `ingress.tls` | Ingress TLS configuration | None: |
| `nodeSelector` | node labels for pod assignment | `{}` |
| `podAnnotations` | annotations to add to each pod | `{}` |
| `replicaCount` | desired number of pods | `1` |
| `revisionHistoryLimit` | revisionHistoryLimit | `3` |
| `serviceAccountName` | DEPRECATED: use serviceAccount.name | `nil` |
| `serviceAccount.create` | create a serviceAccount to run the pod | `false` |
| `serviceAccount.name` | name of the serviceAccount to create | `kibana.fullname` |
| `authProxyEnabled` | enables authproxy. Create container in extracontainers | `false` |
| `extraContainers` | Sidecar containers to add to the kibana pod| `{}` |
| `resources` | pod resource requests & limits | `{}` |
| `priorityClassName` | priorityClassName | `nil` |
| `service.externalPort` | external port for the service | `443` |
| `service.internalPort` | internal port for the service | `4180` |
| `service.authProxyPort` | port to use when using sidecar authProxy | None: |
| `service.externalIPs` | external IP addresses | None: |
| `service.loadBalancerIP` | Load Balancer IP address | None: |
| `service.loadBalancerSourceRanges`            | Limit load balancer source IPs to a list of CIDRs (where available) | `[]` |
| `service.nodePort` | NodePort value if service.type is NodePort | None: |
| `service.type` | type of service | `ClusterIP` |
| `service.annotations` | Kubernetes service annotations | None: |
| `service.labels` | Kubernetes service labels | None: |
| `tolerations` | List of node taints to tolerate | `[]` |
| `dashboardImport.timeout` | Time in seconds waiting for Kibana to be in green overall state | `60` |
| `dashboardImport.xpackauth.enabled` | Enable Xpack auth | `false` |
| `dashboardImport.xpackauth.username` | Optional Xpack username | `myuser` |
| `dashboardImport.xpackauth.password` | Optional Xpack password | `mypass` |
| `dashboardImport.dashboards` | Dashboards | `{}` |
| `plugins.enabled` | Enable installation of plugins. | `false` |
| `plugins.reset` | Optional : Remove all installed plugins before installing all new ones | `false` |
| `plugins.values` | List of plugins to install. Format <pluginName,version,URL> with URLs pointing to zip files of Kibana plugins to install | None: |
| `persistentVolumeClaim.enabled` | Enable PVC for plugins | `false` |
| `persistentVolumeClaim.existingClaim` | Use your own PVC for plugins | `false` |
| `persistentVolumeClaim.annotations` | Add your annotations for the PVC | `{}` |
| `persistentVolumeClaim.accessModes`           | Access modes for the PVC                    | `ReadWriteOnce`                         |
| `persistentVolumeClaim.size` | Size of the PVC | `5Gi` |
| `persistentVolumeClaim.storageClass` | Storage class of the PVC | None: |
| `readinessProbe.enabled` | readinessProbe to be enabled? | `false` |
| `readinessProbe.initialDelaySeconds` | number of seconds | 30 |
| `readinessProbe.timeoutSeconds` | number of seconds | 10 |
| `readinessProbe.periodSeconds` | number of seconds | 10 |
| `readinessProbe.successThreshold` | number of successes | 5 |
| `securityContext.enabled` | Enable security context (should be true for PVC) | `false` |
| `securityContext.allowPrivilegeEscalation` | Allow privilege escalation | `false` |
| `securityContext.runAsUser` | User id to run in pods | `1000` |
| `securityContext.fsGroup` | fsGroup id to run in pods | `2000` |
| `extraConfigMapMounts` | Additional configmaps to be mounted | `[]` |
| `deployment.annotations` | Annotations for deployment | `{}` |
* The Kibana configuration file's config properties can be set through the `env` parameter too.
* All the files listed under this variable will overwrite any existing files by the same name in the Kibana config directory.
* Files not mentioned under this variable will remain unaffected.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install stable/kibana --name my-release \
--set=image.tag=v0.0.2,resources.limits.cpu=200m
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example:
```console
$ helm install stable/kibana --name my-release -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Dashboard import
* A dashboard for `dashboardImport.dashboards` can be a JSON document or a download URL to a JSON file.


@ -1,21 +0,0 @@
---
# enable the dashboard init container with dashboard embedded in configmap
dashboardImport:
dashboards:
1_create_index: |-
{
"version": "6.4.2",
"objects": [
{
"id": "a88738e0-d3c1-11e8-b38e-a37c21cf8c95",
"version": 2,
"attributes": {
"title": "logstash-*",
"timeFieldName": "@timestamp",
"fields": "[{\"name\":\"@timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"
}
}
]
}


@ -1,6 +0,0 @@
---
extraConfigMapMounts:
- name: logtrail-configs
configMap: kibana-logtrail
mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json
subPath: logtrail.json


@ -1,3 +0,0 @@
ingress:
hosts:
- localhost.localdomain/kibana


@ -1,3 +0,0 @@
ingress:
hosts:
- kibana.localhost.localdomain


@ -1,9 +0,0 @@
---
# enable the plugin init container with plugins retrieved from an URL
plugins:
enabled: true
reset: false
# Use <plugin_name,version,url> to add/upgrade plugin
values:
- logtrail,0.1.30,https://github.com/sivasamyk/logtrail/releases/download/v0.1.30/logtrail-6.4.3-0.1.30.zip
# - other_plugin


@ -1,11 +0,0 @@
---
persistentVolumeClaim:
# set to true to use pvc
enabled: true
  # set to true to use your own PVC
existingClaim: false
annotations: {}
accessModes:
- ReadWriteOnce
size: "5Gi"


@ -1,6 +0,0 @@
---
securityContext:
enabled: true
allowPrivilegeEscalation: false
runAsUser: 1000
fsGroup: 2000


@ -1,6 +0,0 @@
---
# enable the dashboard init container with dashboard retrieved from an URL
dashboardImport:
dashboards:
k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json


@ -1,18 +0,0 @@
To verify that {{ template "kibana.fullname" . }} has started, run:
kubectl --namespace={{ .Release.Namespace }} get pods -l "app={{ template "kibana.name" . }}"
Kibana can be accessed:
* From outside the cluster, run these commands in the same shell:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "kibana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "kibana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:5601 to use Kibana"
kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 5601:5601
{{- end }}


@ -1,40 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kibana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kibana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "kibana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kibana.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{- if .Values.serviceAccountName -}}
{{- .Values.serviceAccountName }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- end -}}


@ -1,67 +0,0 @@
{{- if .Values.dashboardImport.dashboards }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "kibana.fullname" . }}-importscript
labels:
app: {{ template "kibana.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
dashboardImport.sh: |
#!/usr/bin/env bash
#
# kibana dashboard import script
#
cd /kibanadashboards
echo "Starting Kibana..."
/usr/local/bin/kibana-docker $@ &
echo "Waiting up to {{ .Values.dashboardImport.timeout }} seconds for Kibana to get in green overall state..."
for i in {1..{{ .Values.dashboardImport.timeout }}}; do
curl -s localhost:5601/api/status | python -c 'import sys, json; print json.load(sys.stdin)["status"]["overall"]["state"]' 2> /dev/null | grep green > /dev/null && break || sleep 1
done
for DASHBOARD_FILE in *; do
echo -e "Importing ${DASHBOARD_FILE} dashboard..."
if ! python -c 'import sys, json; print json.load(sys.stdin)' < "${DASHBOARD_FILE}" &> /dev/null ; then
echo "${DASHBOARD_FILE} is not valid JSON, assuming it's an URL..."
TMP_FILE="$(mktemp)"
curl -s $(cat ${DASHBOARD_FILE}) > ${TMP_FILE}
curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @${TMP_FILE}
rm ${TMP_FILE}
else
echo "Valid JSON found in ${DASHBOARD_FILE}, importing..."
curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @./${DASHBOARD_FILE}
fi
if [ "$?" != "0" ]; then
echo -e "\nImport of ${DASHBOARD_FILE} dashboard failed... Exiting..."
exit 1
else
echo -e "\nImport of ${DASHBOARD_FILE} dashboard finished :-)"
fi
done
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "kibana.fullname" . }}-dashboards
labels:
app: {{ template "kibana.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{- range $key, $value := .Values.dashboardImport.dashboards }}
{{ $key }}: |-
{{ $value | indent 4 }}
{{- end -}}
{{- end -}}


@ -1,14 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "kibana.fullname" . }}
labels:
app: {{ template "kibana.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
{{- range $key, $value := .Values.files }}
{{ $key }}: |
{{ toYaml $value | default "{}" | indent 4 }}
{{- end -}}


@ -1,231 +0,0 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
app: {{ template "kibana.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kibana.fullname" . }}
{{- if .Values.deployment.annotations }}
annotations:
{{ toYaml .Values.deployment.annotations | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "kibana.name" . }}
release: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ template "kibana.serviceAccountName" . }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if or (.Values.dashboardImport.dashboards) (.Values.plugins.enabled) }}
initContainers:
{{- if .Values.dashboardImport.dashboards }}
- name: {{ .Chart.Name }}-dashboardimport
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/bin/bash"]
args:
- "-c"
- "/tmp/dashboardImport.sh"
{{- if .Values.commandline.args }}
{{ toYaml .Values.commandline.args | indent 10 }}
{{- end }}
env:
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
ports:
- containerPort: {{ .Values.service.internalPort }}
name: {{ template "kibana.name" . }}
protocol: TCP
volumeMounts:
- name: {{ template "kibana.fullname" . }}-dashboards
mountPath: "/kibanadashboards"
- name: {{ template "kibana.fullname" . }}-importscript
mountPath: "/tmp/dashboardImport.sh"
subPath: dashboardImport.sh
{{- range $configFile := (keys .Values.files) }}
- name: {{ template "kibana.name" $ }}
mountPath: "/usr/share/kibana/config/{{ $configFile }}"
subPath: {{ $configFile }}
{{- end }}
{{- end }}
{{- if .Values.plugins.enabled}}
- name: {{ .Chart.Name }}-plugins-install
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/bash
- "-c"
- |
set -e
rm -rf plugins/lost+found
plugins=(
{{- range .Values.plugins.values }}
{{ . }}
{{- end }}
)
if {{ .Values.plugins.reset }}
then
for p in $(./bin/kibana-plugin list | cut -d "@" -f1)
do
./bin/kibana-plugin remove ${p}
done
fi
for i in "${plugins[@]}"
do
IFS=',' read -ra PLUGIN <<< "$i"
pluginInstalledCheck=$(./bin/kibana-plugin list | grep "${PLUGIN[0]}" | cut -d '@' -f1 || true)
pluginVersionCheck=$(./bin/kibana-plugin list | grep "${PLUGIN[0]}" | cut -d '@' -f2 || true)
if [ "${pluginInstalledCheck}" = "${PLUGIN[0]}" ]
then
if [ "${pluginVersionCheck}" != "${PLUGIN[1]}" ]
then
./bin/kibana-plugin remove "${PLUGIN[0]}"
./bin/kibana-plugin install "${PLUGIN[2]}"
fi
else
./bin/kibana-plugin install "${PLUGIN[2]}"
fi
done
env:
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
ports:
- containerPort: {{ .Values.service.internalPort }}
name: {{ template "kibana.name" . }}
protocol: TCP
volumeMounts:
- name: plugins
mountPath: /usr/share/kibana/plugins
{{- range $configFile := (keys .Values.files) }}
- name: {{ template "kibana.name" $ }}
mountPath: "/usr/share/kibana/config/{{ $configFile }}"
subPath: {{ $configFile }}
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
allowPrivilegeEscalation: {{ .Values.securityContext.allowPrivilegeEscalation }}
{{- end }}
{{- end }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.commandline.args }}
args:
- "/bin/bash"
- "/usr/local/bin/kibana-docker"
{{ toYaml .Values.commandline.args | indent 10 }}
{{- end }}
env:
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if (not .Values.authProxyEnabled) }}
ports:
- containerPort: {{ .Values.service.internalPort }}
name: {{ template "kibana.name" . }}
protocol: TCP
{{- end }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /status
port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /status
port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10 }}
volumeMounts:
{{- range $configFile := (keys .Values.files) }}
- name: {{ template "kibana.name" $ }}
mountPath: "/usr/share/kibana/config/{{ $configFile }}"
subPath: {{ $configFile }}
{{- end }}
{{- if .Values.plugins.enabled}}
- name: plugins
mountPath: /usr/share/kibana/plugins
{{- end }}
{{- with .Values.extraContainers }}
{{ tpl . $ | indent 6 }}
{{- end }}
{{- range .Values.extraConfigMapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml .Values.image.pullSecrets | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
fsGroup: {{ .Values.securityContext.fsGroup }}
{{- end }}
volumes:
- name: {{ template "kibana.name" . }}
configMap:
name: {{ template "kibana.fullname" . }}
{{- if .Values.plugins.enabled}}
- name: plugins
{{- if .Values.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ template "kibana.fullname" . }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
{{- if .Values.dashboardImport.dashboards }}
- name: {{ template "kibana.fullname" . }}-dashboards
configMap:
name: {{ template "kibana.fullname" . }}-dashboards
- name: {{ template "kibana.fullname" . }}-importscript
configMap:
name: {{ template "kibana.fullname" . }}-importscript
defaultMode: 0777
{{- end }}
{{- range .Values.extraConfigMapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}


@ -1,33 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "kibana.fullname" . -}}
{{- $servicePort := .Values.service.externalPort -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
labels:
app: {{ template "kibana.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kibana.fullname" . }}
annotations:
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
rules:
{{- range .Values.ingress.hosts }}
{{- $url := splitList "/" . }}
- host: {{ first $url }}
http:
paths:
- path: /{{ rest $url | join "/" }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}


@ -1,47 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ template "kibana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- range $key, $value := .Values.service.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
name: {{ template "kibana.fullname" . }}
{{- with .Values.service.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.externalPort }}
{{- if not .Values.authProxyEnabled }}
targetPort: {{ .Values.service.internalPort }}
{{- else }}
targetPort: {{ .Values.service.authProxyPort }}
{{- end }}
protocol: TCP
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{ .Values.service.nodePort }}
{{ end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
selector:
app: {{ template "kibana.name" . }}
release: {{ .Release.Name }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}


@ -1,11 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kibana.serviceAccountName" . }}
labels:
app: {{ template "kibana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- end -}}


@ -1,31 +0,0 @@
{{- if and .Values.plugins.enabled .Values.persistentVolumeClaim.enabled -}}
{{- if not .Values.persistentVolumeClaim.existingClaim -}}
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
{{- if .Values.persistentVolumeClaim.annotations }}
annotations:
{{ toYaml .Values.persistentVolumeClaim.annotations | indent 4 }}
{{- end }}
labels:
app: {{ template "kibana.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.persistentVolumeClaim.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kibana.fullname" . }}
spec:
accessModes:
{{ toYaml .Values.persistentVolumeClaim.accessModes | indent 4 }}
{{- if .Values.persistentVolumeClaim.storageClass }}
{{- if (eq "-" .Values.persistentVolumeClaim.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistentVolumeClaim.storageClass }}"
{{- end }}
{{- end }}
resources:
requests:
storage: "{{ .Values.persistentVolumeClaim.size }}"
{{- end -}}
{{- end -}}


@ -1,188 +0,0 @@
image:
repository: "docker.elastic.co/kibana/kibana-oss"
tag: "6.5.4"
pullPolicy: "IfNotPresent"
commandline:
args: []
env: {}
# All Kibana configuration options are adjustable via env vars.
# To adjust a config option to an env var uppercase + replace `.` with `_`
# Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
#
# ELASTICSEARCH_URL: http://elasticsearch-client:9200
# SERVER_PORT: 5601
# LOGGING_VERBOSE: "true"
# SERVER_DEFAULTROUTE: "/app/kibana"
files:
kibana.yml:
## Default Kibana configuration from kibana-docker.
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
## Custom config properties below
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
# server.port: 5601
# logging.verbose: "true"
# server.defaultRoute: "/app/kibana"
deployment:
annotations: {}
service:
type: ClusterIP
externalPort: 443
internalPort: 5601
# authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
#
## LoadBalancer IP if service.type is LoadBalancer
## Default: nil
##
# loadBalancerIP: 10.2.2.2
annotations: {}
# Annotation example: setup ssl with aws cert when service.type is LoadBalancer
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
labels: {}
## Label example: show service URL in `kubectl cluster-info`
# kubernetes.io/cluster-service: "true"
## Limit load balancer source ips to list of CIDRs (where available)
# loadBalancerSourceRanges: []
ingress:
enabled: false
# hosts:
# - kibana.localhost.localdomain
# - localhost.localdomain/kibana
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
serviceAccount:
# Specifies whether a service account should be created
create: false
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# If set and create is false, the service account must be existing
name:
livenessProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 5
# Enable an authproxy. Specify container in extraContainers
authProxyEnabled: false
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - --resource=uri=/*
# - --discovery-url=https://discovery-url
# - --client-id=client
# - --client-secret=secret
# - --listen=0.0.0.0:5602
# - --upstream-url=http://127.0.0.1:5601
# ports:
# - name: web
# containerPort: 9090
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
priorityClassName: ""
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
podAnnotations: {}
replicaCount: 1
revisionHistoryLimit: 3
# To export a dashboard from a running Kibana 6.3.x use:
# curl --user <username>:<password> -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json
# A dashboard is defined by a name and a string with the json payload or the download url
dashboardImport:
timeout: 60
xpackauth:
enabled: false
username: myuser
password: mypass
dashboards: {}
# k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json
# List of plugins to install using initContainer
# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well.
plugins:
# set to true to enable plugins installation
enabled: false
# set to true to remove all kibana plugins before installation
reset: false
# Use <plugin_name,version,url> to add/upgrade plugin
values:
# - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip
# - logtrail,0.1.30,https://github.com/sivasamyk/logtrail/releases/download/v0.1.30/logtrail-6.4.2-0.1.30.zip
# - other_plugin
persistentVolumeClaim:
# set to true to use pvc
enabled: false
  # set to true to use your own PVC
existingClaim: false
annotations: {}
accessModes:
- ReadWriteOnce
size: "5Gi"
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# default security context
securityContext:
enabled: false
allowPrivilegeEscalation: false
runAsUser: 1000
fsGroup: 2000
extraConfigMapMounts: []
# - name: logtrail-configs
# configMap: kibana-logtrail
# mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json
# subPath: logtrail.json


@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch
