Remove the deprecated Helm charts and related code

These Helm charts were deprecated in Vitess 7.0:
  https://github.com/vitessio/vitess/issues/6439

Signed-off-by: Matt Lord <mattalord@gmail.com>
Matt Lord 2021-09-22 18:06:19 -04:00
Parent b79db56a5f
Commit ece6eb65d0
No key found matching this signature
GPG key ID: F94D24DFDB740617
59 changed files: 0 additions and 5958 deletions

@@ -1,6 +1,3 @@
By default, the [Helm Charts](https://github.com/vitessio/vitess/tree/main/helm)
point to the `vitess/lite` image on [Docker Hub](https://hub.docker.com/u/vitess/).
We created the `lite` image as a stripped-down version of our old `base` image so that Kubernetes pods can start faster.
The `lite` image is updated automatically after every push to the GitHub main branch.
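For example, pulling the current build (a quick sketch; the `latest` tag is assumed here to be the one tracking the main branch, per the note above):
```
docker pull vitess/lite:latest
```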

@@ -243,10 +243,6 @@ And finally, click on `Publish release`.
* Schedule and publish a Tweet on the Vitess account.
* Run the following script once the `base` Docker image is live.
```
# script source: https://github.com/vitessio/vitess/blob/master/helm/release.sh
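# Hypothetical invocation: run from the script's directory in a repo checkout.
./release.sh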
```
* Deploy and release Java packages by following the `Java Packages Deploy & Release` section below.
### Java Packages Deploy & Release

@@ -1,57 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
schema:
initial: |-
create table product(
sku varbinary(128),
description varbinary(128),
price bigint,
primary key(sku)
);
create table customer(
customer_id bigint not null auto_increment,
email varbinary(128),
primary key(customer_id)
);
create table corder(
order_id bigint not null auto_increment,
customer_id bigint,
sku varbinary(128),
price bigint,
primary key(order_id)
);
vschema:
initial: |-
{
"tables": {
"product": {},
"customer": {},
"corder": {}
}
}
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,35 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,40 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "move-tables"
kind: "vtctlclient"
command: "MoveTables -workflow=commerce2customer commerce customer \'{\"customer\":{}, \"corder\":{}}\'"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,43 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "mswitch1"
kind: "vtctlclient"
command: "SwitchReads -tablet_type=rdonly customer.commerce2customer"
- name: "mswitch2"
kind: "vtctlclient"
command: "SwitchReads -tablet_type=replica customer.commerce2customer"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,40 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "mswitch3"
kind: "vtctlclient"
command: "SwitchWrites customer.commerce2customer"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,60 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
schema:
postsplit: |-
drop table customer;
drop table corder;
vschema:
postsplit: |-
{
"tables": {
"product": {}
}
}
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "vclean1"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 rdonly"
- name: "vclean2"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 replica"
- name: "vclean3"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 master"
- name: "vclean4"
kind: "vtctlclient"
command: "ApplyRoutingRules -rules=\'{}\'"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,94 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
schema:
seq: |-
create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
insert into customer_seq(id, next_id, cache) values(0, 1000, 100);
create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
insert into order_seq(id, next_id, cache) values(0, 1000, 100);
vschema:
seq: |-
{
"tables": {
"customer_seq": {
"type": "sequence"
},
"order_seq": {
"type": "sequence"
},
"product": {}
}
}
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
schema:
sharded: |-
alter table customer change customer_id customer_id bigint not null;
alter table corder change order_id order_id bigint not null;
vschema:
sharded: |-
{
"sharded": true,
"vindexes": {
"hash": {
"type": "hash"
}
},
"tables": {
"customer": {
"column_vindexes": [
{
"column": "customer_id",
"name": "hash"
}
],
"auto_increment": {
"column": "customer_id",
"sequence": "customer_seq"
}
},
"corder": {
"column_vindexes": [
{
"column": "customer_id",
"name": "hash"
}
],
"auto_increment": {
"column": "order_id",
"sequence": "order_seq"
}
}
}
}
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,49 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
copySchema:
source: "customer/0"
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
copySchema:
source: "customer/0"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,50 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "reshard"
kind: "vtctlclient"
command: "Reshard customer.cust2cust \'0\' \'-80,80-\'"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,53 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "rswitch1"
kind: "vtctlclient"
command: "SwitchReads -tablet_type=rdonly customer.cust2cust"
- name: "rswitch2"
kind: "vtctlclient"
command: "SwitchReads -tablet_type=replica customer.cust2cust"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,50 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "rswitch3"
kind: "vtctlclient"
command: "SwitchWrites customer.cust2cust"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,40 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,45 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
jobs:
- name: "delete-shard0"
kind: "vtctlclient"
command: "DeleteShard -recursive customer/0"
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,40 +0,0 @@
topology:
deploymentType: test
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 3
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 3
vttablet:
terminationGracePeriodSeconds: 1
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,58 +0,0 @@
# Instructions
Detailed instructions for running this example can be found at https://vitess.io.
This document contains the summary of the commands to be run.
```
# Start minikube
minikube start --cpus=4 --memory=8000
# Bring up initial cluster and commerce keyspace
helm install vitess ../../helm/vitess -f 101_initial_cluster.yaml
# Insert and verify data
mysql < ../common/insert_commerce_data.sql
mysql --table < ../common/select_commerce_data.sql
# Bring up customer keyspace
helm upgrade vitess ../../helm/vitess/ -f 201_customer_tablets.yaml
# Initiate move tables
vtctlclient MoveTables -workflow=commerce2customer commerce customer '{"customer":{}, "corder":{}}'
# Validate
vtctlclient VDiff customer.commerce2customer
# Cut-over
vtctlclient SwitchReads -tablet_type=rdonly customer.commerce2customer
vtctlclient SwitchReads -tablet_type=replica customer.commerce2customer
vtctlclient SwitchWrites customer.commerce2customer
# Clean-up
vtctlclient DropSources customer.commerce2customer
# Prepare for resharding
helm upgrade vitess ../../helm/vitess/ -f 301_customer_sharded.yaml
helm upgrade vitess ../../helm/vitess/ -f 302_new_shards.yaml
# Reshard
vtctlclient Reshard customer.cust2cust '0' '-80,80-'
# Validate
vtctlclient VDiff customer.cust2cust
# Cut-over
vtctlclient SwitchReads -tablet_type=rdonly customer.cust2cust
vtctlclient SwitchReads -tablet_type=replica customer.cust2cust
vtctlclient SwitchWrites customer.cust2cust
# Down shard 0
helm upgrade vitess ../../helm/vitess/ -f 306_down_shard_0.yaml
vtctlclient DeleteShard -recursive customer/0
# Delete deployment
helm delete vitess
kubectl delete pvc -l "app=vitess"
kubectl delete vitesstoponodes --all
```

@@ -1,74 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
schema:
initial: |-
create table product(
sku varbinary(128),
description varbinary(128),
price bigint,
primary key(sku)
);
create table customer(
customer_id bigint not null auto_increment,
email varbinary(128),
primary key(customer_id)
);
create table corder(
order_id bigint not null auto_increment,
customer_id bigint,
sku varbinary(128),
price bigint,
primary key(order_id)
);
vschema:
initial: |-
{
"tables": {
"product": {},
"customer": {},
"corder": {}
}
}
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,50 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "create-customer-ks"
kind: "vtctlclient"
command: "CreateKeyspace -served_from='master:commerce,replica:commerce,rdonly:commerce' customer"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,75 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
vschema:
vsplit: |-
{
"tables": {
"product": {}
}
}
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
copySchema:
source: "commerce/0"
tables:
- "customer"
- "corder"
vschema:
vsplit: |-
{
"tables": {
"customer": {},
"corder": {}
}
}
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,61 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "vertical-split"
kind: "vtworker"
cell: "zone1"
command: "VerticalSplitClone -min_healthy_tablets=1 -tables=customer,corder customer/0"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,63 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "msf1"
kind: "vtctlclient"
command: "MigrateServedFrom customer/0 rdonly"
- name: "msf2"
kind: "vtctlclient"
command: "MigrateServedFrom customer/0 replica"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,60 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "msf3"
kind: "vtctlclient"
command: "MigrateServedFrom customer/0 master"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,70 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
schema:
postsplit: |-
drop table customer;
drop table corder;
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "vclean1"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 rdonly"
- name: "vclean2"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 replica"
- name: "vclean3"
kind: "vtctlclient"
command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 master"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,114 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
schema:
seq: |-
create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
insert into customer_seq(id, next_id, cache) values(0, 1000, 100);
create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
insert into order_seq(id, next_id, cache) values(0, 1000, 100);
vschema:
seq: |-
{
"tables": {
"customer_seq": {
"type": "sequence"
},
"order_seq": {
"type": "sequence"
},
"product": {}
}
}
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
schema:
sharded: |-
alter table customer change customer_id customer_id bigint not null;
alter table corder change order_id order_id bigint not null;
vschema:
sharded: |-
{
"sharded": true,
"vindexes": {
"hash": {
"type": "hash"
}
},
"tables": {
"customer": {
"column_vindexes": [
{
"column": "customer_id",
"name": "hash"
}
],
"auto_increment": {
"column": "customer_id",
"sequence": "customer_seq"
}
},
"corder": {
"column_vindexes": [
{
"column": "customer_id",
"name": "hash"
}
],
"auto_increment": {
"column": "order_id",
"sequence": "order_seq"
}
}
}
}
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,75 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
copySchema:
source: "customer/0"
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
copySchema:
source: "customer/0"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,77 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "horizontal-split"
kind: "vtworker"
cell: "zone1"
command: "SplitClone -min_healthy_rdonly_tablets=1 customer/0"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,79 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "mst1"
kind: "vtctlclient"
command: "MigrateServedTypes customer/0 rdonly"
- name: "mst2"
kind: "vtctlclient"
command: "MigrateServedTypes customer/0 replica"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,76 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "mst3"
kind: "vtctlclient"
command: "MigrateServedTypes customer/0 master"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,63 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,68 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
jobs:
- name: "delete-shard0"
kind: "vtctlclient"
command: "DeleteShard -recursive customer/0"
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,63 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "customer"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 1
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
mysqlSize: "prod"
resources:
mysqlResources:
# It's generally not recommended to override this value for production usage.
terminationGracePeriodSeconds: 1
vtworker:
resources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,36 +0,0 @@
#!/bin/bash
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a convenience script to run the mysql client against the local example.
host=$(minikube service vtgate-zone1 --url=true --format="{{.IP}}" | tail -n 1)
port=$(minikube service vtgate-zone1 --url=true --format="{{.Port}}" | tail -n 1)
if [ -z "$port" ]; then
# This checks whether K8s is running on a single node (e.g. one set up by kubeadm)
if [ $(kubectl get nodes | grep -v NAME | wc -l) -eq 1 -o $(kubectl get nodes | grep -v NAME | grep master | wc -l) -eq 1 ]; then
host="127.0.0.1"
port=`kubectl describe service vtgate-zone1 | grep NodePort | grep mysql | awk '{print $3}' | awk -F'/' '{print $1}'`
fi
fi
if [ -z "$port" ]; then
echo "Error: failed to obtain [host:port] via minikube or kubectl."
exit 1;
fi
mysql -h "$host" -P "$port" "$@"

@@ -1,19 +0,0 @@
#!/bin/bash
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a convenience script to open the vtctld web UI for the local example.
xdg-open "$(minikube service vtctld --url|head -n 1)"

@@ -1,14 +0,0 @@
#!/bin/sh
kubectl port-forward service/vtctld 15000 15999 &
process_id1=$!
kubectl port-forward service/vtgate-zone1 15306:3306 15001 &
process_id2=$!
sleep 2
echo "You may point your browser to http://localhost:15000 for vtctld."
echo "You may point your browser to http://localhost:15001 for vtgate, use the following aliases as shortcuts:"
echo 'alias vtctlclient="vtctlclient -server=localhost:15999"'
echo 'alias mysql="mysql -h 127.0.0.1 -P 15306"'
echo "Hit Ctrl-C to stop the port forwards"
wait $process_id1
wait $process_id2

@@ -1,12 +0,0 @@
# Helm Charts
This directory contains [Helm](https://github.com/kubernetes/helm)
charts for running [Vitess](https://vitess.io) on
[Kubernetes](https://kubernetes.io).
Note that this is not in the `examples` directory because these are the
sources for canonical packages that we plan to publish to the official
[Kubernetes Charts Repository](https://github.com/kubernetes/charts).
However, you may also find them useful as a starting point for creating
customized charts for your site, or other general-purpose charts for
common cluster variations.
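As a quick orientation, the `vitess` chart can be installed directly from this directory (a minimal sketch mirroring the chart README's `Installing the Chart` section; it assumes you have written a `site-values.yaml` for your topology):
```
cd vitess
helm install . -f site-values.yaml
```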

@@ -1,73 +0,0 @@
#!/bin/bash
set -ex
vt_base_version='v7.0.2'
orchestrator_version='3.2.3'
pmm_client_version='1.17.4'
docker pull vitess/base:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/k8s:$vt_base_version-buster .
docker tag vitess/k8s:$vt_base_version-buster vitess/k8s:$vt_base_version
docker push vitess/k8s:$vt_base_version-buster
docker push vitess/k8s:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtgate:$vt_base_version-buster vtgate
docker tag vitess/vtgate:$vt_base_version-buster vitess/vtgate:$vt_base_version
docker push vitess/vtgate:$vt_base_version-buster
docker push vitess/vtgate:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vttablet:$vt_base_version-buster vttablet
docker tag vitess/vttablet:$vt_base_version-buster vitess/vttablet:$vt_base_version
docker push vitess/vttablet:$vt_base_version-buster
docker push vitess/vttablet:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctld:$vt_base_version-buster mysqlctld
docker tag vitess/mysqlctld:$vt_base_version-buster vitess/mysqlctld:$vt_base_version
docker push vitess/mysqlctld:$vt_base_version-buster
docker push vitess/mysqlctld:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctl:$vt_base_version-buster mysqlctl
docker tag vitess/mysqlctl:$vt_base_version-buster vitess/mysqlctl:$vt_base_version
docker push vitess/mysqlctl:$vt_base_version-buster
docker push vitess/mysqlctl:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctl:$vt_base_version-buster vtctl
docker tag vitess/vtctl:$vt_base_version-buster vitess/vtctl:$vt_base_version
docker push vitess/vtctl:$vt_base_version-buster
docker push vitess/vtctl:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctlclient:$vt_base_version-buster vtctlclient
docker tag vitess/vtctlclient:$vt_base_version-buster vitess/vtctlclient:$vt_base_version
docker push vitess/vtctlclient:$vt_base_version-buster
docker push vitess/vtctlclient:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctld:$vt_base_version-buster vtctld
docker tag vitess/vtctld:$vt_base_version-buster vitess/vtctld:$vt_base_version
docker push vitess/vtctld:$vt_base_version-buster
docker push vitess/vtctld:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtworker:$vt_base_version-buster vtworker
docker tag vitess/vtworker:$vt_base_version-buster vitess/vtworker:$vt_base_version
docker push vitess/vtworker:$vt_base_version-buster
docker push vitess/vtworker:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/logrotate:$vt_base_version-buster logrotate
docker tag vitess/logrotate:$vt_base_version-buster vitess/logrotate:$vt_base_version
docker push vitess/logrotate:$vt_base_version-buster
docker push vitess/logrotate:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/logtail:$vt_base_version-buster logtail
docker tag vitess/logtail:$vt_base_version-buster vitess/logtail:$vt_base_version
docker push vitess/logtail:$vt_base_version-buster
docker push vitess/logtail:$vt_base_version
docker build --build-arg VT_BASE_VER=$vt_base_version --build-arg PMM_CLIENT_VER=$pmm_client_version -t vitess/pmm-client:v$pmm_client_version-buster pmm-client
docker tag vitess/pmm-client:v$pmm_client_version-buster vitess/pmm-client:v$pmm_client_version
docker push vitess/pmm-client:v$pmm_client_version-buster
docker push vitess/pmm-client:v$pmm_client_version
docker build --build-arg VT_BASE_VER=$vt_base_version --build-arg ORC_VER=$orchestrator_version -t vitess/orchestrator:v$orchestrator_version-buster orchestrator
docker tag vitess/orchestrator:v$orchestrator_version-buster vitess/orchestrator:v$orchestrator_version
docker push vitess/orchestrator:v$orchestrator_version-buster
docker push vitess/orchestrator:v$orchestrator_version

helm/vitess/.gitignore (vendored)

@@ -1,2 +0,0 @@
# Don't check in site-local customizations.
site-values.yaml

@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@@ -1,86 +0,0 @@
## 2.0.1-0 - 2020-04-16
The charts now officially support Kubernetes 1.11 and newer.
### Changes
* The VitessTopoNode CRD is now created using the `apiextensions.k8s.io/v1beta1` API.
## 2.0.0-0 - 2020-04-03
Vitess now supports using the Kubernetes API as a topology provider. This means that it is now easier than ever to run Vitess on Kubernetes!
Properly supporting this new provider requires a major, breaking change to the Helm charts. The `etcd-operator` has been deprecated as well, so the Vitess team has decided to make the Kubernetes topology provider the default going forward.
### Upgrade and Migration Information
* This version introduces a `topologyProvider` configuration in `topology.globalCell` and in the configuration for each cell individually. The default from v2 on is to use the `k8s` topology provider. Explicitly set these values to `etcd2` in order to continue to use the etcd topology provider.
* The `root` is now set properly for all topology cells. Prior to this version, all cells used `""` as the root, which worked but was invalid. The root path for all cells will now be set to `/vitess/{{ $cell.name }}`. In order to upgrade a helm deployment from v1 to v2, you will need to stop all Vitess components and migrate all etcd keys except `/global` from `/` to `/vitess/{{ $cell.name }}`. There is no automation for this procedure at this time.
### Changes
* Update images of Vitess components to **TODO: we need new images based on a released tag, not just master at a point in time**
* Set the topology `root` in all new and existing cells to `/vitess/{{ $cell.name }}`
* Add `topology.globalCell.topologyProvider` - default to `k8s`
* Add `topology.cells[*].topologyProvider` - default to `k8s`
## 1.0.7-5 - 2019-12-02
### Changes
* Update images of Vitess components to v4.0.0
* Update MySQL image to Percona 5.7.26
* Support for OpenTracing
## 1.0.6 - 2019-01-20
### Changes
* Update Orchestrator default to 3.0.14
* Run `pmm-admin repair` on `pmm-client` startup to recover failures on `pmm-server`
* Backups now only run on `replica` (non-master), `rdonly`, or `spare` tablet types
## 1.0.5 - 2019-01-12
### Changes
* Set FailMasterPromotionIfSQLThreadNotUpToDate = true in Orchestrator config, to prevent
lagging replicas from being promoted to master and causing errant GTID problems.
**NOTE:** You need to manually restart your Orchestrator pods for this change to take effect
## 1.0.4 - 2019-01-01
### Changes
* Use the [Orchestrator API](https://github.com/openark/orchestrator/blob/master/docs/using-the-web-api.md)
to call `begin-downtime` before running `PlannedReparentShard` in the `preStopHook`, to make sure that Orchestrator
doesn't try to run an external failover while Vitess is reparenting. When it is complete, it calls `end-downtime`.
Also call `forget` on the instance after calling `vtctlclient DeleteTablet`. It will be rediscovered if/when
the tablet comes back up. This eliminates most possible race conditions that could cause split brain.
## 1.0.3 - 2018-12-20
### Changes
* Start tagging helm images and use them as default
* Added commonly used flags to values.yaml for vtgate & vttablet, for discoverability.
Some match the binary flag defaults, and some have been set to more production-ready values.
* Extended vttablet terminationGracePeriodSeconds from 600 to 60000000.
This will block on `PlannedReparent` in the `preStopHook` forever to prevent
unsafe `EmergencyReparent` operations when the pod is killed.
### Bug fixes
* Use `$MYSQL_FLAVOR` to set flavor instead of `$EXTRA_MY_CNF`
## 1.0.2 - 2018-12-11
### Bug fixes
* Renamed ImagePullPolicy to imagePullPolicy
* Added user-secret-volumes to backup CronJob
## 1.0.1 - 2018-12-07
### Changes
* Added support for [MySQL Custom Queries](https://www.percona.com/blog/2018/10/10/percona-monitoring-and-management-pmm-1-15-0-is-now-available/) in PMM
* Added Linux host monitoring for PMM
* Added keyspace and shard labels to jobs
* Remove old mysql.sock file in vttablet InitContainer
### Bug fixes
* PMM wouldn't bootstrap correctly on a new cluster
## 1.0.0 - 2018-12-03 Vitess Helm Chart goes GA!

@@ -1,20 +0,0 @@
apiVersion: v1
name: vitess
version: 2.0.1-0
description: Single-Chart Vitess Cluster
keywords:
- vitess
- mysql
- maria
- mariadb
- percona
- sql
- database
- shard
home: https://vitess.io
sources:
- https://github.com/vitessio/vitess
maintainers:
- name: Vitess Project
email: vitess@googlegroups.com
icon: https://vitess.io/img/logos/vitess.png

@@ -1,432 +0,0 @@
# Vitess
[Vitess](https://vitess.io) is a database clustering system for horizontal
scaling of MySQL. It is an open-source project started at YouTube,
and has been used there since 2011.
## Introduction
This chart creates a Vitess cluster on Kubernetes in a single
[release](https://github.com/kubernetes/helm/blob/master/docs/glossary.md#release).
It currently includes all Vitess components
(vtctld, vtgate, vttablet) inline (in `templates/`) rather than as sub-charts.
## Using Etcd For Topology Data
The chart will use Kubernetes as the topology store for Vitess. This is the preferred configuration when running Vitess in Kubernetes, as it has no external dependencies.
If you do wish to use `etcd` as the topology service, then you will need to create an etcd cluster and provide the configuration in your `values.yaml`. Etcd can be managed manually or via the [etcd-operator](https://github.com/coreos/etcd-operator).
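For example, a minimal sketch of such an install using the `topologyProvider` keys described in the chart changelog (this assumes the etcd cluster already exists and that any further etcd connection settings are supplied in your `values.yaml`):
```
helm install . -f site-values.yaml \
  --set topology.globalCell.topologyProvider=etcd2 \
  --set "topology.cells[0].topologyProvider=etcd2"
```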
## Installing the Chart
```console
helm/vitess$ helm install . -f site-values.yaml
```
See the [Configuration](#configuration) section below for what you need to put
in `site-values.yaml`.
You can install the chart without site values, but it will only launch a
skeleton cluster without any keyspaces (logical databases).
## Cleaning up
After deleting an installation of the chart, the PersistentVolumeClaims remain.
If you don't intend to use them again, you should delete them:
```shell
kubectl delete pvc -l app=vitess
```
## Configuration
You will need to provide a `site-values.yaml` file to specify your actual
logical database topology (e.g. whether to shard).
Here are examples of various configurations. To see additional options,
look at the default `values.yaml` file, which is well commented.
### Unsharded keyspace
```
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 3
mysqlProtocol:
enabled: false
keyspaces:
- name: "unsharded_dbname"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
```
### Unsharded + sharded keyspaces
```
topology:
cells:
- name: "zone1"
...
keyspaces:
- name: "unsharded_dbname"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- name: "sharded_db"
shards:
- name: "-80"
tablets:
- type: "replica"
vttablet:
replicas: 2
- name: "80-"
tablets:
- type: "replica"
vttablet:
replicas: 2
```
### Separate pools of replicas and rdonly tablets
```
topology:
cells:
- name: "zone1"
...
keyspaces:
- name: "unsharded_dbname"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
- type: "rdonly"
vttablet:
replicas: 2
```
### Append custom my.cnf to default Vitess settings
Create a config map with one or more standard `my.cnf` formatted files. Any settings
provided here will overwrite any colliding values from Vitess defaults.
`kubectl create configmap shared-my-cnf --from-file=shared.my.cnf`
*NOTE:* if using MySQL 8.0.x, this file must contain
`default_authentication_plugin = mysql_native_password`
```
topology:
cells:
...
vttablet:
# The name of a config map with N files inside of it. Each file will be added
# to $EXTRA_MY_CNF, overriding any default my.cnf settings
extraMyCnf: shared-my-cnf
```
### Use a custom database image and a specific Vitess release
```
topology:
cells:
...
vttablet:
vitessTag: "2.1"
mysqlImage: "percona:5.7.20"
flavor: percona
```
### Enable MySQL protocol support
```
topology:
cells:
- name: "zone1"
...
# enable or disable mysql protocol support, with accompanying auth details
mysqlProtocol:
enabled: false
username: myuser
# this is the secret that will be mounted as the user password
# kubectl create secret generic myuser-password --from-literal=password=abc123
passwordSecret: myuser-password
keyspaces:
...
```
### Enable backup/restore using Google Cloud Storage
Enabling backups creates a cron job per shard that defaults to executing once per day at midnight.
This can be overridden on a per shard level so you can stagger when backups occur.
```
topology:
cells:
- name: "zone1"
...
keyspaces:
- name: "unsharded_dbname"
shards:
- name: "0"
backup:
cron:
schedule: "0 1 * * *"
suspend: false
tablets:
- type: "replica"
vttablet:
replicas: 2
- name: "sharded_db"
shards:
- name: "-80"
backup:
cron:
schedule: "0 2 * * *"
suspend: false
tablets:
- type: "replica"
vttablet:
replicas: 2
- name: "80-"
backup:
cron:
schedule: "0 3 * * *"
suspend: false
tablets:
- type: "replica"
vttablet:
replicas: 2
config:
backup:
enabled: true
cron:
# the default schedule runs daily at midnight unless overridden by the individual shard
schedule: "0 0 * * *"
# if this is set to true, the cron jobs are created, but never execute
suspend: false
backup_storage_implementation: gcs
# Google Cloud Storage bucket to use for backups
gcs_backup_storage_bucket: vitess-backups
# root prefix for all backup-related object names
gcs_backup_storage_root: vtbackups
```
### Custom requests/limits
```
topology:
cells:
...
vttablet:
resources:
# common production values 2-4CPU/4-8Gi RAM
limits:
cpu: 2
memory: 4Gi
mysqlResources:
# common production values 4CPU/8-16Gi RAM
limits:
cpu: 4
memory: 8Gi
# PVC for mysql
dataVolumeClaimAnnotations:
dataVolumeClaimSpec:
# pd-ssd (Google Cloud)
# managed-premium (Azure)
# standard (AWS) - not sure what the default class is for ssd
storageClassName: "default"
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "10Gi"
```
### Custom PVC for MySQL data
```
topology:
cells:
...
vttablet:
dataVolumeClaimSpec:
# Google Cloud SSD
storageClassName: "pd-ssd"
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "100Gi"
```
### Enable PMM (Percona Monitoring and Management)
```
topology:
cells:
...
pmm:
enabled: true
pmmTag: "1.17.0"
client:
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
cpu: 200m
memory: 256Mi
server:
resources:
limits:
cpu: 2
memory: 4Gi
dataVolumeClaimSpec:
storageClassName: "default"
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "150Gi"
env:
metricsMemory: "3000000"
```
### Enable Orchestrator
#### NOTE: This requires at least Kubernetes 1.9
```
topology:
cells:
...
orchestrator:
enabled: true
```
### Enable TLS encryption for vitess grpc communication
Each Vitess component requires a certificate and private key to secure incoming requests, plus further configuration for every outgoing connection. In this example, TLS certificates were generated and stored in several Kubernetes secrets:
```yaml
vttablet:
extraFlags:
# configure which certificates to use for serving grpc requests
grpc_cert: /vt/usersecrets/vttablet-tls/vttablet.pem
grpc_key: /vt/usersecrets/vttablet-tls/vttablet-key.pem
tablet_grpc_ca: /vt/usersecrets/vttablet-tls/vitess-ca.pem
tablet_grpc_server_name: vttablet
secrets:
- vttablet-tls
vtctld:
extraFlags:
grpc_cert: /vt/usersecrets/vtctld-tls/vtctld.pem
grpc_key: /vt/usersecrets/vtctld-tls/vtctld-key.pem
tablet_grpc_ca: /vt/usersecrets/vtctld-tls/vitess-ca.pem
tablet_grpc_server_name: vttablet
tablet_manager_grpc_ca: /vt/usersecrets/vtctld-tls/vitess-ca.pem
tablet_manager_grpc_server_name: vttablet
secrets:
- vtctld-tls
vtctlclient: # configuration used by both InitShardMaster-jobs and orchestrator to be able to communicate with vtctld
extraFlags:
vtctld_grpc_ca: /vt/usersecrets/vitess-ca/vitess-ca.pem
vtctld_grpc_server_name: vtctld
secrets:
- vitess-ca
vtgate:
extraFlags:
grpc_cert: /vt/usersecrets/vtgate-tls/vtgate.pem
grpc_key: /vt/usersecrets/vtgate-tls/vtgate-key.pem
tablet_grpc_ca: /vt/usersecrets/vtgate-tls/vitess-ca.pem
tablet_grpc_server_name: vttablet
secrets:
- vtgate-tls
```
### Slave replication traffic encryption
To encrypt traffic between slaves and the master, additional flags can be provided. By default, MySQL generates self-signed certificates on startup (otherwise, specify `ssl_*` settings within your `extraMyCnf`); these can be used to encrypt the traffic:
```
vttablet:
extraFlags:
db_flags: 2048
db_repl_use_ssl: true
db-config-repl-flags: 2048
```
### Percona at rest encryption using the vault plugin
To use the [percona at rest encryption](https://www.percona.com/doc/percona-server/LATEST/management/data_at_rest_encryption.html), several additional settings have to be provided via an `extraMyCnf` file. This only makes sense if the traffic is encrypted as well (see the sections above), since binlog replication is unencrypted by default.
```
apiVersion: v1
kind: ConfigMap
metadata:
name: vttablet-extra-config
namespace: vitess
data:
extra.cnf: |-
early-plugin-load=keyring_vault=keyring_vault.so
# this includes default rpl plugins, see https://github.com/vitessio/vitess/blob/main/config/mycnf/master_mysql57.cnf for details
plugin-load=rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so;keyring_udf=keyring_udf.so
keyring_vault_config=/vt/usersecrets/vttablet-vault/vault.conf # load keyring configuration from secret
innodb_encrypt_tables=ON # encrypt all tables by default
encrypt_binlog=ON # binlog encryption
master_verify_checksum=ON # necessary for binlog encryption
binlog_checksum=CRC32 # necessary for binlog encryption
encrypt-tmp-files=ON # use temporary AES keys to encrypt temporary files
```
An example Vault configuration, as provided by the `vttablet-vault` Secret in the example above:
```
vault_url = https://10.0.0.1:8200
secret_mount_point = vitess
token = 11111111-1111-1111-1111111111
vault_ca = /vt/usersecrets/vttablet-vault/vault-ca-bundle.pem
```
Finally, add the secret containing the Vault configuration and the additional MySQL configuration to your helm values:
```
vttablet:
flavor: "percona" # only works with percona
mysqlImage: "percona:5.7.23"
extraMyCnf: vttablet-extra-config
secrets:
- vttablet-vault
```
### Enable tracing (opentracing-jaeger)
To enable tracing of Vitess components using OpenTracing with Jaeger, add a tracing config with the tracer `opentracing-jaeger` to `extraFlags`. For example, to enable tracing for `vtgate`:
```yaml
vtgate:
extraFlags:
jaeger-agent-host: "JAEGER-AGENT:6831"
tracing-sampling-rate: 0.1
tracer: opentracing-jaeger
```

@@ -1,42 +0,0 @@
# This is a copy of the crd def from: vitess/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml
# It is not symlinked so that the helm charts do not have references to outside files
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: vitesstoponodes.topo.vitess.io
spec:
group: topo.vitess.io
additionalPrinterColumns:
- name: Key
type: string
description: The full key path
JSONPath: .data.key
validation:
openAPIV3Schema:
type: object
required:
- data
properties:
data:
type: object
required:
- key
- value
properties:
key:
description: A file-path like key. Must be an absolute path. Must not end with a /.
type: string
pattern: '^\/.+[^\/]$'
value:
description: A base64 encoded value. Must be a base64 encoded string or empty string.
type: string
pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$"
ephemeral:
description: Whether or not the node is considered ephemeral. True for lock and election nodes.
type: boolean
version: v1beta1
scope: Namespaced
names:
plural: vitesstoponodes
singular: vitesstoponode
kind: VitessTopoNode

@@ -1,65 +0,0 @@
topology:
cells:
- name: "zone1"
vtctld:
replicas: 1
vtgate:
replicas: 1
mysqlProtocol:
enabled: true
authType: "none"
keyspaces:
- name: "commerce"
shards:
- name: "0"
tablets:
- type: "replica"
vttablet:
replicas: 2
schema:
phase1: |-
create table product(
sku varbinary(128),
description varbinary(128),
price bigint,
primary key(sku)
);
create table customer(
user_id bigint not null auto_increment,
email varbinary(128),
primary key(user_id)
);
create table corder(
order_id bigint not null auto_increment,
user_id bigint,
product_id bigint,
msrp bigint,
primary key(order_id)
);
vschema:
phase1: |-
{
"tables": {
"product": {},
"customer": {},
"corder": {}
}
}
vtctld:
serviceType: "NodePort"
resources:
vtgate:
serviceType: "NodePort"
resources:
vttablet:
resources:
mysqlResources:
pmm:
enabled: false
orchestrator:
enabled: false

@@ -1,14 +0,0 @@
{{- $cell := (index .Values.topology.cells 0).name -}}
{{- $proxyURL := printf "http://localhost:8001/api/v1/namespaces/%s" .Release.Namespace -}}
Release name: {{.Release.Name}}
To access administrative web pages, start a proxy with:
kubectl proxy --port=8001
Then use the following URLs:
vtctld: {{$proxyURL}}/services/vtctld:web/proxy/app/
vtgate: {{$proxyURL}}/services/vtgate-{{$cell}}:web/proxy/
{{ if $.Values.orchestrator.enabled }}orchestrator: {{$proxyURL}}/services/orchestrator:web/proxy/{{ end }}
{{ if $.Values.pmm.enabled }} pmm: {{$proxyURL}}/services/pmm:web/proxy/{{ end }}

@@ -1,82 +0,0 @@
###################################
# backup cron
###################################
{{ define "vttablet-backup-cron" -}}
# set tuple values to more recognizable variables
{{- $cellClean := index . 0 -}}
{{- $keyspaceClean := index . 1 -}}
{{- $shardClean := index . 2 -}}
{{- $shardName := index . 3 -}}
{{- $keyspace := index . 4 -}}
{{- $shard := index . 5 -}}
{{- $vitessTag := index . 6 -}}
{{- $backup := index . 7 -}}
{{- $namespace := index . 8 -}}
{{- $defaultVtctlclient := index . 9 }}
{{ if $backup.enabled }}
# create cron job for current shard
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{ $shardName }}-backup
labels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
backupJob: "true"
spec:
schedule: {{ $shard.backup.cron.schedule | default $backup.cron.schedule | quote }}
concurrencyPolicy: Forbid
suspend: {{ $shard.backup.cron.suspend | default $backup.cron.suspend }}
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 20
jobTemplate:
spec:
template:
metadata:
labels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
backupJob: "true"
# pod spec
spec:
restartPolicy: Never
{{ include "pod-security" . | indent 10 }}
containers:
- name: backup
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 14 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC BackupShard {{ $keyspace.name }}/{{ $shard.name }}
resources:
requests:
cpu: 10m
memory: 20Mi
volumes:
{{ include "user-secret-volumes" $defaultVtctlclient.secrets | indent 12 }}
{{ end }}
{{- end -}}

@@ -1,41 +0,0 @@
###################################
# etcd cluster managed by pre-installed etcd operator
###################################
{{ define "etcd" -}}
# set tuple values to more recognizable variables
{{- $name := index . 0 -}}
{{- $replicas := index . 1 -}}
{{- $version := index . 2 -}}
{{- $resources := index . 3 }}
{{- $clusterWide := index . 4 }}
###################################
# EtcdCluster
###################################
apiVersion: "etcd.database.coreos.com/v1beta2"
kind: "EtcdCluster"
metadata:
name: "etcd-{{ $name }}"
## Adding this annotation make this cluster managed by clusterwide operators
## namespaced operators ignore it
annotations:
{{ if $clusterWide }}
etcd.database.coreos.com/scope: clusterwide
{{ end }}
spec:
size: {{ $replicas }}
version: {{ $version | quote }}
pod:
resources:
{{ toYaml ($resources) | indent 6 }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# prefer to stay away from other same-cell etcd pods
- weight: 100
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
etcd_cluster: "etcd-{{ $name }}"
{{- end -}}

@@ -1,392 +0,0 @@
# Helper templates
#############################
# Format a flag map into a command line,
# as expected by the golang 'flag' package.
# Boolean flags must be given a value, such as "true" or "false".
#############################
{{- define "format-flags" -}}
{{- range $key, $value := . -}}
-{{$key}}={{$value | quote}}
{{end -}}
{{- end -}}
############################
# Format a flag map into a command line (inline),
# as expected by the golang 'flag' package.
# Boolean flags must be given a value, such as "true" or "false".
#############################
{{- define "format-flags-inline" -}}
{{- range $key, $value := . -}}
-{{$key}}={{$value | quote}}{{" "}}
{{- end -}}
{{- end -}}
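# The inline variant renders the same hypothetical map on a single line:
#   -logtostderr="true" -port="15000"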
#############################
# Emit one "x" per tablet replica across all tablet types.
# Callers apply 'len' to the result to get the total
# replica count as an int.
#############################
{{- define "tablet-count" -}}
{{- range . -}}
{{- repeat (int .vttablet.replicas) "x" -}}
{{- end -}}
{{- end -}}
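# For example, two tablet types with 2 and 3 replicas (hypothetical values)
# render as "xxxxx"; 'len' of that string gives the total tablet count, 5.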
#############################
# Format a list of flag maps into a command line.
#############################
{{- define "format-flags-all" -}}
{{- range . }}{{template "format-flags" .}}{{end -}}
{{- end -}}
#############################
# Clean a label, replacing underscores with dashes and making sure it
# starts and ends with [A-Za-z0-9]. This is especially important for
# shard names, which can start or end with '-' (like -80 or 80-),
# which would be invalid Kubernetes labels.
#############################
{{- define "clean-label" -}}
{{- $replaced_label := . | replace "_" "-"}}
{{- if hasPrefix "-" . -}}
x{{$replaced_label}}
{{- else if hasSuffix "-" . -}}
{{$replaced_label}}x
{{- else -}}
{{$replaced_label}}
{{- end -}}
{{- end -}}
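# For example: "-80" becomes "x-80", "80-" becomes "80-x", and a cell
# named "zone_1" (hypothetical) becomes "zone-1".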
#############################
# injects default vitess environment variables
#############################
{{- define "vitess-env" -}}
- name: VTROOT
value: "/vt"
- name: VTDATAROOT
value: "/vtdataroot"
- name: GOBIN
value: "/vt/bin"
- name: VT_MYSQL_ROOT
value: "/usr"
- name: PKG_CONFIG_PATH
value: "/vt/lib"
{{- end -}}
#############################
# inject default pod security
#############################
{{- define "pod-security" -}}
securityContext:
runAsUser: 1000
fsGroup: 2000
{{- end -}}
#############################
# support region nodeAffinity if defined
#############################
{{- define "node-affinity" -}}
{{- $region := . -}}
{{ with $region }}
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "failure-domain.beta.kubernetes.io/region"
operator: In
values: [{{ $region | quote }}]
{{- end -}}
{{- end -}}
#############################
# mycnf exec - expects extraMyCnf config map name
#############################
{{- define "mycnf-exec" -}}
if [ "$VT_DB_FLAVOR" = "percona" ]; then
MYSQL_FLAVOR=Percona
elif [ "$VT_DB_FLAVOR" = "mysql" ]; then
MYSQL_FLAVOR=MySQL56
elif [ "$VT_DB_FLAVOR" = "mysql56" ]; then
MYSQL_FLAVOR=MySQL56
elif [ "$VT_DB_FLAVOR" = "maria" ]; then
MYSQL_FLAVOR=MariaDB
elif [ "$VT_DB_FLAVOR" = "mariadb" ]; then
MYSQL_FLAVOR=MariaDB
elif [ "$VT_DB_FLAVOR" = "mariadb103" ]; then
MYSQL_FLAVOR=MariaDB103
fi
export MYSQL_FLAVOR
{{ if . }}
for filename in /vt/userconfig/*.cnf; do
export EXTRA_MY_CNF="$EXTRA_MY_CNF:$filename"
done
{{ end }}
{{- end -}}
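# A sketch of the effect (assuming VT_DB_FLAVOR=mysql56 and a user config
# map providing /vt/userconfig/ks.cnf, both hypothetical): MYSQL_FLAVOR is
# exported as MySQL56 and EXTRA_MY_CNF is extended with ":/vt/userconfig/ks.cnf".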
#############################
#
# all backup helpers below
#
#############################
#############################
# backup flags - expects config.backup
#############################
{{- define "backup-flags" -}}
{{- $backup := index . 0 -}}
{{- $caller := index . 1 -}}
{{ with $backup }}
{{ if .enabled }}
{{ if eq $caller "vttablet" }}
-restore_from_backup
{{ end }}
-backup_storage_implementation=$VT_BACKUP_SERVICE
{{ if eq .backup_storage_implementation "gcs" }}
-gcs_backup_storage_bucket=$VT_GCS_BACKUP_STORAGE_BUCKET
-gcs_backup_storage_root=$VT_GCS_BACKUP_STORAGE_ROOT
{{ else if eq .backup_storage_implementation "s3" }}
-s3_backup_aws_region=$VT_S3_BACKUP_AWS_REGION
-s3_backup_storage_bucket=$VT_S3_BACKUP_STORAGE_BUCKET
-s3_backup_storage_root=$VT_S3_BACKUP_STORAGE_ROOT
-s3_backup_server_side_encryption=$VT_S3_BACKUP_SERVER_SIDE_ENCRYPTION
{{ else if eq .backup_storage_implementation "ceph" }}
-ceph_backup_storage_config=$CEPH_CREDENTIALS_FILE
{{ end }}
{{ end }}
{{ end }}
{{- end -}}
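# For example, with backups enabled and backup_storage_implementation "gcs"
# (hypothetical config), the "vttablet" caller renders roughly:
#   -restore_from_backup
#   -backup_storage_implementation=$VT_BACKUP_SERVICE
#   -gcs_backup_storage_bucket=$VT_GCS_BACKUP_STORAGE_BUCKET
#   -gcs_backup_storage_root=$VT_GCS_BACKUP_STORAGE_ROOT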
#############################
# backup env - expects config.backup
#############################
{{- define "backup-env" -}}
{{ if .enabled }}
- name: VT_BACKUP_SERVICE
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.backup_storage_implementation
{{ if eq .backup_storage_implementation "gcs" }}
- name: VT_GCS_BACKUP_STORAGE_BUCKET
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.gcs_backup_storage_bucket
- name: VT_GCS_BACKUP_STORAGE_ROOT
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.gcs_backup_storage_root
{{ else if eq .backup_storage_implementation "s3" }}
- name: VT_S3_BACKUP_AWS_REGION
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.s3_backup_aws_region
- name: VT_S3_BACKUP_STORAGE_BUCKET
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.s3_backup_storage_bucket
- name: VT_S3_BACKUP_STORAGE_ROOT
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.s3_backup_storage_root
- name: VT_S3_BACKUP_SERVER_SIDE_ENCRYPTION
valueFrom:
configMapKeyRef:
name: vitess-cm
key: backup.s3_backup_server_side_encryption
{{ end }}
{{ end }}
{{- end -}}
#############################
# backup volume - expects config.backup
#############################
{{- define "backup-volume" -}}
{{ if .enabled }}
{{ if eq .backup_storage_implementation "gcs" }}
{{ if .gcsSecret }}
- name: backup-creds
secret:
secretName: {{ .gcsSecret }}
{{ end }}
{{ else if eq .backup_storage_implementation "s3" }}
{{ if .s3Secret }}
- name: backup-creds
secret:
secretName: {{ .s3Secret }}
{{ end }}
{{ else if eq .backup_storage_implementation "ceph" }}
- name: backup-creds
secret:
secretName: {{required ".cephSecret necessary to use backup_storage_implementation: ceph!" .cephSecret }}
{{ end }}
{{ end }}
{{- end -}}
#############################
# backup volumeMount - expects config.backup
#############################
{{- define "backup-volumeMount" -}}
{{ if .enabled }}
{{ if eq .backup_storage_implementation "gcs" }}
{{ if .gcsSecret }}
- name: backup-creds
mountPath: /etc/secrets/creds
{{ end }}
{{ else if eq .backup_storage_implementation "s3" }}
{{ if .s3Secret }}
- name: backup-creds
mountPath: /etc/secrets/creds
{{ end }}
{{ else if eq .backup_storage_implementation "ceph" }}
- name: backup-creds
mountPath: /etc/secrets/creds
{{ end }}
{{ end }}
{{- end -}}
#############################
# backup exec
#############################
{{- define "backup-exec" -}}
{{ if .enabled }}
{{ if eq .backup_storage_implementation "gcs" }}
{{ if .gcsSecret }}
credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1)
export GOOGLE_APPLICATION_CREDENTIALS=$credsPath
cat $GOOGLE_APPLICATION_CREDENTIALS
{{ end }}
{{ else if eq .backup_storage_implementation "s3" }}
{{ if .s3Secret }}
credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1)
export AWS_SHARED_CREDENTIALS_FILE=$credsPath
cat $AWS_SHARED_CREDENTIALS_FILE
{{ end }}
{{ else if eq .backup_storage_implementation "ceph" }}
credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1)
export CEPH_CREDENTIALS_FILE=$credsPath
cat $CEPH_CREDENTIALS_FILE
{{ end }}
{{ end }}
{{- end -}}
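# For example, with a gcs backup config and gcsSecret set (hypothetical),
# the container exports GOOGLE_APPLICATION_CREDENTIALS pointing at the first
# file in the mounted secret before the main process starts.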
#############################
# user config volume - expects config map name
#############################
{{- define "user-config-volume" -}}
{{ if . }}
- name: user-config
configMap:
name: {{ . }}
{{ end }}
{{- end -}}
#############################
# user config volumeMount - expects config map name
#############################
{{- define "user-config-volumeMount" -}}
{{ if . }}
- name: user-config
mountPath: /vt/userconfig
{{ end }}
{{- end -}}
#############################
# user secret volumes - expects list of secret names
#############################
{{- define "user-secret-volumes" -}}
{{ if . }}
{{- range . }}
- name: user-secret-{{ . }}
secret:
secretName: {{ . }}
{{- end }}
{{ end }}
{{- end -}}
#############################
# user secret volumeMounts - expects list of secret names
#############################
{{- define "user-secret-volumeMounts" -}}
{{ if . }}
{{- range . }}
- name: user-secret-{{ . }}
mountPath: /vt/usersecrets/{{ . }}
{{- end }}
{{ end }}
{{- end -}}
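# For example, secrets: [mysql-auth] (a hypothetical secret name) renders a
# volume named user-secret-mysql-auth mounted at /vt/usersecrets/mysql-auth.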


@@ -1,141 +0,0 @@
###################################
# vtctlclient and vtworker Jobs
###################################
{{- define "vtctlclient-job" -}}
{{- $job := index . 0 -}}
{{- $defaultVtctlclient := index . 1 -}}
{{- $namespace := index . 2 -}}
{{- $vitessTag := $job.vitessTag | default $defaultVtctlclient.vitessTag -}}
{{- $secrets := $job.secrets | default $defaultVtctlclient.secrets }}
---
###################################
# Vitess vtctlclient Job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: vtctlclient-{{ $job.name }}
spec:
backoffLimit: 1
template:
metadata:
labels:
app: vitess
component: vtctlclient
vtctlclientJob: "true"
spec:
restartPolicy: OnFailure
containers:
- name: vtjob
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }}
resources:
{{ toYaml ($job.resources | default $defaultVtctlclient.resources) | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC {{ $job.command }}
volumes:
{{ include "user-secret-volumes" $secrets | indent 8 }}
{{- end -}}
{{- define "vtworker-job" -}}
{{- $job := index . 0 -}}
{{- $defaultVtworker := index . 1 -}}
{{- $namespace := index . 2 -}}
{{- $cell := index . 3 -}}
{{- $vitessTag := $job.vitessTag | default $defaultVtworker.vitessTag -}}
{{- $secrets := $job.secrets | default $defaultVtworker.secrets }}
---
###################################
# vtworker ServiceAccount
###################################
apiVersion: v1
kind: ServiceAccount
metadata:
name: vtworker
labels:
app: vitess
---
###################################
# vtworker RoleBinding
###################################
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: vtworker-topo-member
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vt-topo-member
subjects:
- kind: ServiceAccount
name: vtworker
namespace: {{ $namespace }}
---
###################################
# Vitess vtworker Job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: vtworker-{{ $job.name }}
spec:
backoffLimit: 1
template:
metadata:
labels:
app: vitess
component: vtworker
vtworkerJob: "true"
spec:
serviceAccountName: vtworker
{{ include "pod-security" . | indent 6 }}
restartPolicy: OnFailure
containers:
- name: vtjob
image: "vitess/vtworker:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtworker.secrets | indent 10 }}
resources:
{{ toYaml ($job.resources | default $defaultVtworker.resources) | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
eval exec /vt/bin/vtworker $(cat <<END_OF_COMMAND
-topo_global_root=/vitess/global
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-topo_implementation=etcd2
-topo_global_server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-topo_implementation="k8s"
{{- end }}
-cell={{ $job.cell | quote }}
-logtostderr=true
-stderrthreshold=0
END_OF_COMMAND
) {{ $job.command }}
volumes:
{{ include "user-secret-volumes" $secrets | indent 8 }}
{{- end -}}


@@ -1,136 +0,0 @@
###################################
# keyspace initializations
###################################
{{- define "keyspace" -}}
{{- $cell := index . 0 -}}
{{- $keyspace := index . 1 -}}
{{- $defaultVtctlclient := index . 2 -}}
{{- $namespace := index . 3 -}}
# sanitize inputs for labels
{{- $keyspaceClean := include "clean-label" $keyspace.name -}}
{{- with $cell.vtctld -}}
# define image to use
{{- $vitessTag := .vitessTag | default $defaultVtctlclient.vitessTag -}}
{{- $secrets := .secrets | default $defaultVtctlclient.secrets -}}
{{- range $name, $schema := $keyspace.schema }}
---
###################################
# ApplySchema Job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $keyspaceClean }}-apply-schema-{{ $name }}
spec:
backoffLimit: 1
template:
spec:
restartPolicy: OnFailure
containers:
- name: apply-schema
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
SECONDS=0
TIMEOUT_SECONDS=600
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
# poll every 5 seconds to see if vtctld is ready
until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }} > /dev/null 2>&1; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for vtctlclient to be ready"
exit 1
fi
sleep 5
done
while true; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for master"
exit 1
fi
# wait for all shards to have a master
{{- range $shard := $keyspace.shards }}
              master_alias=$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid')
if [ "$master_alias" == "null" -o "$master_alias" == "" ]; then
echo "no master for '{{ $keyspace.name }}/{{ $shard.name }}' yet, continuing to wait"
sleep 5
continue
fi
{{- end }}
break
done
vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ApplySchema -sql "$(cat <<END_OF_COMMAND
{{ $schema | indent 14}}
END_OF_COMMAND
)" {{ $keyspace.name }}
volumes:
{{ include "user-secret-volumes" $secrets | indent 8 }}
{{ end }}
{{- range $name, $vschema := $keyspace.vschema }}
---
###################################
# ApplyVSchema job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $keyspaceClean }}-apply-vschema-{{ $name }}
spec:
backoffLimit: 1
template:
spec:
restartPolicy: OnFailure
containers:
- name: apply-vschema
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
SECONDS=0
TIMEOUT_SECONDS=600
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
# poll every 5 seconds to see if keyspace is created
until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetKeyspace {{ $keyspace.name }} > /dev/null 2>&1; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for keyspace {{ $keyspace.name }} to be ready"
exit 1
fi
sleep 5
done
vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ApplyVSchema -vschema "$(cat <<END_OF_COMMAND
{{ $vschema | indent 14 }}
END_OF_COMMAND
)" {{ $keyspace.name }}
volumes:
{{ include "user-secret-volumes" $secrets | indent 8 }}
{{- end -}}
{{- end -}}
{{- end -}}


@@ -1,150 +0,0 @@
###################################
# Orchestrator Config
###################################
{{ define "orchestrator-config" -}}
# set tuple values to more recognizable variables
{{- $orc := index . 0 -}}
{{- $namespace := index . 1 -}}
{{- $enableHeartbeat := index . 2 -}}
{{- $defaultVtctlclient := index . 3 }}
apiVersion: v1
kind: ConfigMap
metadata:
name: orchestrator-cm
data:
orchestrator.conf.json: |-
{
"ActiveNodeExpireSeconds": 5,
"ApplyMySQLPromotionAfterMasterFailover": true,
"AuditLogFile": "/tmp/orchestrator-audit.log",
"AuditToSyslog": false,
"AuthenticationMethod": "",
"AuthUserHeader": "",
"AutoPseudoGTID": false,
"BackendDB": "sqlite",
"BinlogEventsChunkSize": 10000,
"CandidateInstanceExpireMinutes": 60,
"CoMasterRecoveryMustPromoteOtherCoMaster": false,
"DataCenterPattern": "[.]([^.]+)[.][^.]+[.]vitess[.]io",
"Debug": true,
"DefaultInstancePort": 3306,
"DefaultRaftPort": 10008,
"DetachLostSlavesAfterMasterFailover": true,
"DetectClusterAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='ClusterAlias'",
"DetectClusterDomainQuery": "",
"DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'",
"DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'",
"DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'",
"DetectPseudoGTIDQuery": "",
"DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000",
"DiscoverByShowSlaveHosts": false,
"EnableSyslog": false,
"ExpiryHostnameResolvesMinutes": 60,
"DelayMasterPromotionIfSQLThreadNotUpToDate": true,
"FailureDetectionPeriodBlockMinutes": 10,
"GraphiteAddr": "",
"GraphiteConvertHostnameDotsToUnderscores": true,
"GraphitePath": "",
"HostnameResolveMethod": "none",
"HTTPAuthPassword": "",
"HTTPAuthUser": "",
"HTTPAdvertise": "http://POD_NAME.orchestrator-headless.{{ $namespace }}:3000",
"InstanceBulkOperationsWaitTimeoutSeconds": 10,
"InstancePollSeconds": 5,
"ListenAddress": ":3000",
"MasterFailoverLostInstancesDowntimeMinutes": 0,
"MySQLConnectTimeoutSeconds": 1,
"MySQLHostnameResolveMethod": "none",
"MySQLTopologyCredentialsConfigFile": "",
"MySQLTopologyMaxPoolConnections": 3,
"MySQLTopologyPassword": "orc_client_user_password",
"MySQLTopologyReadTimeoutSeconds": 3,
"MySQLTopologySSLCAFile": "",
"MySQLTopologySSLCertFile": "",
"MySQLTopologySSLPrivateKeyFile": "",
"MySQLTopologySSLSkipVerify": true,
"MySQLTopologyUseMutualTLS": false,
"MySQLTopologyUser": "orc_client_user",
"OnFailureDetectionProcesses": [
"echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}' >> /tmp/recovery.log"
],
"OSCIgnoreHostnameFilters": [
],
"PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]vitess[.]io",
"PostFailoverProcesses": [
"echo '(for all types) Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log"
],
"PostIntermediateMasterFailoverProcesses": [
"echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log"
],
"PostMasterFailoverProcesses": [
"echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log",
"n=0; until [ $n -ge 10 ]; do vtctlclient {{ include "format-flags-inline" $defaultVtctlclient.extraFlags | toJson | trimAll "\"" }} -server vtctld.{{ $namespace }}:15999 TabletExternallyReparented {successorAlias} && break; n=$[$n+1]; sleep 5; done"
],
"PostponeSlaveRecoveryOnLagMinutes": 0,
"PostUnsuccessfulFailoverProcesses": [
],
"PowerAuthUsers": [
"*"
],
"PreFailoverProcesses": [
"echo 'Will recover from {failureType} on {failureCluster}' >> /tmp/recovery.log"
],
"ProblemIgnoreHostnameFilters": [
],
"PromotionIgnoreHostnameFilters": [
],
"PseudoGTIDMonotonicHint": "asc:",
"PseudoGTIDPattern": "drop view if exists .*?`_pseudo_gtid_hint__",
"RaftAdvertise": "POD_NAME.{{ $namespace }}",
"RaftBind": "POD_NAME",
"RaftDataDir": "/var/lib/orchestrator",
"RaftEnabled": true,
"RaftNodes": [
{{ range $i := until (int $orc.replicas) }}
"orchestrator-{{ $i }}.{{ $namespace }}"{{ if lt $i (sub (int64 $orc.replicas) 1) }},{{ end }}
{{ end }}
],
"ReadLongRunningQueries": false,
"ReadOnly": false,
"ReasonableMaintenanceReplicationLagSeconds": 20,
"ReasonableReplicationLagSeconds": 10,
"RecoverMasterClusterFilters": [
".*"
],
"RecoveryIgnoreHostnameFilters": [
],
"RecoveryPeriodBlockSeconds": 60,
"ReduceReplicationAnalysisCount": true,
"RejectHostnameResolvePattern": "",
"RemoveTextFromHostnameDisplay": ".vitess.io:3306",
{{ if $enableHeartbeat }}
"ReplicationLagQuery": "SELECT unix_timestamp() - floor(ts/1000000000) FROM `_vt`.heartbeat ORDER BY ts DESC LIMIT 1;",
{{ else }}
"ReplicationLagQuery": "",
{{ end }}
"ServeAgentsHttp": false,
"SkipBinlogEventsContaining": [
],
"SkipBinlogServerUnresolveCheck": true,
"SkipOrchestratorDatabaseUpdate": false,
"SlaveStartPostWaitMilliseconds": 1000,
"SnapshotTopologiesIntervalHours": 0,
"SQLite3DataFile": ":memory:",
"SSLCAFile": "",
"SSLCertFile": "",
"SSLPrivateKeyFile": "",
"SSLSkipVerify": false,
"SSLValidOUs": [
],
"StaleSeedFailMinutes": 60,
"StatusEndpoint": "/api/status",
"StatusOUVerify": false,
"UnseenAgentForgetHours": 6,
"UnseenInstanceForgetHours": 240,
"UseMutualTLS": false,
"UseSSL": false,
"VerifyReplicationFilters": false
}
{{ end }}
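# As a worked example (hypothetical values): with $orc.replicas = 3 in
# namespace "vitess", RaftNodes above renders as
# ["orchestrator-0.vitess", "orchestrator-1.vitess", "orchestrator-2.vitess"].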


@@ -1,222 +0,0 @@
###################################
# Master Orchestrator Service
###################################
{{ define "orchestrator" -}}
# set tuple values to more recognizable variables
{{- $orc := index . 0 -}}
{{- $defaultVtctlclient := index . 1 }}
apiVersion: v1
kind: Service
metadata:
name: orchestrator
labels:
app: vitess
component: orchestrator
spec:
ports:
- name: web
port: 80
targetPort: 3000
selector:
app: vitess
component: orchestrator
type: ClusterIP
---
###################################
# Headless Orchestrator Service
###################################
apiVersion: v1
kind: Service
metadata:
name: orchestrator-headless
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
labels:
app: vitess
component: orchestrator
spec:
clusterIP: None
ports:
- name: web
port: 80
targetPort: 3000
selector:
component: orchestrator
app: vitess
---
###################################
# Orchestrator StatefulSet
###################################
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: orchestrator
spec:
serviceName: orchestrator-headless
replicas: {{ $orc.replicas }}
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: vitess
component: orchestrator
template:
metadata:
labels:
app: vitess
component: orchestrator
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# strongly prefer to stay away from other orchestrators
- weight: 100
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "orchestrator"
initContainers:
{{ include "init-orchestrator" $orc | indent 8 }}
containers:
- name: orchestrator
image: {{ $orc.image | quote }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
name: web
protocol: TCP
- containerPort: 10008
name: raft
protocol: TCP
livenessProbe:
httpGet:
path: /api/lb-check
port: 3000
initialDelaySeconds: 300
timeoutSeconds: 10
readinessProbe:
httpGet:
path: "/api/raft-health"
port: 3000
timeoutSeconds: 10
resources:
{{ toYaml ($orc.resources) | indent 12 }}
volumeMounts:
- name: config-shared
mountPath: /conf/
- name: tmplogs
mountPath: /tmp
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 12 }}
env:
- name: VTCTLD_SERVER_PORT
value: "15999"
- name: recovery-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /tmp/recovery.log
volumeMounts:
- name: tmplogs
mountPath: /tmp
- name: audit-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /tmp/orchestrator-audit.log
volumeMounts:
- name: tmplogs
mountPath: /tmp
volumes:
- name: config-map
configMap:
name: orchestrator-cm
- name: config-shared
emptyDir: {}
- name: tmplogs
emptyDir: {}
{{ include "user-secret-volumes" $defaultVtctlclient.secrets | indent 8 }}
{{- end -}}
###################################
# Per StatefulSet Orchestrator Service
###################################
{{ define "orchestrator-statefulset-service" -}}
# set tuple values to more recognizable variables
{{- $orc := index . 0 -}}
{{- $i := index . 1 }}
apiVersion: v1
kind: Service
metadata:
name: orchestrator-{{ $i }}
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
labels:
app: vitess
component: orchestrator
spec:
ports:
- name: web
port: 80
targetPort: 3000
- name: raft
port: 10008
targetPort: 10008
selector:
component: orchestrator
app: vitess
# this should be auto-filled by kubernetes
statefulset.kubernetes.io/pod-name: "orchestrator-{{ $i }}"
{{- end -}}
###################################
# init-container to copy and sed
# Orchestrator config from ConfigMap
###################################
{{ define "init-orchestrator" -}}
{{- $orc := . }}
- name: init-orchestrator
image: {{ $orc.image | quote }}
volumeMounts:
- name: config-map
mountPath: /conftmp/
- name: config-shared
mountPath: /conf/
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
command: ["bash"]
args:
- "-c"
- |
set -ex
# make a copy of the config map file before editing it locally
cp /conftmp/orchestrator.conf.json /conf/orchestrator.conf.json
# set the local config to advertise/bind its own service IP
sed -i -e "s/POD_NAME/$MY_POD_NAME/g" /conf/orchestrator.conf.json
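      # e.g. in pod "orchestrator-1" (hypothetical), HTTPAdvertise becomes
      # "http://orchestrator-1.orchestrator-headless.<namespace>:3000" and
      # RaftBind becomes "orchestrator-1"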
{{- end -}}


@@ -1,231 +0,0 @@
###################################
# pmm Service + StatefulSet
###################################
{{ define "pmm" -}}
# set tuple values to more recognizable variables
{{- $pmm := index . 0 -}}
{{- $namespace := index . 1 }}
###################################
# pmm Service
###################################
kind: Service
apiVersion: v1
metadata:
name: pmm
labels:
component: pmm
app: vitess
spec:
ports:
- name: web
port: 80
selector:
component: pmm
app: vitess
type: ClusterIP
---
###################################
# pmm StatefulSet
###################################
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: pmm
spec:
serviceName: pmm
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: vitess
component: pmm
template:
metadata:
labels:
app: vitess
component: pmm
spec:
containers:
- name: pmm
image: "percona/pmm-server:{{ $pmm.pmmTag }}"
ports:
- name: web
containerPort: 80
volumeMounts:
- name: pmmdata
mountPath: /pmmdata
resources:
{{ toYaml $pmm.server.resources | indent 12 }}
env:
- name: DISABLE_UPDATES
value: "true"
- name: DISABLE_TELEMETRY
value: {{ $pmm.server.env.disableTelemetry | quote }}
- name: METRICS_RESOLUTION
value: {{ $pmm.server.env.metricsResolution | quote }}
- name: METRICS_RETENTION
value: {{ $pmm.server.env.metricsRetention | quote }}
- name: QUERIES_RETENTION
value: {{ $pmm.server.env.queriesRetention | quote }}
- name: METRICS_MEMORY
value: {{ $pmm.server.env.metricsMemory | quote }}
command: ["bash"]
args:
- "-c"
- |
set -ex
if [ ! -f /pmmdata/vitess-init ]; then
# the PV hasn't been initialized, so copy over default
# pmm-server directories before symlinking
mkdir -p /pmmdata
mv /opt/prometheus/data /pmmdata/data
mv /opt/consul-data /pmmdata
mv /var/lib/mysql /pmmdata
mv /var/lib/grafana /pmmdata
# initialize the PV and then mark it complete
touch /pmmdata/vitess-init
else
# remove the default directories so we can symlink the
# existing PV directories
rm -Rf /opt/prometheus/data
rm -Rf /opt/consul-data
rm -Rf /var/lib/mysql
rm -Rf /var/lib/grafana
fi
# symlink pmm-server paths to point to our PV
ln -s /pmmdata/data /opt/prometheus/
ln -s /pmmdata/consul-data /opt/
ln -s /pmmdata/mysql /var/lib/
ln -s /pmmdata/grafana /var/lib/
/opt/entrypoint.sh
volumeClaimTemplates:
- metadata:
name: pmmdata
annotations:
{{ toYaml $pmm.server.dataVolumeClaimAnnotations | indent 10 }}
spec:
{{ toYaml $pmm.server.dataVolumeClaimSpec | indent 8 }}
{{- end -}}
###################################
# sidecar container running pmm-client
###################################
{{ define "cont-pmm-client" -}}
{{- $pmm := index . 0 -}}
{{- $namespace := index . 1 -}}
{{- $keyspace := index . 2 }}
- name: "pmm-client"
image: "vitess/pmm-client:{{ $pmm.pmmTag }}"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vtdataroot
mountPath: "/vtdataroot"
{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }}
- name: config
mountPath: "/vt-pmm-config"
{{ end }}{{ end }}
ports:
- containerPort: 42001
name: query-data
- containerPort: 42002
name: mysql-metrics
securityContext:
# PMM requires root privileges
runAsUser: 0
resources:
{{ toYaml $pmm.client.resources | indent 4 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
# Redirect pmm-client data to persistent volume
if [ ! -d /vtdataroot/pmm ]; then
FIRST_RUN=1
mkdir -p /vtdataroot/pmm/percona
mkdir -p /vtdataroot/pmm/init.d
fi
mv /usr/local/percona /usr/local/percona_tmp
mv /etc/init.d /etc/init.d_tmp
ln -s /vtdataroot/pmm/percona /usr/local/percona
ln -s /vtdataroot/pmm/init.d /etc/init.d
ln -s /vtdataroot/pmm/pmm-mysql-metrics-42002.log /var/log/pmm-mysql-metrics-42002.log
if [ ! -z "$FIRST_RUN" ]; then
cp -r /usr/local/percona_tmp/* /vtdataroot/pmm/percona || :
cp -r /etc/init.d_tmp/* /vtdataroot/pmm/init.d || :
fi
{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }}
# link all the configmap files into their expected file locations
for filename in /vt-pmm-config/*; do
DEST_FILE=/vtdataroot/pmm/percona/pmm-client/$(basename "$filename")
rm -f $DEST_FILE
ln -s "$filename" $DEST_FILE
done
{{ end }}{{ end }}
# if this doesn't return an error, pmm-admin has already been configured
# and we want to stop/remove running services, in case pod ips have changed
if pmm-admin info; then
pmm-admin stop --all
pmm-admin repair
pmm-admin rm --all
fi
pmm-admin config --server pmm.{{ $namespace }} --bind-address `hostname -I` --client-address ${HOSTNAME}.vttablet --force
pmm-admin repair
# wait for mysql to be available before registering
until [ -e /vtdataroot/tabletdata/mysql.sock ]; do
echo "Waiting for mysql.sock file"
sleep 1
done
# creates systemd services
pmm-admin add linux:metrics
pmm-admin add mysql:metrics --user root --socket /vtdataroot/tabletdata/mysql.sock --force
pmm-admin add mysql:queries --user root --socket /vtdataroot/tabletdata/mysql.sock --force --query-source=perfschema
# keep the container alive but still responsive to stop requests
trap : TERM INT; sleep infinity & wait
- name: pmm-client-metrics-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /vtdataroot/pmm/pmm-mysql-metrics-42002.log
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
{{- end -}}


@@ -1,203 +0,0 @@
###################################
# shard initializations
###################################
{{ define "shard" -}}
{{- $cell := index . 0 -}}
{{- $keyspace := index . 1 -}}
{{- $shard := index . 2 -}}
{{- $defaultVtctlclient := index . 3 -}}
{{- $namespace := index . 4 -}}
{{- $totalTabletCount := index . 5 -}}
{{- $cellClean := include "clean-label" $cell.name -}}
{{- $keyspaceClean := include "clean-label" $keyspace.name -}}
{{- $shardClean := include "clean-label" $shard.name -}}
{{- $shardName := printf "%s-%s-%s" $cellClean $keyspaceClean $shardClean | lower -}}
{{- with $cell.vtctld }}
# define image to use
{{- $vitessTag := .vitessTag | default $defaultVtctlclient.vitessTag }}
---
###################################
# InitShardMaster Job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $shardName }}-init-shard-master
spec:
backoffLimit: 1
template:
metadata:
labels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
initShardMasterJob: "true"
spec:
restartPolicy: OnFailure
containers:
- name: init-shard-master
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
SECONDS=0
TIMEOUT_SECONDS=600
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
# poll every 5 seconds to see if vtctld is ready
until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }} > /dev/null 2>&1; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for vtctlclient to be ready"
exit 1
fi
sleep 5
done
until [ $TABLETS_READY ]; do
# get all the tablets in the current cell
cellTablets="$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }})"
# filter to only the tablets in our current shard
shardTablets=$( echo "$cellTablets" | awk 'substr( $5,1,{{ len $shardName }} ) == "{{ $shardName }}" {print $0}')
# check for a master tablet from the ListAllTablets call
masterTablet=$( echo "$shardTablets" | awk '$4 == "master" {print $1}')
if [ $masterTablet ]; then
echo "'$masterTablet' is already the master tablet, exiting without running InitShardMaster"
exit
fi
# check for a master tablet from the GetShard call
              master_alias=$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid')
if [ "$master_alias" != "null" -a "$master_alias" != "" ]; then
echo "'$master_alias' is already the master tablet, exiting without running InitShardMaster"
exit
fi
# count the number of newlines for the given shard to get the tablet count
tabletCount=$( echo "$shardTablets" | wc | awk '{print $1}')
# check to see if the tablet count equals the expected tablet count
if [ $tabletCount == {{ $totalTabletCount }} ]; then
TABLETS_READY=true
else
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for tablets to be ready"
exit 1
fi
# wait 5 seconds for vttablets to continue getting ready
sleep 5
fi
done
# find the tablet id for the "-replica-0" stateful set for a given cell, keyspace and shard
tablet_id=$( echo "$shardTablets" | awk 'substr( $5,1,{{ add (len $shardName) 10 }} ) == "{{ $shardName }}-replica-0" {print $1}')
# initialize the shard master
until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC InitShardMaster -force {{ $keyspace.name }}/{{ $shard.name }} $tablet_id; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for InitShardMaster to succeed"
exit 1
fi
sleep 5
done
volumes:
{{ include "user-secret-volumes" (.secrets | default $defaultVtctlclient.secrets) | indent 8 }}
{{- $copySchema := ($keyspace.copySchema | default $shard.copySchema) -}}
{{- if $copySchema }}
---
###################################
# CopySchemaShard Job
###################################
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $keyspaceClean }}-copy-schema-{{ $shardClean }}
spec:
backoffLimit: 1
template:
metadata:
labels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
copySchemaShardJob: "true"
spec:
restartPolicy: OnFailure
containers:
- name: copy-schema
image: "vitess/vtctlclient:{{$vitessTag}}"
volumeMounts:
{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }}
command: ["bash"]
args:
- "-c"
- |
set -ex
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
SECONDS=0
TIMEOUT_SECONDS=600
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
# poll every 5 seconds to see if vtctld is ready
until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }} > /dev/null 2>&1; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for vtctlclient to be ready"
exit 1
fi
sleep 5
done
while true; do
if (( $SECONDS > $TIMEOUT_SECONDS )); then
echo "timed out waiting for master"
exit 1
fi
# wait for all shards to have a master
              master_alias=$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid')
if [ "$master_alias" == "null" -o "$master_alias" == "" ]; then
echo "no master for '{{ $keyspace.name }}/{{ $shard.name }}' yet, continuing to wait"
sleep 5
continue
fi
break
done
vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC CopySchemaShard {{ if $copySchema.tables -}}
-tables='
{{- range $index, $table := $copySchema.tables -}}
{{- if $index -}},{{- end -}}
{{ $table }}
{{- end -}}
'
{{- end }} {{ $copySchema.source }} {{ $keyspace.name }}/{{ $shard.name }}
volumes:
{{ include "user-secret-volumes" (.secrets | default $defaultVtctlclient.secrets) | indent 8 }}
{{ end }}
{{- end -}}
{{- end -}}


@@ -1,167 +0,0 @@
###################################
# vtctld Service + Deployment
###################################
{{ define "vtctld" -}}
# set tuple values to more recognizable variables
{{- $topology := index . 0 -}}
{{- $cell := index . 1 -}}
{{- $defaultVtctld := index . 2 -}}
{{- $namespace := index . 3 -}}
{{- $config := index . 4 -}}
{{- with $cell.vtctld -}}
# define image to use
{{- $vitessTag := .vitessTag | default $defaultVtctld.vitessTag -}}
{{- $cellClean := include "clean-label" $cell.name }}
###################################
# vtctld Service
###################################
kind: Service
apiVersion: v1
metadata:
name: vtctld
labels:
component: vtctld
app: vitess
spec:
ports:
- name: web
port: 15000
- name: grpc
port: 15999
selector:
component: vtctld
app: vitess
type: {{.serviceType | default $defaultVtctld.serviceType}}
---
###################################
# vtctld ServiceAccount
###################################
apiVersion: v1
kind: ServiceAccount
metadata:
name: vtctld
labels:
app: vitess
---
###################################
# vtctld RoleBinding
###################################
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: vtctld-topo-member
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vt-topo-member
subjects:
- kind: ServiceAccount
name: vtctld
namespace: {{ $namespace }}
---
###################################
# vtctld Deployment
###################################
apiVersion: apps/v1
kind: Deployment
metadata:
name: vtctld
spec:
replicas: {{.replicas | default $defaultVtctld.replicas}}
selector:
matchLabels:
app: vitess
component: vtctld
template:
metadata:
labels:
app: vitess
component: vtctld
spec:
serviceAccountName: vtctld
{{ include "pod-security" . | indent 6 }}
{{ include "vtctld-affinity" (tuple $cellClean $cell.region) | indent 6 }}
containers:
- name: vtctld
image: vitess/vtctld:{{$vitessTag}}
imagePullPolicy: IfNotPresent
readinessProbe:
httpGet:
path: /debug/health
port: 15000
initialDelaySeconds: 30
timeoutSeconds: 5
livenessProbe:
httpGet:
path: /debug/status
port: 15000
initialDelaySeconds: 30
timeoutSeconds: 5
env:
{{ include "backup-env" $config.backup | indent 12 }}
volumeMounts:
{{ include "backup-volumeMount" $config.backup | indent 12 }}
{{ include "user-secret-volumeMounts" (.secrets | default $defaultVtctld.secrets) | indent 12 }}
resources:
{{ toYaml (.resources | default $defaultVtctld.resources) | indent 12 }}
command:
- bash
- "-c"
- |
set -ex;
{{ include "backup-exec" $config.backup | indent 14 }}
eval exec /vt/bin/vtctld $(cat <<END_OF_COMMAND
-cell={{$cellClean | quote}}
-web_dir="/vt/web/vtctld"
-web_dir2="/vt/web/vtctld2/app"
-workflow_manager_init
-workflow_manager_use_election
-logtostderr=true
-stderrthreshold=0
-port=15000
-grpc_port=15999
-service_map="grpc-vtctl"
-topo_global_root=/vitess/global
-proxy_tablets=true
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-topo_implementation="etcd2"
-topo_global_server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-topo_implementation="k8s"
-topo_global_server_address="k8s"
{{- end }}
{{ include "backup-flags" (tuple $config.backup "vtctld") | indent 16 }}
{{ include "format-flags-all" (tuple $defaultVtctld.extraFlags .extraFlags) | indent 16 }}
END_OF_COMMAND
)
volumes:
{{ include "backup-volume" $config.backup | indent 8 }}
{{ include "user-secret-volumes" (.secrets | default $defaultVtctld.secrets) | indent 8 }}
{{- end -}}
{{- end -}}
###################################
# vtctld-affinity sets node affinity by region
###################################
{{ define "vtctld-affinity" -}}
# set tuple values to more recognizable variables
{{- $cellClean := index . 0 -}}
{{- $region := index . 1 -}}
{{ with $region }}
# affinity pod spec
affinity:
{{ include "node-affinity" $region | indent 2 }}
{{- end -}}
{{- end -}}


@@ -1,293 +0,0 @@
###################################
# vtgate Service + Deployment
###################################
{{ define "vtgate" -}}
# set tuple values to more recognizable variables
{{- $topology := index . 0 -}}
{{- $cell := index . 1 -}}
{{- $defaultVtgate := index . 2 -}}
{{- $namespace := index . 3 -}}
{{- with $cell.vtgate -}}
# define image to use
{{- $vitessTag := .vitessTag | default $defaultVtgate.vitessTag -}}
{{- $cellClean := include "clean-label" $cell.name }}
###################################
# vtgate Service
###################################
kind: Service
apiVersion: v1
metadata:
name: vtgate-{{ $cellClean }}
labels:
component: vtgate
cell: {{ $cellClean }}
app: vitess
spec:
ports:
- name: web
port: 15001
- name: grpc
port: 15991
{{ if $cell.mysqlProtocol.enabled }}
- name: mysql
port: 3306
{{ end }}
selector:
component: vtgate
cell: {{ $cellClean }}
app: vitess
type: {{.serviceType | default $defaultVtgate.serviceType}}
---
###################################
# vtgate ServiceAccount
###################################
apiVersion: v1
kind: ServiceAccount
metadata:
name: vtgate
labels:
app: vitess
---
###################################
# vtgate RoleBinding
###################################
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: vtgate-topo-member
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vt-topo-member
subjects:
- kind: ServiceAccount
name: vtgate
namespace: {{ $namespace }}
---
###################################
# vtgate Deployment
###################################
apiVersion: apps/v1
kind: Deployment
metadata:
name: vtgate-{{ $cellClean }}
spec:
replicas: {{.replicas | default $defaultVtgate.replicas}}
selector:
matchLabels:
app: vitess
component: vtgate
cell: {{ $cellClean }}
template:
metadata:
labels:
app: vitess
component: vtgate
cell: {{ $cellClean }}
spec:
serviceAccountName: vtgate
{{ include "pod-security" . | indent 6 }}
{{ include "vtgate-affinity" (tuple $cellClean $cell.region) | indent 6 }}
{{ if $cell.mysqlProtocol.enabled }}
{{ if eq $cell.mysqlProtocol.authType "secret" }}
initContainers:
{{ include "init-mysql-creds" (tuple $vitessTag $cell) | indent 8 }}
{{ end }}
{{ end }}
containers:
- name: vtgate
image: vitess/vtgate:{{$vitessTag}}
imagePullPolicy: IfNotPresent
readinessProbe:
httpGet:
path: /debug/health
port: 15001
initialDelaySeconds: 30
timeoutSeconds: 5
livenessProbe:
httpGet:
path: /debug/status
port: 15001
initialDelaySeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: creds
mountPath: "/mysqlcreds"
{{ include "user-secret-volumeMounts" (.secrets | default $defaultVtgate.secrets) | indent 12 }}
resources:
{{ toYaml (.resources | default $defaultVtgate.resources) | indent 12 }}
command:
- bash
- "-c"
- |
set -ex
eval exec /vt/bin/vtgate $(cat <<END_OF_COMMAND
-topo_global_root=/vitess/global
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-topo_implementation=etcd2
-topo_global_server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-topo_implementation="k8s"
-topo_global_server_address="k8s"
{{- end }}
-logtostderr=true
-stderrthreshold=0
-port=15001
-grpc_port=15991
{{ if $cell.mysqlProtocol.enabled }}
-mysql_server_port=3306
{{ if eq $cell.mysqlProtocol.authType "secret" }}
-mysql_auth_server_impl="static"
-mysql_auth_server_static_file="/mysqlcreds/creds.json"
{{ else if eq $cell.mysqlProtocol.authType "none" }}
-mysql_auth_server_impl="none"
{{ end }}
{{ end }}
-service_map="grpc-vtgateservice"
-cells_to_watch={{$cell.name | quote}}
-tablet_types_to_wait="MASTER,REPLICA"
-gateway_implementation="discoverygateway"
-cell={{$cell.name | quote}}
{{ include "format-flags-all" (tuple $defaultVtgate.extraFlags .extraFlags) | indent 16 }}
END_OF_COMMAND
)
volumes:
- name: creds
emptyDir: {}
{{ include "user-secret-volumes" (.secrets | default $defaultVtgate.secrets) | indent 8 }}
---
###################################
# vtgate PodDisruptionBudget
###################################
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: vtgate-{{ $cellClean }}
spec:
maxUnavailable: 1
selector:
matchLabels:
app: vitess
component: vtgate
cell: {{ $cellClean }}
{{ $maxReplicas := .maxReplicas | default .replicas }}
{{ if gt $maxReplicas .replicas }}
###################################
# optional HPA for vtgate
###################################
---
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: vtgate-{{ $cellClean }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: vtgate-{{ $cellClean }}
minReplicas: {{ .replicas }}
maxReplicas: {{ $maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: 70
{{- end -}}
{{- end -}}
{{- end -}}
###################################
# vtgate-affinity sets node/pod affinities
###################################
{{ define "vtgate-affinity" -}}
# set tuple values to more recognizable variables
{{- $cellClean := index . 0 -}}
{{- $region := index . 1 }}
# affinity pod spec
affinity:
{{ include "node-affinity" $region | indent 2 }}
podAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# prefer to be scheduled with same-cell vttablets
- weight: 10
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "vttablet"
cell: {{ $cellClean | quote }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# prefer to stay away from other same-cell vtgates
- weight: 10
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "vtgate"
cell: {{ $cellClean | quote }}
{{- end -}}
###################################
# init-container to set mysql credentials file
# it loops through the users and pulls out their
# respective passwords from mounted secrets
###################################
{{ define "init-mysql-creds" -}}
{{- $vitessTag := index . 0 -}}
{{- $cell := index . 1 -}}
{{- with $cell.mysqlProtocol }}
- name: init-mysql-creds
image: "vitess/vtgate:{{$vitessTag}}"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: creds
mountPath: "/mysqlcreds"
env:
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .passwordSecret }}
key: password
command: ["bash"]
args:
- "-c"
- |
set -ex
creds=$(cat <<END_OF_COMMAND
{
"{{ .username }}": [
{
"UserData": "{{ .username }}",
"Password": "$MYSQL_PASSWORD"
}
],
"vt_appdebug": []
}
END_OF_COMMAND
)
echo $creds > /mysqlcreds/creds.json
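      # the resulting creds.json (for a hypothetical username "vtuser") looks like:
      # {"vtuser":[{"UserData":"vtuser","Password":"<from secret>"}],"vt_appdebug":[]}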
{{- end -}}
{{- end -}}


@@ -1,710 +0,0 @@
###################################
# vttablet Service
###################################
{{ define "vttablet-service" -}}
# set tuple values to more recognizable variables
{{- $pmm := index . 0 }}
apiVersion: v1
kind: Service
metadata:
name: vttablet
labels:
app: vitess
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
publishNotReadyAddresses: true
ports:
- port: 15002
name: web
- port: 16002
name: grpc
{{ if $pmm.enabled }}
- port: 42001
name: query-data
- port: 42002
name: mysql-metrics
{{ end }}
clusterIP: None
selector:
app: vitess
component: vttablet
---
{{- end -}}
###################################
# vttablet ServiceAccount
###################################
{{ define "vttablet-serviceaccount" -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: vttablet
labels:
app: vitess
---
{{ end }}
###################################
# vttablet RoleBinding
###################################
{{ define "vttablet-topo-role-binding" -}}
{{- $namespace := index . 0 -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: vttablet-topo-member
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vt-topo-member
subjects:
- kind: ServiceAccount
name: vttablet
namespace: {{ $namespace }}
---
{{ end }}
###################################
# vttablet
###################################
{{ define "vttablet" -}}
# set tuple values to more recognizable variables
{{- $topology := index . 0 -}}
{{- $cell := index . 1 -}}
{{- $keyspace := index . 2 -}}
{{- $shard := index . 3 -}}
{{- $tablet := index . 4 -}}
{{- $defaultVttablet := index . 5 -}}
{{- $defaultVtctlclient := index . 6 -}}
{{- $namespace := index . 7 -}}
{{- $config := index . 8 -}}
{{- $pmm := index . 9 -}}
{{- $orc := index . 10 -}}
# sanitize inputs for labels
{{- $cellClean := include "clean-label" $cell.name -}}
{{- $keyspaceClean := include "clean-label" $keyspace.name -}}
{{- $shardClean := include "clean-label" $shard.name -}}
{{- $uid := "$(cat /vtdataroot/tabletdata/tablet-uid)" }}
{{- $setName := printf "%s-%s-%s-%s" $cellClean $keyspaceClean $shardClean $tablet.type | lower -}}
{{- $shardName := printf "%s-%s-%s" $cellClean $keyspaceClean $shardClean | lower -}}
{{- with $tablet.vttablet -}}
# define images to use
{{- $vitessTag := .vitessTag | default $defaultVttablet.vitessTag -}}
{{- $image := .image | default $defaultVttablet.image -}}
{{- $mysqlImage := .mysqlImage | default $defaultVttablet.mysqlImage }}
---
###################################
# vttablet StatefulSet
###################################
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ $setName | quote }}
spec:
serviceName: vttablet
replicas: {{ .replicas | default $defaultVttablet.replicas }}
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
type: {{ $tablet.type | quote }}
template:
metadata:
labels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
type: {{ $tablet.type | quote }}
spec:
serviceAccountName: vttablet
terminationGracePeriodSeconds: {{ $defaultVttablet.terminationGracePeriodSeconds | default 60000000 }}
{{ include "pod-security" . | indent 6 }}
{{ if eq ($topology.deploymentType | default "prod") "prod" }}
{{ include "vttablet-affinity" (tuple $cellClean $keyspaceClean $shardClean $cell.region) | indent 6 }}
{{ end }}
initContainers:
{{ include "init-mysql" (tuple $topology $vitessTag $cellClean) | indent 8 }}
{{ include "init-vttablet" (tuple $topology $vitessTag $cell $cellClean $namespace) | indent 8 }}
containers:
{{ include "cont-mysql" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $uid) | indent 8 }}
{{ include "cont-vttablet" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $defaultVtctlclient $vitessTag $uid $namespace $config $orc) | indent 8 }}
{{ if eq ($topology.deploymentType | default "prod") "prod" }}
{{ include "cont-logrotate" . | indent 8 }}
{{ include "cont-mysql-generallog" . | indent 8 }}
{{ include "cont-mysql-errorlog" . | indent 8 }}
{{ include "cont-mysql-slowlog" . | indent 8 }}
{{ end }}
{{ if $pmm.enabled }}{{ include "cont-pmm-client" (tuple $pmm $namespace $keyspace) | indent 8 }}{{ end }}
volumes:
- name: vt
emptyDir: {}
{{ include "backup-volume" $config.backup | indent 8 }}
{{ include "user-config-volume" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 8 }}
{{ include "user-secret-volumes" (.secrets | default $defaultVttablet.secrets) | indent 8 }}
{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }}
- name: config
configMap:
name: {{ $keyspace.pmm.config }}
{{ end }}{{ end }}
volumeClaimTemplates:
- metadata:
name: vtdataroot
annotations:
{{ toYaml (.dataVolumeClaimAnnotations | default $defaultVttablet.dataVolumeClaimAnnotations) | indent 10 }}
spec:
{{ toYaml (.dataVolumeClaimSpec | default $defaultVttablet.dataVolumeClaimSpec) | indent 8 }}
---
###################################
# vttablet PodDisruptionBudget
###################################
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ $setName | quote }}
spec:
maxUnavailable: 1
selector:
matchLabels:
app: vitess
component: vttablet
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
type: {{ $tablet.type | quote }}
# conditionally add cron job
{{ include "vttablet-backup-cron" (tuple $cellClean $keyspaceClean $shardClean $shardName $keyspace $shard $vitessTag $config.backup $namespace $defaultVtctlclient) }}
{{- end -}}
{{- end -}}
###################################
# init-container to copy binaries for mysql
###################################
{{ define "init-mysql" -}}
{{- $topology := index . 0 -}}
{{- $vitessTag := index . 1 -}}
{{- $cellClean := index . 2 }}
- name: "init-mysql"
image: "vitess/mysqlctld:{{$vitessTag}}"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vtdataroot
mountPath: "/vtdataroot"
- name: vt
mountPath: "/vttmp"
command: ["bash"]
args:
- "-c"
- |
set -ex
# set up the directories vitess needs
mkdir -p /vttmp/bin
mkdir -p /vtdataroot/tabletdata
# copy necessary assets to the volumeMounts
cp /vt/bin/mysqlctld /vttmp/bin/
cp /bin/busybox /vttmp/bin/
cp -R /vt/config /vttmp/
# make sure the log files exist
touch /vtdataroot/tabletdata/error.log
touch /vtdataroot/tabletdata/slow-query.log
touch /vtdataroot/tabletdata/general.log
# remove the old socket file if it is still around
rm -f /vtdataroot/tabletdata/mysql.sock
rm -f /vtdataroot/tabletdata/mysql.sock.lock
{{- end -}}
###################################
# init-container to set tablet uid + register tablet with global topo
# This converts the unique identity assigned by StatefulSet (pod name)
# into a 31-bit unsigned integer for use as a Vitess tablet UID.
###################################
{{ define "init-vttablet" -}}
{{- $topology := index . 0 -}}
{{- $vitessTag := index . 1 -}}
{{- $cell := index . 2 -}}
{{- $cellClean := index . 3 -}}
{{- $namespace := index . 4 }}
- name: init-vttablet
image: "vitess/vtctl:{{$vitessTag}}"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vtdataroot
mountPath: "/vtdataroot"
command: ["bash"]
args:
- "-c"
- |
set -ex
# Split pod name (via hostname) into prefix and ordinal index.
hostname=$(hostname -s)
[[ $hostname =~ ^(.+)-([0-9]+)$ ]] || exit 1
pod_prefix=${BASH_REMATCH[1]}
pod_index=${BASH_REMATCH[2]}
# Prepend cell name since tablet UIDs must be globally unique.
uid_name={{$cell.name | replace "_" "-" | lower}}-$pod_prefix
# Take MD5 hash of cellname-podprefix.
uid_hash=$(echo -n $uid_name | md5sum | awk "{print \$1}")
# Take first 24 bits of hash, convert to decimal.
# Shift left 2 decimal digits, add in index.
tablet_uid=$((16#${uid_hash:0:6} * 100 + $pod_index))
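      # worked example (hypothetical hash): if uid_hash begins "a1b2c3" and
      # pod_index=1, tablet_uid = 16#a1b2c3 * 100 + 1 = 1059705901; even the
      # maximum, 16#ffffff * 100 + 99 = 1677721599, fits in 31 bits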
# Save UID for other containers to read.
echo $tablet_uid > /vtdataroot/tabletdata/tablet-uid
# Tell MySQL what hostname to report in SHOW SLAVE HOSTS.
echo report-host=$hostname.vttablet > /vtdataroot/tabletdata/report-host.cnf
      # Orchestrator reads this file, so it should match the -tablet_hostname
      # flag passed to the vttablet container.
      # make sure the cell is registered in the global topology
eval exec /vt/bin/vtctl $(cat <<END_OF_COMMAND
-topo_global_root=/vitess/global
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-topo_implementation="etcd2"
-topo_global_server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-topo_implementation=k8s
-topo_global_server_address=k8s
{{- end }}
-logtostderr=true
-stderrthreshold=0
UpdateCellInfo
-root /vitess/{{ $cell.name }}
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-server_address=k8s
{{- end }}
{{ $cellClean | quote}}
END_OF_COMMAND
)
{{- end -}}
##########################
# main vttablet container
##########################
{{ define "cont-vttablet" -}}
{{- $topology := index . 0 -}}
{{- $cell := index . 1 -}}
{{- $keyspace := index . 2 -}}
{{- $shard := index . 3 -}}
{{- $tablet := index . 4 -}}
{{- $defaultVttablet := index . 5 -}}
{{- $defaultVtctlclient := index . 6 -}}
{{- $vitessTag := index . 7 -}}
{{- $uid := index . 8 -}}
{{- $namespace := index . 9 -}}
{{- $config := index . 10 -}}
{{- $orc := index . 11 -}}
{{- $cellClean := include "clean-label" $cell.name -}}
{{- with $tablet.vttablet }}
- name: vttablet
image: "vitess/vttablet:{{$vitessTag}}"
imagePullPolicy: IfNotPresent
readinessProbe:
httpGet:
path: /debug/health
port: 15002
initialDelaySeconds: 60
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /debug/status
port: 15002
initialDelaySeconds: 60
timeoutSeconds: 10
volumeMounts:
- name: vtdataroot
mountPath: "/vtdataroot"
{{ include "backup-volumeMount" $config.backup | indent 4 }}
{{ include "user-config-volumeMount" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 4 }}
{{ include "user-secret-volumeMounts" (.secrets | default $defaultVttablet.secrets) | indent 4 }}
resources:
{{ toYaml (.resources | default $defaultVttablet.resources) | indent 6 }}
ports:
- name: web
containerPort: 15002
- name: grpc
containerPort: 16002
env:
{{ include "vitess-env" . | indent 4 }}
{{ include "backup-env" $config.backup | indent 4 }}
- name: VT_DB_FLAVOR
valueFrom:
configMapKeyRef:
name: vitess-cm
key: db.flavor
lifecycle:
preStop:
exec:
command:
- "bash"
- "-c"
- |
set -x
VTCTLD_SVC=vtctld.{{ $namespace }}:15999
VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }})
master_alias_json=$(/vt/bin/vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }})
master_cell=$(jq -r '.master_alias.cell' <<< "$master_alias_json")
master_uid=$(jq -r '.master_alias.uid' <<< "$master_alias_json")
master_alias=$master_cell-$master_uid
current_uid=$(cat /vtdataroot/tabletdata/tablet-uid)
current_alias={{ $cell.name }}-$current_uid
if [ "$master_alias" != "$current_alias" ]; then
# since this isn't the master, there's no reason to reparent
exit
fi
# TODO: add more robust health checks to make sure that we don't initiate a reparent
# if there isn't a healthy enough replica to take over
# - seconds behind master
# - use GTID_SUBTRACT
RETRY_COUNT=0
MAX_RETRY_COUNT=100000
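# (100000 retries at 10s apart is roughly 11.5 days, i.e. effectively
# retry until the reparent succeeds or the pod is killed)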
hostname=$(hostname -s)
# retry reparenting
until [ $DONE_REPARENTING ]; do
{{ if $orc.enabled }}
# tell orchestrator to not attempt a recovery for 10 seconds while we are in the middle of reparenting
wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/begin-downtime/$hostname.vttablet/3306/preStopHook/VitessPlannedReparent/10s"
{{ end }}
# reparent before shutting down
/vt/bin/vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC PlannedReparentShard -keyspace_shard={{ $keyspace.name }}/{{ $shard.name }} -avoid_master=$current_alias
# capture the exit code now, before the orchestrator calls below overwrite $?
reparent_status=$?
{{ if $orc.enabled }}
# tell orchestrator to refresh its view of this tablet
wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/refresh/$hostname.vttablet/3306"
# let orchestrator attempt recoveries now
wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/end-downtime/$hostname.vttablet/3306"
{{ end }}
# if PlannedReparentShard succeeded, then don't retry
if [ $reparent_status -eq 0 ]; then
DONE_REPARENTING=true
# if we've reached the max retry count, exit unsuccessfully
elif [ $RETRY_COUNT -eq $MAX_RETRY_COUNT ]; then
exit 1
# otherwise, increment the retry count and sleep for 10 seconds
else
let RETRY_COUNT=RETRY_COUNT+1
sleep 10
fi
done
# delete the current tablet from topology. Not strictly necessary, but helps to prevent
# edge cases where there are two masters
/vt/bin/vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC DeleteTablet $current_alias
{{ if $orc.enabled }}
# tell orchestrator to forget the tablet, to prevent confusion / race conditions while the tablet restarts
wget -q -S -O - "http://orchestrator.{{ $namespace }}/api/forget/$hostname.vttablet/3306"
{{ end }}
command: ["bash"]
args:
- "-c"
- |
set -ex
{{ include "mycnf-exec" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 6 }}
{{ include "backup-exec" $config.backup | indent 6 }}
eval exec /vt/bin/vttablet $(cat <<END_OF_COMMAND
-topo_global_root /vitess/global
{{- if eq ($cell.topologyProvider | default "") "etcd2" }}
-topo_implementation="etcd2"
-topo_global_server_address="etcd-global-client.{{ $namespace }}:2379"
{{- else }}
-topo_implementation k8s
-topo_global_server_address k8s
{{- end }}
-logtostderr
-port 15002
-grpc_port 16002
-service_map "grpc-queryservice,grpc-tabletmanager,grpc-updatestream"
-tablet_dir "tabletdata"
-tablet-path "{{ $cell.name }}-$(cat /vtdataroot/tabletdata/tablet-uid)"
-tablet_hostname "$(hostname).vttablet"
-init_keyspace {{ $keyspace.name | quote }}
-init_shard {{ $shard.name | quote }}
-init_tablet_type {{ $tablet.type | quote }}
-health_check_interval "5s"
-mysqlctl_socket "/vtdataroot/mysqlctl.sock"
-enable_replication_reporter
{{ if $defaultVttablet.useKeyspaceNameAsDbName }}
-init_db_name_override {{ $keyspace.name | quote }}
{{ end }}
{{ if $defaultVttablet.enableSemisync }}
-enable_semi_sync
{{ end }}
{{ if $defaultVttablet.enableHeartbeat }}
-heartbeat_enable
{{ end }}
{{ if $orc.enabled }}
-orc_api_url "http://orchestrator.{{ $namespace }}/api"
-orc_discover_interval "5m"
{{ end }}
{{ include "backup-flags" (tuple $config.backup "vttablet") | indent 8 }}
{{ include "format-flags-all" (tuple $defaultVttablet.extraFlags .extraFlags) | indent 8 }}
END_OF_COMMAND
)
{{- end -}}
{{- end -}}
##########################
# main mysql container
##########################
{{ define "cont-mysql" -}}
{{- $topology := index . 0 -}}
{{- $cell := index . 1 -}}
{{- $keyspace := index . 2 -}}
{{- $shard := index . 3 -}}
{{- $tablet := index . 4 -}}
{{- $defaultVttablet := index . 5 -}}
{{- $uid := index . 6 -}}
{{- with $tablet.vttablet }}
- name: mysql
image: {{.mysqlImage | default $defaultVttablet.mysqlImage | quote}}
imagePullPolicy: IfNotPresent
readinessProbe:
exec:
command: ["mysqladmin", "ping", "-uroot", "--socket=/vtdataroot/tabletdata/mysql.sock"]
initialDelaySeconds: 60
timeoutSeconds: 10
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
- name: vt
mountPath: /vt
{{ include "user-config-volumeMount" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 4 }}
{{ include "user-secret-volumeMounts" (.secrets | default $defaultVttablet.secrets) | indent 4 }}
resources:
{{ toYaml (.mysqlResources | default $defaultVttablet.mysqlResources) | indent 6 }}
env:
{{ include "vitess-env" . | indent 4 }}
- name: VT_DB_FLAVOR
valueFrom:
configMapKeyRef:
name: vitess-cm
key: db.flavor
lifecycle:
preStop:
exec:
command:
- "bash"
- "-c"
- |
set -x
# block shutting down mysqlctld until vttablet shuts down first
until [ $VTTABLET_GONE ]; do
# poll every 5 seconds to see if vttablet is still running
/vt/bin/busybox wget --spider localhost:15002/debug/vars
if [ $? -ne 0 ]; then
VTTABLET_GONE=true
fi
sleep 5
done
command: ["bash"]
args:
- "-c"
- |
set -ex
{{ include "mycnf-exec" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 6 }}
{{- if eq ($topology.deploymentType | default "prod") "test" }}
export EXTRA_MY_CNF="$EXTRA_MY_CNF:/vt/config/mycnf/default-fast.cnf"
{{- end }}
eval exec /vt/bin/mysqlctld $(cat <<END_OF_COMMAND
-logtostderr=true
-stderrthreshold=0
-tablet_dir "tabletdata"
-tablet_uid "{{$uid}}"
-socket_file "/vtdataroot/mysqlctl.sock"
-init_db_sql_file "/vt/config/init_db.sql"
END_OF_COMMAND
)
{{- end -}}
{{- end -}}
####################################
# Everything below here is enabled only if deploymentType is prod.
####################################
##########################
# run logrotate for all log files in /vtdataroot/tabletdata
##########################
{{ define "cont-logrotate" }}
- name: logrotate
image: vitess/logrotate:helm-2.0.0-0
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
{{- end -}}
##########################
# redirect the error log file to stdout
##########################
{{ define "cont-mysql-errorlog" }}
- name: error-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /vtdataroot/tabletdata/error.log
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
{{- end -}}
##########################
# redirect the slow log file to stdout
##########################
{{ define "cont-mysql-slowlog" }}
- name: slow-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /vtdataroot/tabletdata/slow-query.log
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
{{- end -}}
##########################
# redirect the general log file to stdout
##########################
{{ define "cont-mysql-generallog" }}
- name: general-log
image: vitess/logtail:helm-2.0.0-0
imagePullPolicy: IfNotPresent
env:
- name: TAIL_FILEPATH
value: /vtdataroot/tabletdata/general.log
volumeMounts:
- name: vtdataroot
mountPath: /vtdataroot
{{- end -}}
###################################
# vttablet-affinity sets node/pod affinities
###################################
{{ define "vttablet-affinity" -}}
# set tuple values to more recognizable variables
{{- $cellClean := index . 0 -}}
{{- $keyspaceClean := index . 1 -}}
{{- $shardClean := index . 2 -}}
{{- $region := index . 3 }}
# affinity pod spec
affinity:
{{ include "node-affinity" $region | indent 2 }}
podAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# prefer to be scheduled with same-cell vtgates
- weight: 10
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "vtgate"
cell: {{ $cellClean | quote }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
# strongly prefer to stay away from same shard vttablets
- weight: 100
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "vttablet"
cell: {{ $cellClean | quote }}
keyspace: {{ $keyspaceClean | quote }}
shard: {{ $shardClean | quote }}
# prefer to stay away from any vttablets
- weight: 10
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "vitess"
component: "vttablet"
{{- end -}}

View file

@@ -1,132 +0,0 @@
# Create global resources.
---
# Create role for topology crd
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: vt-topo-member
rules:
- apiGroups:
- topo.vitess.io
resources:
- vitesstoponodes
verbs:
- '*'
---
# create a single vttablet service
{{ include "vttablet-service" (tuple $.Values.pmm) }}
# create a single vttablet serviceaccount
{{ include "vttablet-serviceaccount" (tuple $.Release.Namespace) }}
# create a single vttablet rolebinding
{{ if eq $.Values.topology.globalCell.topologyProvider "k8s" }}
{{ include "vttablet-topo-role-binding" (tuple $.Release.Namespace) }}
{{ end }}
---
{{ if $.Values.pmm.enabled }}
# create the pmm service and stateful set
{{ include "pmm" (tuple $.Values.pmm $.Release.Namespace) }}
---
{{ end }}
{{ if $.Values.orchestrator.enabled }}
# create orchestrator global services and StatefulSet
{{ include "orchestrator" (tuple $.Values.orchestrator $.Values.vtctlclient) }}
---
# create orchestrator config map
{{ include "orchestrator-config" (tuple $.Values.orchestrator $.Release.Namespace $.Values.vttablet.enableHeartbeat $.Values.vtctlclient) }}
---
# create a Service per StatefulSet replica
{{ range $i := until (int $.Values.orchestrator.replicas) }}
{{ include "orchestrator-statefulset-service" (tuple $.Values.orchestrator $i) }}
---
{{ end }}
{{ end }}
{{ if eq $.Values.topology.globalCell.topologyProvider "etcd2" }}
# create an etcd cluster for the global topology
{{- $replicas := $.Values.topology.globalCell.replicas | default $.Values.etcd.replicas -}}
{{- $version := $.Values.topology.globalCell.version | default $.Values.etcd.version -}}
{{- $resources := $.Values.topology.globalCell.resources | default $.Values.etcd.resources -}}
{{- $clusterWide := $.Values.topology.globalCell.clusterWide | default $.Values.etcd.clusterWide -}}
{{ include "etcd" (tuple "global" $replicas $version $resources $clusterWide) }}
{{ end }}
# Create requested resources in each cell.
{{ range $cell := $.Values.topology.cells }}
{{ if eq ($cell.topologyProvider | default "") "etcd2" }}
---
# create an etcd cluster per cell
{{- $cellClean := include "clean-label" $cell.name -}}
{{- $replicas := $cell.etcd.replicas | default $.Values.etcd.replicas -}}
{{- $version := $cell.etcd.version | default $.Values.etcd.version -}}
{{- $resources := $cell.etcd.resources | default $.Values.etcd.resources -}}
{{- $clusterWide := $cell.etcd.clusterWide | default $.Values.etcd.clusterWide -}}
{{ include "etcd" (tuple $cellClean $replicas $version $resources $clusterWide) }}
{{ end }}
---
# create one controller per cell
{{ include "vtctld" (tuple $.Values.topology $cell $.Values.vtctld $.Release.Namespace $.Values.config) }}
---
# create a pool of vtgates per cell
{{ include "vtgate" (tuple $.Values.topology $cell $.Values.vtgate $.Release.Namespace) }}
# Tablets for keyspaces
{{ range $keyspace := $cell.keyspaces }}
# Keyspace initializations
{{ include "keyspace" (tuple $cell $keyspace $.Values.vtctlclient $.Release.Namespace) }}
{{ range $shard := $keyspace.shards }}
{{ $totalTabletCount := len (include "tablet-count" $shard.tablets) }}
# Shard initializations
{{ include "shard" (tuple $cell $keyspace $shard $.Values.vtctlclient $.Release.Namespace $totalTabletCount) }}
# Tablet initializations
{{ range $tablet := $shard.tablets }}
{{ include "vttablet" (tuple $.Values.topology $cell $keyspace $shard $tablet $.Values.vttablet $.Values.vtctlclient $.Release.Namespace $.Values.config $.Values.pmm $.Values.orchestrator) }}
{{ end }} # range $tablet
{{ end }} # range $shard
{{ end }} # range $keyspace
{{ end }} # range $cell
{{ range $job := $.Values.jobs }}
{{ if eq $job.kind "vtctlclient" }}
{{ include "vtctlclient-job" (tuple $job $.Values.vtctlclient $.Release.Namespace) }}
{{ else }}
{{ range $cell := $.Values.topology.cells }}
{{ if eq $cell.name $job.cell }}
{{ include "vtworker-job" (tuple $job $.Values.vtworker $.Release.Namespace $cell) }}
{{ end }}
{{ end }}
{{ end }}
{{ end }}
---
{{ with $.Values.config }}
# shared ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
name: vitess-cm
data:
backup.backup_storage_implementation: {{ .backup.backup_storage_implementation }}
backup.gcs_backup_storage_bucket: {{ .backup.gcs_backup_storage_bucket }}
backup.gcs_backup_storage_root: {{ .backup.gcs_backup_storage_root }}
backup.s3_backup_aws_region: {{ .backup.s3_backup_aws_region }}
backup.s3_backup_storage_bucket: {{ .backup.s3_backup_storage_bucket }}
backup.s3_backup_storage_root: {{ .backup.s3_backup_storage_root }}
backup.s3_backup_server_side_encryption: {{ .backup.s3_backup_server_side_encryption }}
db.flavor: {{ $.Values.vttablet.flavor }}
{{ end }} # end with config

View file

@@ -1,393 +0,0 @@
# This file contains default values for vitess.
#
# You can override these defaults when installing:
# helm install -f site-values.yaml .
#
# The contents of site-values.yaml will be merged into this default config.
# It's not necessary to copy the defaults into site-values.yaml.
#
# For command-line flag maps like backupFlags or extraFlags,
# use 'flag_name: true|false' to enable or disable a boolean flag.
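# For example, to enable one boolean flag and disable another
# (these flag names are just illustrative):
# extraFlags:
#   enable_semi_sync: true
#   logtostderr: false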
# The main topology map declares what resources should be created.
# Values for each component (etcd, vtctld, ...) that are not specified here
# will be taken from defaults defined below.
# topology:
# config will be stored as a ConfigMap and mounted where appropriate
config:
# Backup flags will be applied to components that need them.
# These are defined globally since all components should agree.
backup:
enabled: false
# this creates 1 cron job per shard that will execute a backup using vtctlclient
# on this schedule. The job itself uses almost no resources.
cron:
# the default schedule runs daily at midnight unless overridden by the individual shard
schedule: "0 0 * * *"
# if this is set to true, the cron jobs are created, but never execute
suspend: false
# choose a backup service - valid values are gcs/s3
# TODO: add file and ceph support
backup_storage_implementation: gcs
#########
# gcs settings
#########
# Google Cloud Storage bucket to use for backups
gcs_backup_storage_bucket: vitess-backups
# root prefix for all backup-related object names
gcs_backup_storage_root: vtbackups
# secret that contains Google service account json with read/write access to the bucket
# kubectl create secret generic vitess-backups-creds --from-file=gcp-creds.json
# can be omitted if running on a GCE/GKE node with default permissions
gcsSecret: vitess-gcs-creds
#########
# s3 settings
#########
# AWS region to use
s3_backup_aws_region: us-east-1
# S3 bucket to use for backups
s3_backup_storage_bucket: vitess-backups
# root prefix for all backup-related object names
s3_backup_storage_root: vtbackups
# server-side encryption algorithm (e.g., AES256, aws:kms)
s3_backup_server_side_encryption: AES256
# secret that contains AWS S3 credentials file with read/write access to the bucket
# kubectl create secret generic s3-credentials --from-file=s3-creds
# can be omitted if running on a node with default permissions
s3Secret: vitess-s3-creds
topology:
# deploymentType can be "test" or "prod". Default is "prod".
# If the value is "test", then mysql is instantiated with a smaller footprint,
# and for vttablet, additional containers like logrotate will not be brought up.
deploymentType: prod
globalCell:
topologyProvider: k8s
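# the chart also supports etcd for the global topo (see the etcd defaults below), e.g.:
# topologyProvider: etcd2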
etcd:
replicas: 3
cells:
- name: zone1
topologyProvider: k8s
# set failure-domain.beta.kubernetes.io/region
# region: eastus
## You MUST set up at least one keyspace, as these are what actually define and
# create the Vitess cluster
#
# keyspaces:
# - name: "commerce"
# shards:
# - name: "0"
# tablets:
# - type: "replica"
# vttablet:
# replicas: 2
## this defines named jobs that will run vtctlclient ApplySchema to initialize
# your tables
#
# schema:
# phase1: |-
# create table product(
# sku varbinary(128),
# description varbinary(128),
# price bigint,
# primary key(sku)
# );
# create table customer(
# user_id bigint not null auto_increment,
# email varbinary(128),
# primary key(user_id)
# );
# create table corder(
# order_id bigint not null auto_increment,
# user_id bigint,
# product_id bigint,
# msrp bigint,
# primary key(order_id)
# );
## this defines named jobs that will run vtctlclient ApplyVSchema
#
# vschema:
# phase1: |-
# {
# "tables": {
# "product": {},
# "customer": {},
# "corder": {}
# }
# }
## this defines keyspace-specific information for PMM
# pmm:
## PMM supports collecting metrics from custom SQL queries in a file named queries-mysqld.yml
# The specified ConfigMap will be mounted in a directory, so the file name is important.
# https://www.percona.com/blog/2018/10/10/percona-monitoring-and-management-pmm-1-15-0-is-now-available/
# config: pmm-commerce-config
# enable or disable mysql protocol support, with accompanying auth details
mysqlProtocol:
enabled: false
authType: secret
# authType can be: none or secret. For secret, perform the following changes:
# username: myuser
# this is the secret that will be mounted as the user password
# kubectl create secret generic myuser-password --from-literal=password=abc123
# passwordSecret: myuser-password
etcd:
replicas: 3
vtctld:
replicas: 1
vtgate:
replicas: 3
# if maxReplicas is higher than replicas, an HPA will be created
# maxReplicas: 6
jobs:
# examples:
#- name: "split-clone-customer"
# kind: "vtworker"
# cell: "zone1"
# command: "SplitClone customer/0"
#- name: "list-all-tablet-zone1"
# kind: "vtctlclient"
# command: "ListAllTablets zone1"
# Default values for etcd resources defined in 'topology'
etcd:
version: "3.3.10"
replicas: 3
resources:
requests:
cpu: 200m
memory: 100Mi
# If clusterWide is set to true, an annotation will be added to the EtcdCluster
# to make this cluster managed by cluster-wide operators
# clusterWide: true
# Default values for vtctld resources defined in 'topology'
vtctld:
serviceType: ClusterIP
vitessTag: helm-2.0.2-0
resources:
# requests:
# cpu: 100m
# memory: 128Mi
extraFlags: {}
secrets: [] # secrets are mounted under /vt/usersecrets/{secretname}
# Default values for vtgate resources defined in 'topology'
vtgate:
serviceType: ClusterIP
vitessTag: helm-2.0.2-0
resources:
# requests:
# cpu: 500m
# memory: 512Mi
# Additional flags that will be appended to the vtgate command.
# The options below are the most commonly adjusted, but any flag can be put here.
# run vtgate --help to see all available flags
extraFlags:
# MySQL server version to advertise. (default "5.7.9-Vitess")
# If running 8.0, you may prefer to use something like "8.0.13-Vitess"
# to prevent db clients from running deprecated queries on startup
mysql_server_version: "5.7.9-Vitess"
secrets: [] # secrets are mounted under /vt/usersecrets/{secretname}
# Default values for vtctlclient resources defined in 'topology'
vtctlclient:
vitessTag: helm-2.0.2-0
extraFlags: {}
secrets: [] # secrets are mounted under /vt/usersecrets/{secretname}
# Default values for vtworker resources defined in 'jobs'
vtworker:
vitessTag: helm-2.0.2-0
extraFlags: {}
resources:
# requests:
# cpu: 500m
# memory: 512Mi
secrets: [] # secrets are mounted under /vt/usersecrets/{secretname}
# Default values for vttablet resources defined in 'topology'
vttablet:
vitessTag: helm-2.0.2-0
# valid values are
# - mysql80 (for MySQL 8.0)
# - mysql56 (for MySQL/Percona 5.6 or 5.7)
# - mariadb (for MariaDB <= 10.2)
# - mariadb103 (for MariaDB >= 10.3)
# the flavor determines the base my.cnf file for vitess to function
flavor: mysql56
mysqlImage: percona:5.7.26
# mysqlImage: mysql:5.7.24
# mysqlImage: mariadb:10.3.11
enableHeartbeat: false
# This requires at least 2 tablets of type "replica", otherwise semi-sync
# will block forever. "rdonly" tablets do not ACK.
enableSemisync: false
# This sets the vttablet flag "-init_db_name_override" to the keyspace name, rather
# than the default "vt_<keyspace>". This works better with many MySQL client applications.
useKeyspaceNameAsDbName: true
# The name of a config map with N files inside of it. Each file will be added
# to $EXTRA_MY_CNF, overriding any default my.cnf settings
extraMyCnf: ""
# extraMyCnf: extra-my-cnf
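# the ConfigMap itself can be created from local .cnf files, e.g.
# (the file name here is hypothetical):
# kubectl create configmap extra-my-cnf --from-file=tuning.cnf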
# mysqlSize can be "test" or "prod". Default is "prod".
# If the value is "test", then mysql is instantiated with a smaller footprint.
mysqlSize: prod
# Additional flags that will be appended to the vttablet command.
# The options below are the most commonly adjusted, but any flag can be put here.
# run vttablet --help to see all available flags
extraFlags:
# query server max result size, maximum number of rows allowed to return
# from vttablet for non-streaming queries.
queryserver-config-max-result-size: 10000
# query server query timeout (in seconds); this is the query timeout on the
# vttablet side. If a query runs longer than this, it will be killed.
queryserver-config-query-timeout: 30
# query server connection pool size; this connection pool is used by
# regular queries (non-streaming, not in a transaction)
queryserver-config-pool-size: 24
# query server stream connection pool size; the stream pool is used by
# streaming queries, i.e. queries that return results to the client in a streaming fashion
queryserver-config-stream-pool-size: 100
# query server transaction cap is the maximum number of transactions allowed
# to be in flight at any given time on a single vttablet.
# e.g. with a transaction cap of 100, at most 100 transactions
# will be processed concurrently by a vttablet, and the 101st transaction will be blocked
# (and will fail if it cannot get a connection within the specified timeout)
queryserver-config-transaction-cap: 300
# Size of the connection pool for app connections
app_pool_size: 40
# Size of the connection pool for dba connections
dba_pool_size: 20
# User secrets that will be mounted under /vt/usersecrets/{secretname}/
secrets: []
resources:
# common production values 2-4CPU/4-8Gi RAM
# requests:
# cpu: 2
# memory: 4Gi
mysqlResources:
# common production values 4CPU/8-16Gi RAM
# requests:
# cpu: 4
# memory: 8Gi
# PVC for mysql
dataVolumeClaimAnnotations:
dataVolumeClaimSpec:
# pd-ssd (Google Cloud)
# managed-premium (Azure)
# gp2 (AWS)
# Note: Leave storageClassName unset to use cluster-specific default class.
#storageClassName: pd-ssd
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
# Default values for pmm
pmm:
enabled: false
pmmTag: 1.17.0
client:
resources:
requests:
cpu: 50m
memory: 128Mi
server:
resources:
# requests:
# cpu: 500m
# memory: 1Gi
# PVC for pmm
dataVolumeClaimAnnotations:
dataVolumeClaimSpec:
# storageClassName: pd-ssd
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
env:
# DISABLE_TELEMETRY
# With telemetry enabled, your PMM Server sends some statistics to v.percona.com every 24 hours
disableTelemetry: true
# METRICS_RESOLUTION (Option)
# This option sets the minimum resolution for checking metrics. You should set it if the latency is higher than 1 second
metricsResolution: 1s
# METRICS_RETENTION (Option)
# This option determines how long metrics are stored at PMM Server.
# The value is passed as a combination of hours, minutes, and seconds, such as 720h0m0s.
# The minutes (a number followed by m) and seconds (a number followed by s) are optional.
metricsRetention: 720h
# QUERIES_RETENTION
# This option determines how many days queries are stored at PMM Server
queriesRetention: 8
# METRICS_MEMORY (Option) -- TODO: automatically calculate based on resource limits
# NOTE: The value must be passed in kilobytes
# NOTE: Make sure to quote this value so it isn't converted into scientific notation
# By default, Prometheus in PMM Server uses up to 768 MB of memory for storing the most recently used data chunks.
# Depending on the amount of data coming into Prometheus, you may require a higher limit to avoid throttling data ingestion,
# or allow less memory consumption if it is needed for other processes.
# The limit affects only memory reserved for data chunks. Actual RAM usage by Prometheus is higher.
# It is recommended to set this limit to roughly 2/3 of the total memory that you are planning to allow for Prometheus.
metricsMemory: "600000"
# Orchestrator requires version >= 3.0.9 and Kubernetes >= 1.9 to work
# Default values for orchestrator resources
orchestrator:
enabled: false
image: vitess/orchestrator:3.1.1
replicas: 3
resources:
requests:
cpu: 50m
memory: 350Mi