From ece6eb65d09ecca81b50684d38f66b580499ec5d Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Wed, 22 Sep 2021 18:06:19 -0400 Subject: [PATCH] Remove the deprecated Helm charts and related code This work was deprecated in Vitess 7.0: https://github.com/vitessio/vitess/issues/6439 Signed-off-by: Matt Lord --- doc/DockerBuild.md | 3 - doc/internal/ReleaseInstructions.md | 4 - examples/helm/101_initial_cluster.yaml | 57 -- examples/helm/201_customer_tablets.yaml | 35 - examples/helm/202_move_tables.yaml | 40 - examples/helm/203_switch_reads.yaml | 43 -- examples/helm/204_switch_writes.yaml | 40 - examples/helm/205_clean_commerce.yaml | 60 -- examples/helm/301_customer_sharded.yaml | 94 --- examples/helm/302_new_shards.yaml | 49 -- examples/helm/303_reshard.yaml | 50 -- examples/helm/304_switch_reads.yaml | 53 -- examples/helm/305_switch_writes.yaml | 50 -- examples/helm/306_down_shard_0.yaml | 40 - examples/helm/307_delete_shard_0.yaml | 45 -- examples/helm/308_final.yaml | 40 - examples/helm/README.md | 58 -- examples/helm/legacy/101_initial_cluster.yaml | 74 -- .../helm/legacy/201_customer_keyspace.yaml | 50 -- .../helm/legacy/202_customer_tablets.yaml | 75 -- examples/helm/legacy/203_vertical_split.yaml | 61 -- .../legacy/204_vertical_migrate_replicas.yaml | 63 -- .../legacy/205_vertical_migrate_master.yaml | 60 -- examples/helm/legacy/206_clean_commerce.yaml | 70 -- .../helm/legacy/301_customer_sharded.yaml | 114 --- examples/helm/legacy/302_new_shards.yaml | 75 -- .../helm/legacy/303_horizontal_split.yaml | 77 -- .../helm/legacy/304_migrate_replicas.yaml | 79 -- examples/helm/legacy/305_migrate_master.yaml | 76 -- examples/helm/legacy/306_down_shard_0.yaml | 63 -- examples/helm/legacy/307_delete_shard_0.yaml | 68 -- examples/helm/legacy/308_final.yaml | 63 -- examples/helm/legacy/kmysql.sh | 36 - examples/helm/legacy/kvtctld.sh | 19 - examples/helm/pf.sh | 14 - helm/README.md | 12 - helm/release.sh | 73 -- helm/vitess/.gitignore | 2 - helm/vitess/.helmignore | 21 - helm/vitess/CHANGELOG.md | 86 --- helm/vitess/Chart.yaml | 20 - helm/vitess/README.md | 432 ----------- helm/vitess/crds/VitessTopoNodes-crd.yaml | 42 -- helm/vitess/examples/minikube.yaml | 65 -- helm/vitess/templates/NOTES.txt | 14 - helm/vitess/templates/_cron-jobs.tpl | 82 -- helm/vitess/templates/_etcd.tpl | 41 - helm/vitess/templates/_helpers.tpl | 392 ---------- helm/vitess/templates/_jobs.tpl | 141 ---- helm/vitess/templates/_keyspace.tpl | 136 ---- helm/vitess/templates/_orchestrator-conf.tpl | 150 ---- helm/vitess/templates/_orchestrator.tpl | 222 ------ helm/vitess/templates/_pmm.tpl | 231 ------ helm/vitess/templates/_shard.tpl | 203 ----- helm/vitess/templates/_vtctld.tpl | 167 ---- helm/vitess/templates/_vtgate.tpl | 293 -------- helm/vitess/templates/_vttablet.tpl | 710 ------------------ helm/vitess/templates/vitess.yaml | 132 ---- helm/vitess/values.yaml | 393 ---------- 59 files changed, 5958 deletions(-) delete mode 100644 examples/helm/101_initial_cluster.yaml delete mode 100644 examples/helm/201_customer_tablets.yaml delete mode 100644 examples/helm/202_move_tables.yaml delete mode 100644 examples/helm/203_switch_reads.yaml delete mode 100644 examples/helm/204_switch_writes.yaml delete mode 100644 examples/helm/205_clean_commerce.yaml delete mode 100644 examples/helm/301_customer_sharded.yaml delete mode 100644 examples/helm/302_new_shards.yaml delete mode 100644 examples/helm/303_reshard.yaml delete mode 100644 examples/helm/304_switch_reads.yaml delete mode 100644 examples/helm/305_switch_writes.yaml 
delete mode 100644 examples/helm/306_down_shard_0.yaml delete mode 100644 examples/helm/307_delete_shard_0.yaml delete mode 100644 examples/helm/308_final.yaml delete mode 100644 examples/helm/README.md delete mode 100644 examples/helm/legacy/101_initial_cluster.yaml delete mode 100644 examples/helm/legacy/201_customer_keyspace.yaml delete mode 100644 examples/helm/legacy/202_customer_tablets.yaml delete mode 100644 examples/helm/legacy/203_vertical_split.yaml delete mode 100644 examples/helm/legacy/204_vertical_migrate_replicas.yaml delete mode 100644 examples/helm/legacy/205_vertical_migrate_master.yaml delete mode 100644 examples/helm/legacy/206_clean_commerce.yaml delete mode 100644 examples/helm/legacy/301_customer_sharded.yaml delete mode 100644 examples/helm/legacy/302_new_shards.yaml delete mode 100644 examples/helm/legacy/303_horizontal_split.yaml delete mode 100644 examples/helm/legacy/304_migrate_replicas.yaml delete mode 100644 examples/helm/legacy/305_migrate_master.yaml delete mode 100644 examples/helm/legacy/306_down_shard_0.yaml delete mode 100644 examples/helm/legacy/307_delete_shard_0.yaml delete mode 100644 examples/helm/legacy/308_final.yaml delete mode 100755 examples/helm/legacy/kmysql.sh delete mode 100755 examples/helm/legacy/kvtctld.sh delete mode 100755 examples/helm/pf.sh delete mode 100644 helm/README.md delete mode 100755 helm/release.sh delete mode 100644 helm/vitess/.gitignore delete mode 100644 helm/vitess/.helmignore delete mode 100644 helm/vitess/CHANGELOG.md delete mode 100644 helm/vitess/Chart.yaml delete mode 100644 helm/vitess/README.md delete mode 100644 helm/vitess/crds/VitessTopoNodes-crd.yaml delete mode 100644 helm/vitess/examples/minikube.yaml delete mode 100644 helm/vitess/templates/NOTES.txt delete mode 100644 helm/vitess/templates/_cron-jobs.tpl delete mode 100644 helm/vitess/templates/_etcd.tpl delete mode 100644 helm/vitess/templates/_helpers.tpl delete mode 100644 helm/vitess/templates/_jobs.tpl delete mode 100644 helm/vitess/templates/_keyspace.tpl delete mode 100644 helm/vitess/templates/_orchestrator-conf.tpl delete mode 100644 helm/vitess/templates/_orchestrator.tpl delete mode 100644 helm/vitess/templates/_pmm.tpl delete mode 100644 helm/vitess/templates/_shard.tpl delete mode 100644 helm/vitess/templates/_vtctld.tpl delete mode 100644 helm/vitess/templates/_vtgate.tpl delete mode 100644 helm/vitess/templates/_vttablet.tpl delete mode 100644 helm/vitess/templates/vitess.yaml delete mode 100644 helm/vitess/values.yaml diff --git a/doc/DockerBuild.md b/doc/DockerBuild.md index a2f9b70e96..39a28e9cb1 100644 --- a/doc/DockerBuild.md +++ b/doc/DockerBuild.md @@ -1,6 +1,3 @@ -By default, the [Helm Charts](https://github.com/vitessio/vitess/tree/main/helm) -point to the `vitess/lite` image on [Docker Hub](https://hub.docker.com/u/vitess/). - We created the `lite` image as a stripped down version of our old image `base` such that Kubernetes pods can start faster. The `lite` image is updated automatically after every push to the GitHub main branch. diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/ReleaseInstructions.md index 19e4542235..4956f3d642 100644 --- a/doc/internal/ReleaseInstructions.md +++ b/doc/internal/ReleaseInstructions.md @@ -243,10 +243,6 @@ And finally, click on `Publish release`. * Schedule and publish Tweet on Vitess account. * Run following script to once the `base` Docker image is live. 
-``` -https://github.com/vitessio/vitess/blob/master/helm/release.sh -``` - * Deploy and release Java packages by following the `Java Packages Deploy & Release` section below. ### Java Packages Deploy & Release diff --git a/examples/helm/101_initial_cluster.yaml b/examples/helm/101_initial_cluster.yaml deleted file mode 100644 index 237d65264c..0000000000 --- a/examples/helm/101_initial_cluster.yaml +++ /dev/null @@ -1,57 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - schema: - initial: |- - create table product( - sku varbinary(128), - description varbinary(128), - price bigint, - primary key(sku) - ); - create table customer( - customer_id bigint not null auto_increment, - email varbinary(128), - primary key(customer_id) - ); - create table corder( - order_id bigint not null auto_increment, - customer_id bigint, - sku varbinary(128), - price bigint, - primary key(order_id) - ); - vschema: - initial: |- - { - "tables": { - "product": {}, - "customer": {}, - "corder": {} - } - } - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/201_customer_tablets.yaml b/examples/helm/201_customer_tablets.yaml deleted file mode 100644 index d3bb7403f8..0000000000 --- a/examples/helm/201_customer_tablets.yaml +++ /dev/null @@ -1,35 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/202_move_tables.yaml b/examples/helm/202_move_tables.yaml deleted file mode 100644 index 0e04ea377b..0000000000 --- a/examples/helm/202_move_tables.yaml +++ /dev/null @@ -1,40 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "move-tables" - kind: "vtctlclient" - command: "MoveTables -workflow=commerce2customer commerce customer \'{\"customer\":{}, \"corder\":{}}\'" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/203_switch_reads.yaml b/examples/helm/203_switch_reads.yaml deleted file mode 100644 index e387ba8442..0000000000 --- a/examples/helm/203_switch_reads.yaml +++ /dev/null @@ -1,43 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "mswitch1" - kind: "vtctlclient" - 
command: "SwitchReads -tablet_type=rdonly customer.commerce2customer" - - name: "mswitch2" - kind: "vtctlclient" - command: "SwitchReads -tablet_type=replica customer.commerce2customer" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/204_switch_writes.yaml b/examples/helm/204_switch_writes.yaml deleted file mode 100644 index 1fc09d9ad0..0000000000 --- a/examples/helm/204_switch_writes.yaml +++ /dev/null @@ -1,40 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "mswitch3" - kind: "vtctlclient" - command: "SwitchWrites customer.commerce2customer" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/205_clean_commerce.yaml b/examples/helm/205_clean_commerce.yaml deleted file mode 100644 index 2bd5b60af6..0000000000 --- a/examples/helm/205_clean_commerce.yaml +++ /dev/null @@ -1,60 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - schema: - postsplit: |- - drop table customer; - drop table corder; - vschema: - postsplit: |- - { - "tables": { - "product": {} - } - } - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "vclean1" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 rdonly" - - name: "vclean2" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 replica" - - name: "vclean3" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 master" - - name: "vclean4" - kind: "vtctlclient" - command: "ApplyRoutingRules -rules=\'{}\'" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/301_customer_sharded.yaml b/examples/helm/301_customer_sharded.yaml deleted file mode 100644 index 34c6c7edcd..0000000000 --- a/examples/helm/301_customer_sharded.yaml +++ /dev/null @@ -1,94 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - schema: - seq: |- - create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; - insert into customer_seq(id, next_id, cache) values(0, 1000, 100); - create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; - insert into order_seq(id, next_id, cache) values(0, 1000, 100); - vschema: - seq: |- - { - "tables": { - "customer_seq": { - "type": "sequence" - }, - "order_seq": { - "type": "sequence" - }, - "product": {} - } - } - - name: "customer" - shards: - - name: "0" - tablets: - - type: 
"replica" - vttablet: - replicas: 3 - schema: - sharded: |- - alter table customer change customer_id customer_id bigint not null; - alter table corder change order_id order_id bigint not null; - vschema: - sharded: |- - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "customer": { - "column_vindexes": [ - { - "column": "customer_id", - "name": "hash" - } - ], - "auto_increment": { - "column": "customer_id", - "sequence": "customer_seq" - } - }, - "corder": { - "column_vindexes": [ - { - "column": "customer_id", - "name": "hash" - } - ], - "auto_increment": { - "column": "order_id", - "sequence": "order_seq" - } - } - } - } - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/302_new_shards.yaml b/examples/helm/302_new_shards.yaml deleted file mode 100644 index 7acc253f55..0000000000 --- a/examples/helm/302_new_shards.yaml +++ /dev/null @@ -1,49 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - copySchema: - source: "customer/0" - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - copySchema: - source: "customer/0" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/303_reshard.yaml b/examples/helm/303_reshard.yaml deleted file mode 100644 index 772549f7c9..0000000000 --- a/examples/helm/303_reshard.yaml +++ /dev/null @@ -1,50 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "reshard" - kind: "vtctlclient" - command: "Reshard customer.cust2cust \'0\' \'-80,80-\'" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/304_switch_reads.yaml b/examples/helm/304_switch_reads.yaml deleted file mode 100644 index 4c4adad639..0000000000 --- a/examples/helm/304_switch_reads.yaml +++ /dev/null @@ -1,53 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "rswitch1" - kind: "vtctlclient" - command: "SwitchReads -tablet_type=rdonly customer.cust2cust" - - name: "rswitch2" - kind: "vtctlclient" - command: "SwitchReads 
-tablet_type=replica customer.cust2cust" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/305_switch_writes.yaml b/examples/helm/305_switch_writes.yaml deleted file mode 100644 index cba04977c3..0000000000 --- a/examples/helm/305_switch_writes.yaml +++ /dev/null @@ -1,50 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "rswitch3" - kind: "vtctlclient" - command: "SwitchWrites customer.cust2cust" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/306_down_shard_0.yaml b/examples/helm/306_down_shard_0.yaml deleted file mode 100644 index 28297b42c8..0000000000 --- a/examples/helm/306_down_shard_0.yaml +++ /dev/null @@ -1,40 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/307_delete_shard_0.yaml b/examples/helm/307_delete_shard_0.yaml deleted file mode 100644 index 3ff4b6e364..0000000000 --- a/examples/helm/307_delete_shard_0.yaml +++ /dev/null @@ -1,45 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -jobs: - - name: "delete-shard0" - kind: "vtctlclient" - command: "DeleteShard -recursive customer/0" - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/308_final.yaml b/examples/helm/308_final.yaml deleted file mode 100644 index 28297b42c8..0000000000 --- a/examples/helm/308_final.yaml +++ /dev/null @@ -1,40 +0,0 @@ -topology: - deploymentType: test - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 3 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 3 - -vttablet: - terminationGracePeriodSeconds: 1 - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/README.md 
b/examples/helm/README.md deleted file mode 100644 index dcf017a727..0000000000 --- a/examples/helm/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Instructions - -Detailed instructions for running this example can be found at https://vitess.io. -This document contains the summary of the commands to be run. - - -``` -# Start minikube -minikube start --cpus=4 --memory=8000 - -# Bring up initial cluster and commerce keyspace -helm install vitess ../../helm/vitess -f 101_initial_cluster.yaml - -# Insert and verify data -mysql < ../common/insert_commerce_data.sql -mysql --table < ../common/select_commerce_data.sql - -# Bring up customer keyspace -helm upgrade vitess ../../helm/vitess/ -f 201_customer_tablets.yaml - -# Initiate move tables -vtctlclient MoveTables -workflow=commerce2customer commerce customer '{"customer":{}, "corder":{}}' - -# Validate -vtctlclient VDiff customer.commerce2customer - -# Cut-over -vtctlclient SwitchReads -tablet_type=rdonly customer.commerce2customer -vtctlclient SwitchReads -tablet_type=replica customer.commerce2customer -vtctlclient SwitchWrites customer.commerce2customer - -# Clean-up -vtctlclient DropSources customer.commerce2customer - -# Prepare for resharding -helm upgrade vitess ../../helm/vitess/ -f 301_customer_sharded.yaml -helm upgrade vitess ../../helm/vitess/ -f 302_new_shards.yaml - -# Reshard -vtctlclient Reshard customer.cust2cust '0' '-80,80-' - -# Validate -vtctlclient VDiff customer.cust2cust - -# Cut-over -vtctlclient SwitchReads -tablet_type=rdonly customer.cust2cust -vtctlclient SwitchReads -tablet_type=replica customer.cust2cust -vtctlclient SwitchWrites customer.cust2cust - -# Down shard 0 -helm upgrade vitess ../../helm/vitess/ -f 306_down_shard_0.yaml -vtctlclient DeleteShard -recursive customer/0 - -# Delete deployment -helm delete vitess -kubectl delete pvc -l "app=vitess" -kubectl delete vitesstoponodes --all -``` diff --git a/examples/helm/legacy/101_initial_cluster.yaml b/examples/helm/legacy/101_initial_cluster.yaml deleted file mode 100644 index 2db8b28fe7..0000000000 --- a/examples/helm/legacy/101_initial_cluster.yaml +++ /dev/null @@ -1,74 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - schema: - initial: |- - create table product( - sku varbinary(128), - description varbinary(128), - price bigint, - primary key(sku) - ); - create table customer( - customer_id bigint not null auto_increment, - email varbinary(128), - primary key(customer_id) - ); - create table corder( - order_id bigint not null auto_increment, - customer_id bigint, - sku varbinary(128), - price bigint, - primary key(order_id) - ); - vschema: - initial: |- - { - "tables": { - "product": {}, - "customer": {}, - "corder": {} - } - } - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/201_customer_keyspace.yaml b/examples/helm/legacy/201_customer_keyspace.yaml deleted file mode 100644 index 48e527d390..0000000000 --- a/examples/helm/legacy/201_customer_keyspace.yaml +++ /dev/null @@ -1,50 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "create-customer-ks" - kind: "vtctlclient" - command: "CreateKeyspace -served_from='master:commerce,replica:commerce,rdonly:commerce' customer" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/202_customer_tablets.yaml b/examples/helm/legacy/202_customer_tablets.yaml deleted file mode 100644 index a3aa566e46..0000000000 --- a/examples/helm/legacy/202_customer_tablets.yaml +++ /dev/null @@ -1,75 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - vschema: - vsplit: |- - { - "tables": { - "product": {} - } - } - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - copySchema: - source: "commerce/0" - tables: - - "customer" - - "corder" - vschema: - vsplit: |- - { - "tables": { - "customer": {}, - "corder": {} - } - } - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/203_vertical_split.yaml b/examples/helm/legacy/203_vertical_split.yaml deleted file mode 100644 index fbaf96f3df..0000000000 --- a/examples/helm/legacy/203_vertical_split.yaml +++ /dev/null @@ -1,61 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "vertical-split" - kind: "vtworker" - cell: "zone1" - command: "VerticalSplitClone -min_healthy_tablets=1 -tables=customer,corder customer/0" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/204_vertical_migrate_replicas.yaml b/examples/helm/legacy/204_vertical_migrate_replicas.yaml deleted file mode 100644 index c46219a982..0000000000 --- a/examples/helm/legacy/204_vertical_migrate_replicas.yaml +++ /dev/null @@ -1,63 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "msf1" - kind: "vtctlclient" - command: "MigrateServedFrom customer/0 rdonly" - - name: "msf2" - kind: "vtctlclient" - command: "MigrateServedFrom customer/0 replica" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/205_vertical_migrate_master.yaml b/examples/helm/legacy/205_vertical_migrate_master.yaml deleted file mode 100644 index abbcb47aac..0000000000 --- a/examples/helm/legacy/205_vertical_migrate_master.yaml +++ /dev/null @@ -1,60 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "msf3" - kind: "vtctlclient" - command: "MigrateServedFrom customer/0 master" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/206_clean_commerce.yaml b/examples/helm/legacy/206_clean_commerce.yaml deleted file mode 100644 index 4089a2863b..0000000000 --- a/examples/helm/legacy/206_clean_commerce.yaml +++ /dev/null @@ -1,70 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - schema: - postsplit: |- - drop table customer; - drop table corder; - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "vclean1" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 rdonly" - - name: "vclean2" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 replica" - - name: "vclean3" - kind: "vtctlclient" - command: "SetShardTabletControl -blacklisted_tables=customer,corder -remove commerce/0 master" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/301_customer_sharded.yaml b/examples/helm/legacy/301_customer_sharded.yaml deleted file mode 100644 index 6d906afcc6..0000000000 --- a/examples/helm/legacy/301_customer_sharded.yaml +++ /dev/null @@ -1,114 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - schema: - seq: |- - create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; - insert into customer_seq(id, next_id, cache) values(0, 1000, 100); - create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; - insert into order_seq(id, next_id, cache) values(0, 1000, 100); - vschema: - seq: |- - { - "tables": { - "customer_seq": { - "type": "sequence" - }, - "order_seq": { - "type": "sequence" - }, - "product": {} - } - } - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - schema: - sharded: |- - alter table customer change customer_id customer_id bigint not null; - alter table corder change order_id order_id bigint not null; - vschema: - sharded: |- - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "customer": { - "column_vindexes": [ - { - "column": "customer_id", - "name": "hash" - } - ], - "auto_increment": { - "column": "customer_id", - "sequence": "customer_seq" - } - }, - "corder": { - "column_vindexes": [ - { - "column": "customer_id", - "name": "hash" - } - ], - "auto_increment": { - "column": "order_id", - "sequence": "order_seq" - } - } - } - } - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/302_new_shards.yaml b/examples/helm/legacy/302_new_shards.yaml deleted file mode 100644 index 1598dcd4c1..0000000000 --- a/examples/helm/legacy/302_new_shards.yaml +++ /dev/null @@ -1,75 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - copySchema: - source: "customer/0" - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - copySchema: - source: "customer/0" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/303_horizontal_split.yaml b/examples/helm/legacy/303_horizontal_split.yaml deleted file mode 100644 index 1e9119c654..0000000000 --- a/examples/helm/legacy/303_horizontal_split.yaml +++ /dev/null @@ -1,77 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "horizontal-split" - kind: "vtworker" - cell: "zone1" - command: "SplitClone -min_healthy_rdonly_tablets=1 customer/0" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/304_migrate_replicas.yaml b/examples/helm/legacy/304_migrate_replicas.yaml deleted file mode 100644 index de2716f971..0000000000 --- a/examples/helm/legacy/304_migrate_replicas.yaml +++ /dev/null @@ -1,79 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "mst1" - kind: "vtctlclient" - command: "MigrateServedTypes customer/0 rdonly" - - name: "mst2" - kind: "vtctlclient" - command: "MigrateServedTypes customer/0 replica" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/305_migrate_master.yaml b/examples/helm/legacy/305_migrate_master.yaml deleted file mode 100644 index b85357bdaf..0000000000 --- a/examples/helm/legacy/305_migrate_master.yaml +++ /dev/null @@ -1,76 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "mst3" - kind: "vtctlclient" - command: "MigrateServedTypes customer/0 master" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/306_down_shard_0.yaml b/examples/helm/legacy/306_down_shard_0.yaml deleted file mode 100644 index a540e81ba1..0000000000 --- a/examples/helm/legacy/306_down_shard_0.yaml +++ /dev/null @@ -1,63 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/307_delete_shard_0.yaml b/examples/helm/legacy/307_delete_shard_0.yaml deleted file mode 100644 index 20527187cc..0000000000 --- a/examples/helm/legacy/307_delete_shard_0.yaml +++ /dev/null @@ -1,68 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -jobs: - - name: "delete-shard0" - kind: "vtctlclient" - command: "DeleteShard -recursive customer/0" - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. 
- terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/308_final.yaml b/examples/helm/legacy/308_final.yaml deleted file mode 100644 index a540e81ba1..0000000000 --- a/examples/helm/legacy/308_final.yaml +++ /dev/null @@ -1,63 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "customer" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 1 - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - mysqlSize: "prod" - resources: - mysqlResources: - # It's generally not recommended to override this value for production usage. - terminationGracePeriodSeconds: 1 - -vtworker: - resources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/examples/helm/legacy/kmysql.sh b/examples/helm/legacy/kmysql.sh deleted file mode 100755 index f9cfaaf80a..0000000000 --- a/examples/helm/legacy/kmysql.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is a convenience script to run the mysql client against the local example. - -host=$(minikube service vtgate-zone1 --url=true --format="{{.IP}}" | tail -n 1) -port=$(minikube service vtgate-zone1 --url=true --format="{{.Port}}" | tail -n 1) - -if [ -z $port ]; then - #This checks K8s running on an single node by kubeadm - if [ $(kubectl get nodes | grep -v NAM | wc -l) -eq 1 -o $(kubectl get nodes | grep -v NAM | grep master | wc -l ) -eq 1 ]; then - host="127.0.0.1" - port=`kubectl describe service vtgate-zone1 | grep NodePort | grep mysql | awk '{print $3}' | awk -F'/' '{print $1}'` - fi -fi - -if [ -z $port ]; then - echo "Error: failed to obtain [host:port] minikube or kubectl." - exit 1; - -fi - -mysql -h "$host" -P "$port" $* diff --git a/examples/helm/legacy/kvtctld.sh b/examples/helm/legacy/kvtctld.sh deleted file mode 100755 index 2499e70630..0000000000 --- a/examples/helm/legacy/kvtctld.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# This is a convenience script to run vtctlclient against the local example. - -xdg-open "$(minikube service vtctld --url|head -n 1)" diff --git a/examples/helm/pf.sh b/examples/helm/pf.sh deleted file mode 100755 index 5f8fee8bda..0000000000 --- a/examples/helm/pf.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -kubectl port-forward service/vtctld 15000 15999 & -process_id1=$! -kubectl port-forward service/vtgate-zone1 15306:3306 15001 & -process_id2=$! -sleep 2 -echo "You may point your browser to http://localhost:15000 for vtctld." -echo "You may point your browser to http://localhost:15001 for vtgate, use the following aliases as shortcuts:" -echo 'alias vtctlclient="vtctlclient -server=localhost:15999"' -echo 'alias mysql="mysql -h 127.0.0.1 -P 15306"' -echo "Hit Ctrl-C to stop the port forwards" -wait $process_id1 -wait $process_id2 diff --git a/helm/README.md b/helm/README.md deleted file mode 100644 index 1677ee0b63..0000000000 --- a/helm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Helm Charts - -This directory contains [Helm](https://github.com/kubernetes/helm) -charts for running [Vitess](https://vitess.io) on -[Kubernetes](https://kubernetes.io). - -Note that this is not in the `examples` directory because these are the -sources for canonical packages that we plan to publish to the official -[Kubernetes Charts Repository](https://github.com/kubernetes/charts). -However, you may also find them useful as a starting point for creating -customized charts for your site, or other general-purpose charts for -common cluster variations. diff --git a/helm/release.sh b/helm/release.sh deleted file mode 100755 index ffdd994f30..0000000000 --- a/helm/release.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -set -ex - -vt_base_version='v7.0.2' -orchestrator_version='3.2.3' -pmm_client_version='1.17.4' - -docker pull vitess/base:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/k8s:$vt_base_version-buster . 
-docker tag vitess/k8s:$vt_base_version-buster vitess/k8s:$vt_base_version -docker push vitess/k8s:$vt_base_version-buster -docker push vitess/k8s:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtgate:$vt_base_version-buster vtgate -docker tag vitess/vtgate:$vt_base_version-buster vitess/vtgate:$vt_base_version -docker push vitess/vtgate:$vt_base_version-buster -docker push vitess/vtgate:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vttablet:$vt_base_version-buster vttablet -docker tag vitess/vttablet:$vt_base_version-buster vitess/vttablet:$vt_base_version -docker push vitess/vttablet:$vt_base_version-buster -docker push vitess/vttablet:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctld:$vt_base_version-buster mysqlctld -docker tag vitess/mysqlctld:$vt_base_version-buster vitess/mysqlctld:$vt_base_version -docker push vitess/mysqlctld:$vt_base_version-buster -docker push vitess/mysqlctld:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctl:$vt_base_version-buster mysqlctl -docker tag vitess/mysqlctl:$vt_base_version-buster vitess/mysqlctl:$vt_base_version -docker push vitess/mysqlctl:$vt_base_version-buster -docker push vitess/mysqlctl:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctl:$vt_base_version-buster vtctl -docker tag vitess/vtctl:$vt_base_version-buster vitess/vtctl:$vt_base_version -docker push vitess/vtctl:$vt_base_version-buster -docker push vitess/vtctl:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctlclient:$vt_base_version-buster vtctlclient -docker tag vitess/vtctlclient:$vt_base_version-buster vitess/vtctlclient:$vt_base_version -docker push vitess/vtctlclient:$vt_base_version-buster -docker push vitess/vtctlclient:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctld:$vt_base_version-buster vtctld -docker tag vitess/vtctld:$vt_base_version-buster vitess/vtctld:$vt_base_version -docker push vitess/vtctld:$vt_base_version-buster -docker push vitess/vtctld:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtworker:$vt_base_version-buster vtworker -docker tag vitess/vtworker:$vt_base_version-buster vitess/vtworker:$vt_base_version -docker push vitess/vtworker:$vt_base_version-buster -docker push vitess/vtworker:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/logrotate:$vt_base_version-buster logrotate -docker tag vitess/logrotate:$vt_base_version-buster vitess/logrotate:$vt_base_version -docker push vitess/logrotate:$vt_base_version-buster -docker push vitess/logrotate:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/logtail:$vt_base_version-buster logtail -docker tag vitess/logtail:$vt_base_version-buster vitess/logtail:$vt_base_version -docker push vitess/logtail:$vt_base_version-buster -docker push vitess/logtail:$vt_base_version - -docker build --build-arg VT_BASE_VER=$vt_base_version --build-arg PMM_CLIENT_VER=$pmm_client_version -t vitess/pmm-client:v$pmm_client_version-buster pmm-client -docker tag vitess/pmm-client:v$pmm_client_version-buster vitess/pmm-client:v$pmm_client_version -docker push vitess/pmm-client:v$pmm_client_version-buster -docker push vitess/pmm-client:v$pmm_client_version - -docker build --build-arg VT_BASE_VER=$vt_base_version --build-arg ORC_VER=$orchestrator_version -t 
vitess/orchestrator:v$orchestrator_version-buster orchestrator -docker tag vitess/orchestrator:v$orchestrator_version-buster vitess/orchestrator:v$orchestrator_version -docker push vitess/orchestrator:v$orchestrator_version-buster -docker push vitess/orchestrator:v$orchestrator_version diff --git a/helm/vitess/.gitignore b/helm/vitess/.gitignore deleted file mode 100644 index fc97dd2673..0000000000 --- a/helm/vitess/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Don't check in site-local customizations. -site-values.yaml diff --git a/helm/vitess/.helmignore b/helm/vitess/.helmignore deleted file mode 100644 index f0c1319444..0000000000 --- a/helm/vitess/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/helm/vitess/CHANGELOG.md b/helm/vitess/CHANGELOG.md deleted file mode 100644 index b5550a7958..0000000000 --- a/helm/vitess/CHANGELOG.md +++ /dev/null @@ -1,86 +0,0 @@ -## 2.0.1-0 - 2020-04-16 - -The charts now officially support Kubernetes 1.11 and newer. - -### Changes -* The VitessTopoNode CRD is now created using the `apiextensions.k8s.io/v1beta1` API. - -## 2.0.0-0 - 2020-04-03 - -Vitess now supports using the Kubernetes API as a topology provider. This means that it is now easier than ever to run Vitess on Kubernetes! - -Properly supporting this new provider requires a major, breaking change of the helm charts. The `etcd-operator` has been deprecated as well so the Vitess team has decided to make the Kubernetes topology the default going forward. - -### Upgrade and Migration Information - -* This version introduces a `topologyProvider` configuration in `topology.globalCell` and in the configuration for each cell individually. The default from v2 on is to use the `k8s` topology provider. Explicitly set these values to `etcd2` in order to continue to use the etcd topology provider. -* The `root` is now being set properly for all topology cells. Prior to this version, all cells were using `""` as the root which worked, but was invalid. The root path for all cells will now be set to `/vitess/{{ $cell.name }}`. In order to upgrade a helm deployment from v1 to v2 you will need to stop all vitess components, migrate all etcd keys except `/global`, from `/` to `/vitess/{{ $cell.name }}`. There is no automation for this procedure at this time. 
- -### Changes -* Update images of Vitess components to **TODO: we need new images based on a released tag, not just master at a point in time** -* Set the topology `root` in all new and existing cells to `/vitess/{{ $cell.name }}` -* Add `topology.globalCell.topologyProvider` - default to `k8s` -* Add `topolgy.cells[*].topologyProvider` - default to `k8s` - -## 1.0.7-5 - 2019-12-02 - -### Changes -* Update images of Vitess components to v4.0.0 -* Update MySQL image to Percona 5.7.26 -* Support for OpenTracing - -## 1.0.6 - 2019-01-20 - -### Changes -* Update Orchestrator default to 3.0.14 -* Run `pmm-admin repair` on `pmm-client` startup to recover failures on `pmm-server` -* Backups now only run on `replica` (non-master), `rdonly`, or `spare` tablet types - -## 1.0.5 - 2019-01-12 - -### Changes -* Set FailMasterPromotionIfSQLThreadNotUpToDate = true in Orchestrator config, to prevent -lagging replicas from being promoted to master and causing errant GTID problems. - -**NOTE:** You need to manually restart your Orchestrator pods for this change to take effect - -## 1.0.4 - 2019-01-01 - -### Changes -* Use the [Orchestrator API](https://github.com/openark/orchestrator/blob/master/docs/using-the-web-api.md) -to call `begin-downtime` before running `PlannedReparentShard` in the `preStopHook`, to make sure that Orchestrator -doesn't try to run an external failover while Vitess is reparenting. When it is complete, it calls `end-downtime`. -Also call `forget` on the instance after calling `vtctlclient DeleteTablet`. It will be rediscovered if/when -the tablet comes back up. This eliminates most possible race conditions that could cause split brain. - -## 1.0.3 - 2018-12-20 - -### Changes -* Start tagging helm images and use them as default -* Added commonly used flags to values.yaml for vtgate & vttablet for discoverability. -Some match the binary flag defaults, and some have been set to more production ready values. -* Extended vttablet terminationGracePeriodSeconds from 600 to 60000000. -This will block on `PlannedReparent` in the `preStopHook` forever to prevent -unsafe `EmergencyReparent` operations when the pod is killed. - -### Bug fixes -* Use `$MYSQL_FLAVOR` to set flavor instead of `$EXTRA_MY_CNF` - -## 1.0.2 - 2018-12-11 - -### Bug fixes -* Renamed ImagePullPolicy to imagePullPolicy -* Added user-secret-volumes to backup CronJob - -## 1.0.1 - 2018-12-07 - -### Changes -* Added support for [MySQL Custom Queries](https://www.percona.com/blog/2018/10/10/percona-monitoring-and-management-pmm-1-15-0-is-now-available/) in PMM -* Added Linux host monitoring for PMM -* Added keyspace and shard labels to jobs -* Remove old mysql.sock file in vttablet InitContainer - -### Bug fixes -* PMM wouldn't bootstrap correctly on a new cluster - -## 1.0.0 - 2018-12-03 Vitess Helm Chart goes GA! 
diff --git a/helm/vitess/Chart.yaml b/helm/vitess/Chart.yaml deleted file mode 100644 index a6d308f079..0000000000 --- a/helm/vitess/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -name: vitess -version: 2.0.1-0 -description: Single-Chart Vitess Cluster -keywords: - - vitess - - mysql - - maria - - mariadb - - percona - - sql - - database - - shard -home: https://vitess.io -sources: - - https://github.com/vitessio/vitess -maintainers: - - name: Vitess Project - email: vitess@googlegroups.com -icon: https://vitess.io/img/logos/vitess.png diff --git a/helm/vitess/README.md b/helm/vitess/README.md deleted file mode 100644 index 93d53975ef..0000000000 --- a/helm/vitess/README.md +++ /dev/null @@ -1,432 +0,0 @@ -# Vitess - -[Vitess](https://vitess.io) is a database clustering system for horizontal -scaling of MySQL. It is an open-source project started at YouTube, -and has been used there since 2011. - -## Introduction - -This chart creates a Vitess cluster on Kubernetes in a single -[release](https://github.com/kubernetes/helm/blob/master/docs/glossary.md#release). -It currently includes all Vitess components -(vtctld, vtgate, vttablet) inline (in `templates/`) rather than as sub-charts. - -## Using Etcd For Topology Data - -The chart will use Kubernetes as the topology store for Vitess. This is the preferred configuration when running Vitess in Kubernetes as it has no external dependencesi. - -If you do wish to use `etcd` as the toplogy service, then you will need to create an etcd cluster and provide the configuration in your `values.yaml`. Etcd can be managed manually or via the [etcd-operator](https://github.com/coreos/etcd-operator). - -## Installing the Chart - -```console -helm/vitess$ helm install . -f site-values.yaml -``` - -See the [Configuration](#configuration) section below for what you need to put -in `site-values.yaml`. -You can install the chart without site values, but it will only launch a -skeleton cluster without any keyspaces (logical databases). - -## Cleaning up - -After deleting an installation of the chart, the PersistentVolumeClaims remain. -If you don't intend to use them again, you should delete them: - -```shell -kubectl delete pvc -l app=vitess -``` - -## Configuration - -You will need to provide a `site-values.yaml` file to specify your actual -logical database topology (e.g. whether to shard). -Here are examples of various configurations. To see additional options, -look at the default `values.yaml` file, which is well commented. - -### Unsharded keyspace - -``` -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 3 - mysqlProtocol: - enabled: false - keyspaces: - - name: "unsharded_dbname" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 -``` - -### Unsharded + sharded keyspaces - -``` -topology: - cells: - - name: "zone1" - ... - keyspaces: - - name: "unsharded_dbname" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - name: "sharded_db" - shards: - - name: "-80" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - name: "80-" - tablets: - - type: "replica" - vttablet: - replicas: 2 -``` - -### Separate pools of replicas and rdonly tablets - -``` -topology: - cells: - - name: "zone1" - ... 
- keyspaces: - - name: "unsharded_dbname" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - - type: "rdonly" - vttablet: - replicas: 2 -``` - -### Append custom my.cnf to default Vitess settings - -Create a config map with one or more standard `my.cnf` formatted files. Any settings -provided here will overwrite any colliding values from Vitess defaults. - -`kubectl create configmap shared-my-cnf --from-file=shared.my.cnf` - -*NOTE:* if using MySQL 8.0.x, this file must contain -`default_authentication_plugin = mysql_native_password` - -``` -topology: - cells: - ... - -vttablet: - - # The name of a config map with N files inside of it. Each file will be added - # to $EXTRA_MY_CNF, overriding any default my.cnf settings - extraMyCnf: shared-my-cnf -``` - -### Use a custom database image and a specific Vitess release - -``` -topology: - cells: - ... - -vttablet: - vitessTag: "2.1" - mysqlImage: "percona:5.7.20" - flavor: percona -``` - -### Enable MySQL protocol support - -``` -topology: - cells: - - name: "zone1" - ... - # enable or disable mysql protocol support, with accompanying auth details - mysqlProtocol: - enabled: false - username: myuser - # this is the secret that will be mounted as the user password - # kubectl create secret generic myuser-password --from-literal=password=abc123 - passwordSecret: myuser-password - - keyspaces: - ... -``` - -### Enable backup/restore using Google Cloud Storage - -Enabling backups creates a cron job per shard that defaults to executing once per day at midnight. -This can be overridden on a per shard level so you can stagger when backups occur. - -``` -topology: - cells: - - name: "zone1" - ... - keyspaces: - - name: "unsharded_dbname" - shards: - - name: "0" - backup: - cron: - schedule: "0 1 * * *" - suspend: false - tablets: - - type: "replica" - vttablet: - replicas: 2 - - name: "sharded_db" - shards: - - name: "-80" - backup: - cron: - schedule: "0 2 * * *" - suspend: false - tablets: - - type: "replica" - vttablet: - replicas: 2 - - name: "80-" - backup: - cron: - schedule: "0 3 * * *" - suspend: false - tablets: - - type: "replica" - vttablet: - replicas: 2 - -config: - backup: - enabled: true - - cron: - # the default schedule runs daily at midnight unless overridden by the individual shard - schedule: "0 0 * * *" - - # if this is set to true, the cron jobs are created, but never execute - suspend: false - - backup_storage_implementation: gcs - - # Google Cloud Storage bucket to use for backups - gcs_backup_storage_bucket: vitess-backups - - # root prefix for all backup-related object names - gcs_backup_storage_root: vtbackups -``` - -### Custom requests/limits - -``` -topology: - cells: - ... - -vttablet: - resources: - # common production values 2-4CPU/4-8Gi RAM - limits: - cpu: 2 - memory: 4Gi - mysqlResources: - # common production values 4CPU/8-16Gi RAM - limits: - cpu: 4 - memory: 8Gi - # PVC for mysql - dataVolumeClaimAnnotations: - dataVolumeClaimSpec: - # pd-ssd (Google Cloud) - # managed-premium (Azure) - # standard (AWS) - not sure what the default class is for ssd - storageClassName: "default" - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: "10Gi" -``` - -### Custom PVC for MySQL data - -``` -topology: - cells: - ... - -vttablet: - dataVolumeClaimSpec: - # Google Cloud SSD - storageClassName: "pd-ssd" - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: "100Gi" -``` - -### Enable PMM (Percona Monitoring and Management) - -``` -topology: - cells: - ... 
- -pmm: - enabled: true - pmmTag: "1.17.0" - client: - resources: - requests: - cpu: 50m - memory: 128Mi - limits: - cpu: 200m - memory: 256Mi - server: - resources: - limits: - cpu: 2 - memory: 4Gi - dataVolumeClaimSpec: - storageClassName: "default" - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: "150Gi" - env: - metricsMemory: "3000000" -``` - -### Enable Orchestrator -#### NOTE: This requires at least Kubernetes 1.9 - -``` -topology: - cells: - ... - -orchestrator: - enabled: true -``` - -### Enable TLS encryption for vitess grpc communication - -Each component of vitess requires a certificate and private key to secure incoming requests and further configuration for every outgoing connection. In this example TLS certificates were generated and stored in several kubernetes secrets: -```yaml -vttablet: - extraFlags: - # configure which certificates to use for serving grpc requests - grpc_cert: /vt/usersecrets/vttablet-tls/vttablet.pem - grpc_key: /vt/usersecrets/vttablet-tls/vttablet-key.pem - tablet_grpc_ca: /vt/usersecrets/vttablet-tls/vitess-ca.pem - tablet_grpc_server_name: vttablet - secrets: - - vttablet-tls - -vtctld: - extraFlags: - grpc_cert: /vt/usersecrets/vtctld-tls/vtctld.pem - grpc_key: /vt/usersecrets/vtctld-tls/vtctld-key.pem - tablet_grpc_ca: /vt/usersecrets/vtctld-tls/vitess-ca.pem - tablet_grpc_server_name: vttablet - tablet_manager_grpc_ca: /vt/usersecrets/vtctld-tls/vitess-ca.pem - tablet_manager_grpc_server_name: vttablet - secrets: - - vtctld-tls - -vtctlclient: # configuration used by both InitShardMaster-jobs and orchestrator to be able to communicate with vtctld - extraFlags: - vtctld_grpc_ca: /vt/usersecrets/vitess-ca/vitess-ca.pem - vtctld_grpc_server_name: vtctld - secrets: - - vitess-ca - -vtgate: - extraFlags: - grpc_cert: /vt/usersecrets/vtgate-tls/vtgate.pem - grpc_key: /vt/usersecrets/vtgate-tls/vtgate-key.pem - tablet_grpc_ca: /vt/usersecrets/vtgate-tls/vitess-ca.pem - tablet_grpc_server_name: vttablet - secrets: - - vtgate-tls -``` - -### Slave replication traffic encryption - -To encrypt traffic between slaves and master additional flags can be provided. By default MySQL generates self-signed certificates on startup (otherwise specify `ssl_*` settings within you `extraMyCnf`), that can be used to encrypt the traffic: -``` -vttablet: - extraFlags: - db_flags: 2048 - db_repl_use_ssl: true - db-config-repl-flags: 2048 - -``` - -### Percona at rest encryption using the vault plugin - -To use the [percona at rest encryption](https://www.percona.com/doc/percona-server/LATEST/management/data_at_rest_encryption.html) several additional settings have to be provided via an `extraMyCnf`-file. This makes only sense if the traffic is encrypted as well (see above sections), since binlog replication is unencrypted by default. 
-``` -apiVersion: v1 -kind: ConfigMap -metadata: - name: vttablet-extra-config - namespace: vitess -data: - extra.cnf: |- - early-plugin-load=keyring_vault=keyring_vault.so - # this includes default rpl plugins, see https://github.com/vitessio/vitess/blob/main/config/mycnf/master_mysql57.cnf for details - plugin-load=rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so;keyring_udf=keyring_udf.so - keyring_vault_config=/vt/usersecrets/vttablet-vault/vault.conf # load keyring configuration from secret - innodb_encrypt_tables=ON # encrypt all tables by default - encrypt_binlog=ON # binlog encryption - master_verify_checksum=ON # necessary for binlog encryption - binlog_checksum=CRC32 # necessary for binlog encryption - encrypt-tmp-files=ON # use temporary AES keys to encrypt temporary files -``` - -An example vault configuration, which is provided by the `vttablet-vault`-Secret in the above example: -``` -vault_url = https://10.0.0.1:8200 -secret_mount_point = vitess -token = 11111111-1111-1111-1111111111 -vault_ca = /vt/usersecrets/vttablet-vault/vault-ca-bundle.pem -``` - -At last add the secret containing the vault configuration and the additional MySQL-configuration to your helm values: -``` -vttablet: - flavor: "percona" # only works with percona - mysqlImage: "percona:5.7.23" - extraMyCnf: vttablet-extra-config - secrets: - - vttablet-vault -``` - -### Enable tracing (opentracing-jaeger) - -To enable tracing using opentracing Jaeger of Vitess components add tracing config with tracer `opentracing-jaeger` to `extraFlags`. For example to enable tracing for `vtgate`: - -```yaml -vtgate: - extraFlags: - jaeger-agent-host: "JAEGER-AGENT:6831" - tracing-sampling-rate: 0.1 - tracer: opentracing-jaeger -``` diff --git a/helm/vitess/crds/VitessTopoNodes-crd.yaml b/helm/vitess/crds/VitessTopoNodes-crd.yaml deleted file mode 100644 index 4e29ee627e..0000000000 --- a/helm/vitess/crds/VitessTopoNodes-crd.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# This is a copy of the crd def from: vitess/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml -# It is not symlinked so that the helm charts do not have references to outside files -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: vitesstoponodes.topo.vitess.io -spec: - group: topo.vitess.io - additionalPrinterColumns: - - name: Key - type: string - description: The full key path - JSONPath: .data.key - validation: - openAPIV3Schema: - type: object - required: - - data - properties: - data: - type: object - required: - - key - - value - properties: - key: - description: A file-path like key. Must be an absolute path. Must not end with a /. - type: string - pattern: '^\/.+[^\/]$' - value: - description: A base64 encoded value. Must be a base64 encoded string or empty string. - type: string - pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" - ephemeral: - description: Whether or not the node is considered ephemeral. True for lock and election nodes. 
- type: boolean - version: v1beta1 - scope: Namespaced - names: - plural: vitesstoponodes - singular: vitesstoponode - kind: VitessTopoNode diff --git a/helm/vitess/examples/minikube.yaml b/helm/vitess/examples/minikube.yaml deleted file mode 100644 index 73c8ec9e23..0000000000 --- a/helm/vitess/examples/minikube.yaml +++ /dev/null @@ -1,65 +0,0 @@ -topology: - cells: - - name: "zone1" - vtctld: - replicas: 1 - vtgate: - replicas: 1 - mysqlProtocol: - enabled: true - authType: "none" - keyspaces: - - name: "commerce" - shards: - - name: "0" - tablets: - - type: "replica" - vttablet: - replicas: 2 - schema: - phase1: |- - create table product( - sku varbinary(128), - description varbinary(128), - price bigint, - primary key(sku) - ); - create table customer( - user_id bigint not null auto_increment, - email varbinary(128), - primary key(user_id) - ); - create table corder( - order_id bigint not null auto_increment, - user_id bigint, - product_id bigint, - msrp bigint, - primary key(order_id) - ); - vschema: - phase1: |- - { - "tables": { - "product": {}, - "customer": {}, - "corder": {} - } - } - -vtctld: - serviceType: "NodePort" - resources: - -vtgate: - serviceType: "NodePort" - resources: - -vttablet: - resources: - mysqlResources: - -pmm: - enabled: false - -orchestrator: - enabled: false diff --git a/helm/vitess/templates/NOTES.txt b/helm/vitess/templates/NOTES.txt deleted file mode 100644 index 3b3e7e5168..0000000000 --- a/helm/vitess/templates/NOTES.txt +++ /dev/null @@ -1,14 +0,0 @@ -{{- $cell := (index .Values.topology.cells 0).name -}} -{{- $proxyURL := printf "http://localhost:8001/api/v1/namespaces/%s" .Release.Namespace -}} - -Release name: {{.Release.Name}} - -To access administrative web pages, start a proxy with: - kubectl proxy --port=8001 - -Then use the following URLs: - - vtctld: {{$proxyURL}}/services/vtctld:web/proxy/app/ - vtgate: {{$proxyURL}}/services/vtgate-{{$cell}}:web/proxy/ -{{ if $.Values.orchestrator.enabled }}orchestrator: {{$proxyURL}}/services/orchestrator:web/proxy/{{ end }} -{{ if $.Values.pmm.enabled }} pmm: {{$proxyURL}}/services/pmm:web/proxy/{{ end }} diff --git a/helm/vitess/templates/_cron-jobs.tpl b/helm/vitess/templates/_cron-jobs.tpl deleted file mode 100644 index 3b4ffd1385..0000000000 --- a/helm/vitess/templates/_cron-jobs.tpl +++ /dev/null @@ -1,82 +0,0 @@ -################################### -# backup cron -################################### -{{ define "vttablet-backup-cron" -}} -# set tuple values to more recognizable variables -{{- $cellClean := index . 0 -}} -{{- $keyspaceClean := index . 1 -}} -{{- $shardClean := index . 2 -}} -{{- $shardName := index . 3 -}} -{{- $keyspace := index . 4 -}} -{{- $shard := index . 5 -}} -{{- $vitessTag := index . 6 -}} -{{- $backup := index . 7 -}} -{{- $namespace := index . 8 -}} -{{- $defaultVtctlclient := index . 
9 }} - -{{ if $backup.enabled }} -# create cron job for current shard ---- -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: {{ $shardName }}-backup - labels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - backupJob: "true" - -spec: - schedule: {{ $shard.backup.cron.schedule | default $backup.cron.schedule | quote }} - concurrencyPolicy: Forbid - suspend: {{ $shard.backup.cron.suspend | default $backup.cron.suspend }} - successfulJobsHistoryLimit: 3 - failedJobsHistoryLimit: 20 - - jobTemplate: - spec: - template: - metadata: - labels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - backupJob: "true" - - # pod spec - spec: - restartPolicy: Never -{{ include "pod-security" . | indent 10 }} - - containers: - - name: backup - image: "vitess/vtctlclient:{{$vitessTag}}" - volumeMounts: -{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 14 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - VTCTLD_SVC=vtctld.{{ $namespace }}:15999 - VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }}) - - vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC BackupShard {{ $keyspace.name }}/{{ $shard.name }} - - resources: - requests: - cpu: 10m - memory: 20Mi - volumes: -{{ include "user-secret-volumes" $defaultVtctlclient.secrets | indent 12 }} - -{{ end }} - -{{- end -}} diff --git a/helm/vitess/templates/_etcd.tpl b/helm/vitess/templates/_etcd.tpl deleted file mode 100644 index ad701cc612..0000000000 --- a/helm/vitess/templates/_etcd.tpl +++ /dev/null @@ -1,41 +0,0 @@ -################################### -# etcd cluster managed by pre-installed etcd operator -################################### -{{ define "etcd" -}} -# set tuple values to more recognizable variables -{{- $name := index . 0 -}} -{{- $replicas := index . 1 -}} -{{- $version := index . 2 -}} -{{- $resources := index . 3 }} -{{- $clusterWide := index . 4 }} - -################################### -# EtcdCluster -################################### -apiVersion: "etcd.database.coreos.com/v1beta2" -kind: "EtcdCluster" -metadata: - name: "etcd-{{ $name }}" - ## Adding this annotation make this cluster managed by clusterwide operators - ## namespaced operators ignore it - annotations: - {{ if $clusterWide }} - etcd.database.coreos.com/scope: clusterwide - {{ end }} -spec: - size: {{ $replicas }} - version: {{ $version | quote }} - pod: - resources: -{{ toYaml ($resources) | indent 6 }} - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - # prefer to stay away from other same-cell etcd pods - - weight: 100 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - etcd_cluster: "etcd-{{ $name }}" -{{- end -}} diff --git a/helm/vitess/templates/_helpers.tpl b/helm/vitess/templates/_helpers.tpl deleted file mode 100644 index 695d198758..0000000000 --- a/helm/vitess/templates/_helpers.tpl +++ /dev/null @@ -1,392 +0,0 @@ -# Helper templates - -############################# -# Format a flag map into a command line, -# as expected by the golang 'flag' package. -# Boolean flags must be given a value, such as "true" or "false". -############################# -{{- define "format-flags" -}} -{{- range $key, $value := . 
-}} --{{$key}}={{$value | quote}} -{{end -}} -{{- end -}} - -############################ -# Format a flag map into a command line (inline), -# as expected by the golang 'flag' package. -# Boolean flags must be given a value, such as "true" or "false". -############################# -{{- define "format-flags-inline" -}} -{{- range $key, $value := . -}} --{{$key}}={{$value | quote}}{{" "}} -{{- end -}} -{{- end -}} - -############################# -# Repeat a string N times, where N is the total number -# of replicas. Len must be used on the calling end to -# get an int -############################# -{{- define "tablet-count" -}} -{{- range . -}} -{{- repeat (int .vttablet.replicas) "x" -}} -{{- end -}} -{{- end -}} - -############################# -# Format a list of flag maps into a command line. -############################# -{{- define "format-flags-all" -}} -{{- range . }}{{template "format-flags" .}}{{end -}} -{{- end -}} - -############################# -# Clean labels, making sure it starts and ends with [A-Za-z0-9]. -# This is especially important for shard names, which can start or end with -# '-' (like -80 or 80-), which would be an invalid kubernetes label. -############################# -{{- define "clean-label" -}} -{{- $replaced_label := . | replace "_" "-"}} -{{- if hasPrefix "-" . -}} -x{{$replaced_label}} -{{- else if hasSuffix "-" . -}} -{{$replaced_label}}x -{{- else -}} -{{$replaced_label}} -{{- end -}} -{{- end -}} - -############################# -# injects default vitess environment variables -############################# -{{- define "vitess-env" -}} -- name: VTROOT - value: "/vt" -- name: VTDATAROOT - value: "/vtdataroot" -- name: GOBIN - value: "/vt/bin" -- name: VT_MYSQL_ROOT - value: "/usr" -- name: PKG_CONFIG_PATH - value: "/vt/lib" -{{- end -}} - -############################# -# inject default pod security -############################# -{{- define "pod-security" -}} -securityContext: - runAsUser: 1000 - fsGroup: 2000 -{{- end -}} - -############################# -# support region nodeAffinity if defined -############################# -{{- define "node-affinity" -}} -{{- $region := . -}} -{{ with $region }} -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "failure-domain.beta.kubernetes.io/region" - operator: In - values: [{{ $region | quote }}] -{{- end -}} -{{- end -}} - -############################# -# mycnf exec - expects extraMyCnf config map name -############################# -{{- define "mycnf-exec" -}} - -if [ "$VT_DB_FLAVOR" = "percona" ]; then - MYSQL_FLAVOR=Percona - -elif [ "$VT_DB_FLAVOR" = "mysql" ]; then - MYSQL_FLAVOR=MySQL56 - -elif [ "$VT_DB_FLAVOR" = "mysql56" ]; then - MYSQL_FLAVOR=MySQL56 - -elif [ "$VT_DB_FLAVOR" = "maria" ]; then - MYSQL_FLAVOR=MariaDB - -elif [ "$VT_DB_FLAVOR" = "mariadb" ]; then - MYSQL_FLAVOR=MariaDB - -elif [ "$VT_DB_FLAVOR" = "mariadb103" ]; then - MYSQL_FLAVOR=MariaDB103 - -fi - -export MYSQL_FLAVOR - -{{ if . }} -for filename in /vt/userconfig/*.cnf; do - export EXTRA_MY_CNF="$EXTRA_MY_CNF:$filename" -done -{{ end }} - -{{- end -}} - -############################# -# -# all backup helpers below -# -############################# - -############################# -# backup flags - expects config.backup -############################# -{{- define "backup-flags" -}} -{{- $backup := index . 0 -}} -{{- $caller := index . 
1 -}} - -{{ with $backup }} - - {{ if .enabled }} - {{ if eq $caller "vttablet" }} --restore_from_backup - {{ end }} - --backup_storage_implementation=$VT_BACKUP_SERVICE - - {{ if eq .backup_storage_implementation "gcs" }} --gcs_backup_storage_bucket=$VT_GCS_BACKUP_STORAGE_BUCKET --gcs_backup_storage_root=$VT_GCS_BACKUP_STORAGE_ROOT - - {{ else if eq .backup_storage_implementation "s3" }} --s3_backup_aws_region=$VT_S3_BACKUP_AWS_REGION --s3_backup_storage_bucket=$VT_S3_BACKUP_STORAGE_BUCKET --s3_backup_storage_root=$VT_S3_BACKUP_STORAGE_ROOT --s3_backup_server_side_encryption=$VT_S3_BACKUP_SERVER_SIDE_ENCRYPTION - - {{ else if eq .backup_storage_implementation "ceph" }} --ceph_backup_storage_config=$CEPH_CREDENTIALS_FILE - {{ end }} - - {{ end }} - -{{ end }} - -{{- end -}} - -############################# -# backup env - expects config.backup -############################# -{{- define "backup-env" -}} - -{{ if .enabled }} - -- name: VT_BACKUP_SERVICE - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.backup_storage_implementation - -{{ if eq .backup_storage_implementation "gcs" }} - -- name: VT_GCS_BACKUP_STORAGE_BUCKET - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.gcs_backup_storage_bucket -- name: VT_GCS_BACKUP_STORAGE_ROOT - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.gcs_backup_storage_root - -{{ else if eq .backup_storage_implementation "s3" }} - -- name: VT_S3_BACKUP_AWS_REGION - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.s3_backup_aws_region -- name: VT_S3_BACKUP_STORAGE_BUCKET - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.s3_backup_storage_bucket -- name: VT_S3_BACKUP_STORAGE_ROOT - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.s3_backup_storage_root -- name: VT_S3_BACKUP_SERVER_SIDE_ENCRYPTION - valueFrom: - configMapKeyRef: - name: vitess-cm - key: backup.s3_backup_server_side_encryption - -{{ end }} - -{{ end }} - -{{- end -}} - -############################# -# backup volume - expects config.backup -############################# -{{- define "backup-volume" -}} - -{{ if .enabled }} - - {{ if eq .backup_storage_implementation "gcs" }} - - {{ if .gcsSecret }} -- name: backup-creds - secret: - secretName: {{ .gcsSecret }} - {{ end }} - - {{ else if eq .backup_storage_implementation "s3" }} - - {{ if .s3Secret }} -- name: backup-creds - secret: - secretName: {{ .s3Secret }} - {{ end }} - - {{ else if eq .backup_storage_implementation "ceph" }} - -- name: backup-creds - secret: - secretName: {{required ".cephSecret necessary to use backup_storage_implementation: ceph!" 
.cephSecret }} - - {{ end }} - -{{ end }} - -{{- end -}} - -############################# -# backup volumeMount - expects config.backup -############################# -{{- define "backup-volumeMount" -}} - -{{ if .enabled }} - - {{ if eq .backup_storage_implementation "gcs" }} - - {{ if .gcsSecret }} -- name: backup-creds - mountPath: /etc/secrets/creds - {{ end }} - - {{ else if eq .backup_storage_implementation "s3" }} - - {{ if .s3Secret }} -- name: backup-creds - mountPath: /etc/secrets/creds - {{ end }} - - {{ else if eq .backup_storage_implementation "ceph" }} - -- name: backup-creds - mountPath: /etc/secrets/creds - - {{ end }} - -{{ end }} - -{{- end -}} - -############################# -# backup exec -############################# -{{- define "backup-exec" -}} - -{{ if .enabled }} - - {{ if eq .backup_storage_implementation "gcs" }} - - {{ if .gcsSecret }} -credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1) - -export GOOGLE_APPLICATION_CREDENTIALS=$credsPath -cat $GOOGLE_APPLICATION_CREDENTIALS - {{ end }} - - {{ else if eq .backup_storage_implementation "s3" }} - - {{ if .s3Secret }} -credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1) - -export AWS_SHARED_CREDENTIALS_FILE=$credsPath -cat $AWS_SHARED_CREDENTIALS_FILE - {{ end }} - - {{ else if eq .backup_storage_implementation "ceph" }} - -credsPath=/etc/secrets/creds/$(ls /etc/secrets/creds/ | head -1) -export CEPH_CREDENTIALS_FILE=$credsPath -cat $CEPH_CREDENTIALS_FILE - - {{ end }} - -{{ end }} - -{{- end -}} - -############################# -# user config volume - expects config map name -############################# -{{- define "user-config-volume" -}} - -{{ if . }} - -- name: user-config - configMap: - name: {{ . }} - -{{ end }} - -{{- end -}} - -############################# -# user config volumeMount - expects config map name -############################# -{{- define "user-config-volumeMount" -}} - -{{ if . }} - -- name: user-config - mountPath: /vt/userconfig - -{{ end }} - -{{- end -}} - -############################# -# user secret volumes - expects list of secret names -############################# -{{- define "user-secret-volumes" -}} - -{{ if . }} -{{- range . }} -- name: user-secret-{{ . }} - secret: - secretName: {{ . }} -{{- end }} -{{ end }} - -{{- end -}} - -############################# -# user secret volumeMounts - expects list of secret names -############################# -{{- define "user-secret-volumeMounts" -}} - -{{ if . }} -{{- range . }} -- name: user-secret-{{ . }} - mountPath: /vt/usersecrets/{{ . }} -{{- end }} -{{ end }} - -{{- end -}} diff --git a/helm/vitess/templates/_jobs.tpl b/helm/vitess/templates/_jobs.tpl deleted file mode 100644 index 51a75111d5..0000000000 --- a/helm/vitess/templates/_jobs.tpl +++ /dev/null @@ -1,141 +0,0 @@ -################################### -# keyspace initializations -################################### - -{{- define "vtctlclient-job" -}} -{{- $job := index . 0 -}} -{{- $defaultVtctlclient := index . 1 -}} -{{- $namespace := index . 
2 -}} - -{{- $vitessTag := $job.vitessTag | default $defaultVtctlclient.vitessTag -}} -{{- $secrets := $job.secrets | default $defaultVtctlclient.secrets }} ---- -################################### -# Vitess vtctlclient Job -################################### -apiVersion: batch/v1 -kind: Job -metadata: - name: vtctlclient-{{ $job.name }} -spec: - backoffLimit: 1 - template: - metadata: - labels: - app: vitess - component: vtctlclient - vtctlclientJob: "true" - - spec: - restartPolicy: OnFailure - containers: - - name: vtjob - image: "vitess/vtctlclient:{{$vitessTag}}" - volumeMounts: -{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }} - resources: -{{ toYaml ($job.resources | default $defaultVtctlclient.resources) | indent 10 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - VTCTLD_SVC=vtctld.{{ $namespace }}:15999 - VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }}) - vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC {{ $job.command }} - volumes: -{{ include "user-secret-volumes" $secrets | indent 8 }} - -{{- end -}} - -{{- define "vtworker-job" -}} -{{- $job := index . 0 -}} -{{- $defaultVtworker := index . 1 -}} -{{- $namespace := index . 2 -}} -{{- $cell := index . 3 -}} - -{{- $vitessTag := $job.vitessTag | default $defaultVtworker.vitessTag -}} -{{- $secrets := $job.secrets | default $defaultVtworker.secrets }} ---- - -################################### -# vtworker ServiceAccount -################################### -apiVersion: v1 -kind: ServiceAccount -metadata: - name: vtworker - labels: - app: vitess ---- - -################################### -# vtgate RoleBinding -################################### -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: vtworker-topo-member -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: vt-topo-member -subjects: -- kind: ServiceAccount - name: vtworker - namespace: {{ $namespace }} ---- - -################################### -# Vitess vtworker Job -################################### -apiVersion: batch/v1 -kind: Job -metadata: - name: vtworker-{{ $job.name }} -spec: - backoffLimit: 1 - template: - metadata: - labels: - app: vitess - component: vtworker - vtworkerJob: "true" - - spec: - serviceAccountName: vtworker -{{ include "pod-security" . 
| indent 6 }} - restartPolicy: OnFailure - containers: - - name: vtjob - image: "vitess/vtworker:{{$vitessTag}}" - volumeMounts: -{{ include "user-secret-volumeMounts" $defaultVtworker.secrets | indent 10 }} - resources: -{{ toYaml ($job.resources | default $defaultVtworker.resources) | indent 10 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - eval exec /vt/bin/vtworker $(cat < /dev/null 2>&1; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for vtctlclient to be ready" - exit 1 - fi - sleep 5 - done - - while true; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for master" - exit 1 - fi - - # wait for all shards to have a master - {{- range $shard := $keyspace.shards }} - master_alias=$(vtctlclient ${VTLCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid') - if [ "$master_alias" == "null" -o "$master_alias" == "" ]; then - echo "no master for '{{ $keyspace.name }}/{{ $shard.name }}' yet, continuing to wait" - sleep 5 - continue - fi - {{- end }} - - break - done - - vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ApplySchema -sql "$(cat < /dev/null 2>&1; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for keyspace {{ $keyspace.name }} to be ready" - exit 1 - fi - sleep 5 - done - - vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ApplyVSchema -vschema "$(cat < 1000000", - "DiscoverByShowSlaveHosts": false, - "EnableSyslog": false, - "ExpiryHostnameResolvesMinutes": 60, - "DelayMasterPromotionIfSQLThreadNotUpToDate": true, - "FailureDetectionPeriodBlockMinutes": 10, - "GraphiteAddr": "", - "GraphiteConvertHostnameDotsToUnderscores": true, - "GraphitePath": "", - "HostnameResolveMethod": "none", - "HTTPAuthPassword": "", - "HTTPAuthUser": "", - "HTTPAdvertise": "http://POD_NAME.orchestrator-headless.{{ $namespace }}:3000", - "InstanceBulkOperationsWaitTimeoutSeconds": 10, - "InstancePollSeconds": 5, - "ListenAddress": ":3000", - "MasterFailoverLostInstancesDowntimeMinutes": 0, - "MySQLConnectTimeoutSeconds": 1, - "MySQLHostnameResolveMethod": "none", - "MySQLTopologyCredentialsConfigFile": "", - "MySQLTopologyMaxPoolConnections": 3, - "MySQLTopologyPassword": "orc_client_user_password", - "MySQLTopologyReadTimeoutSeconds": 3, - "MySQLTopologySSLCAFile": "", - "MySQLTopologySSLCertFile": "", - "MySQLTopologySSLPrivateKeyFile": "", - "MySQLTopologySSLSkipVerify": true, - "MySQLTopologyUseMutualTLS": false, - "MySQLTopologyUser": "orc_client_user", - "OnFailureDetectionProcesses": [ - "echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}' >> /tmp/recovery.log" - ], - "OSCIgnoreHostnameFilters": [ - ], - "PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]vitess[.]io", - "PostFailoverProcesses": [ - "echo '(for all types) Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostIntermediateMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. 
Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log", - "n=0; until [ $n -ge 10 ]; do vtctlclient {{ include "format-flags-inline" $defaultVtctlclient.extraFlags | toJson | trimAll "\"" }} -server vtctld.{{ $namespace }}:15999 TabletExternallyReparented {successorAlias} && break; n=$[$n+1]; sleep 5; done" - ], - "PostponeSlaveRecoveryOnLagMinutes": 0, - "PostUnsuccessfulFailoverProcesses": [ - ], - "PowerAuthUsers": [ - "*" - ], - "PreFailoverProcesses": [ - "echo 'Will recover from {failureType} on {failureCluster}' >> /tmp/recovery.log" - ], - "ProblemIgnoreHostnameFilters": [ - ], - "PromotionIgnoreHostnameFilters": [ - ], - "PseudoGTIDMonotonicHint": "asc:", - "PseudoGTIDPattern": "drop view if exists .*?`_pseudo_gtid_hint__", - "RaftAdvertise": "POD_NAME.{{ $namespace }}", - "RaftBind": "POD_NAME", - "RaftDataDir": "/var/lib/orchestrator", - "RaftEnabled": true, - "RaftNodes": [ - {{ range $i := until (int $orc.replicas) }} - "orchestrator-{{ $i }}.{{ $namespace }}"{{ if lt $i (sub (int64 $orc.replicas) 1) }},{{ end }} - {{ end }} - ], - "ReadLongRunningQueries": false, - "ReadOnly": false, - "ReasonableMaintenanceReplicationLagSeconds": 20, - "ReasonableReplicationLagSeconds": 10, - "RecoverMasterClusterFilters": [ - ".*" - ], - "RecoveryIgnoreHostnameFilters": [ - ], - "RecoveryPeriodBlockSeconds": 60, - "ReduceReplicationAnalysisCount": true, - "RejectHostnameResolvePattern": "", - "RemoveTextFromHostnameDisplay": ".vitess.io:3306", -{{ if $enableHeartbeat }} - "ReplicationLagQuery": "SELECT unix_timestamp() - floor(ts/1000000000) FROM `_vt`.heartbeat ORDER BY ts DESC LIMIT 1;", -{{ else }} - "ReplicationLagQuery": "", -{{ end }} - "ServeAgentsHttp": false, - "SkipBinlogEventsContaining": [ - ], - "SkipBinlogServerUnresolveCheck": true, - "SkipOrchestratorDatabaseUpdate": false, - "SlaveStartPostWaitMilliseconds": 1000, - "SnapshotTopologiesIntervalHours": 0, - "SQLite3DataFile": ":memory:", - "SSLCAFile": "", - "SSLCertFile": "", - "SSLPrivateKeyFile": "", - "SSLSkipVerify": false, - "SSLValidOUs": [ - ], - "StaleSeedFailMinutes": 60, - "StatusEndpoint": "/api/status", - "StatusOUVerify": false, - "UnseenAgentForgetHours": 6, - "UnseenInstanceForgetHours": 240, - "UseMutualTLS": false, - "UseSSL": false, - "VerifyReplicationFilters": false - } -{{ end }} diff --git a/helm/vitess/templates/_orchestrator.tpl b/helm/vitess/templates/_orchestrator.tpl deleted file mode 100644 index 6c8a94de0d..0000000000 --- a/helm/vitess/templates/_orchestrator.tpl +++ /dev/null @@ -1,222 +0,0 @@ -################################### -# Master Orchestrator Service -################################### -{{ define "orchestrator" -}} -# set tuple values to more recognizable variables -{{- $orc := index . 0 -}} -{{- $defaultVtctlclient := index . 
1 }} - -apiVersion: v1 -kind: Service -metadata: - name: orchestrator - labels: - app: vitess - component: orchestrator -spec: - ports: - - name: web - port: 80 - targetPort: 3000 - selector: - app: vitess - component: orchestrator - type: ClusterIP - ---- -################################### -# Headless Orchestrator Service -################################### -apiVersion: v1 -kind: Service -metadata: - name: orchestrator-headless - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - labels: - app: vitess - component: orchestrator -spec: - clusterIP: None - ports: - - name: web - port: 80 - targetPort: 3000 - selector: - component: orchestrator - app: vitess - ---- - -################################### -# Orchestrator StatefulSet -################################### -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: orchestrator -spec: - serviceName: orchestrator-headless - replicas: {{ $orc.replicas }} - podManagementPolicy: Parallel - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app: vitess - component: orchestrator - template: - metadata: - labels: - app: vitess - component: orchestrator - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - # strongly prefer to stay away from other orchestrators - - weight: 100 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - app: "vitess" - component: "orchestrator" - - initContainers: -{{ include "init-orchestrator" $orc | indent 8 }} - - containers: - - name: orchestrator - image: {{ $orc.image | quote }} - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: web - protocol: TCP - - containerPort: 10008 - name: raft - protocol: TCP - livenessProbe: - httpGet: - path: /api/lb-check - port: 3000 - initialDelaySeconds: 300 - timeoutSeconds: 10 - readinessProbe: - httpGet: - path: "/api/raft-health" - port: 3000 - timeoutSeconds: 10 - - resources: -{{ toYaml ($orc.resources) | indent 12 }} - - volumeMounts: - - name: config-shared - mountPath: /conf/ - - name: tmplogs - mountPath: /tmp -{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 12 }} - env: - - name: VTCTLD_SERVER_PORT - value: "15999" - - - name: recovery-log - image: vitess/logtail:helm-2.0.0-0 - imagePullPolicy: IfNotPresent - env: - - name: TAIL_FILEPATH - value: /tmp/recovery.log - volumeMounts: - - name: tmplogs - mountPath: /tmp - - - name: audit-log - image: vitess/logtail:helm-2.0.0-0 - imagePullPolicy: IfNotPresent - env: - - name: TAIL_FILEPATH - value: /tmp/orchestrator-audit.log - volumeMounts: - - name: tmplogs - mountPath: /tmp - - volumes: - - name: config-map - configMap: - name: orchestrator-cm - - name: config-shared - emptyDir: {} - - name: tmplogs - emptyDir: {} -{{ include "user-secret-volumes" $defaultVtctlclient.secrets | indent 8 }} - -{{- end -}} - -################################### -# Per StatefulSet Orchestrator Service -################################### -{{ define "orchestrator-statefulset-service" -}} -# set tuple values to more recognizable variables -{{- $orc := index . 0 -}} -{{- $i := index . 
1 }} - -apiVersion: v1 -kind: Service -metadata: - name: orchestrator-{{ $i }} - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - labels: - app: vitess - component: orchestrator -spec: - ports: - - name: web - port: 80 - targetPort: 3000 - - name: raft - port: 10008 - targetPort: 10008 - selector: - component: orchestrator - app: vitess - # this should be auto-filled by kubernetes - statefulset.kubernetes.io/pod-name: "orchestrator-{{ $i }}" - -{{- end -}} - -################################### -# init-container to copy and sed -# Orchestrator config from ConfigMap -################################### -{{ define "init-orchestrator" -}} -{{- $orc := . }} - -- name: init-orchestrator - image: {{ $orc.image | quote }} - volumeMounts: - - name: config-map - mountPath: /conftmp/ - - name: config-shared - mountPath: /conf/ - env: - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - command: ["bash"] - args: - - "-c" - - | - set -ex - - # make a copy of the config map file before editing it locally - cp /conftmp/orchestrator.conf.json /conf/orchestrator.conf.json - - # set the local config to advertise/bind its own service IP - sed -i -e "s/POD_NAME/$MY_POD_NAME/g" /conf/orchestrator.conf.json - -{{- end -}} diff --git a/helm/vitess/templates/_pmm.tpl b/helm/vitess/templates/_pmm.tpl deleted file mode 100644 index 5ba0d48ce4..0000000000 --- a/helm/vitess/templates/_pmm.tpl +++ /dev/null @@ -1,231 +0,0 @@ -################################### -# pmm Service + Deployment -################################### -{{ define "pmm" -}} -# set tuple values to more recognizable variables -{{- $pmm := index . 0 -}} -{{- $namespace := index . 1 }} - -################################### -# pmm Service -################################### -kind: Service -apiVersion: v1 -metadata: - name: pmm - labels: - component: pmm - app: vitess -spec: - ports: - - name: web - port: 80 - - selector: - component: pmm - app: vitess - type: ClusterIP ---- -################################### -# pmm StatefulSet -################################### -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: pmm -spec: - serviceName: pmm - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app: vitess - component: pmm - template: - metadata: - labels: - app: vitess - component: pmm - spec: - containers: - - name: pmm - image: "percona/pmm-server:{{ $pmm.pmmTag }}" - - ports: - - name: web - containerPort: 80 - - volumeMounts: - - name: pmmdata - mountPath: /pmmdata - - resources: -{{ toYaml $pmm.server.resources | indent 12 }} - - env: - - name: DISABLE_UPDATES - value: "true" - - - name: DISABLE_TELEMETRY - value: {{ $pmm.server.env.disableTelemetry | quote }} - - - name: METRICS_RESOLUTION - value: {{ $pmm.server.env.metricsResolution | quote }} - - - name: METRICS_RETENTION - value: {{ $pmm.server.env.metricsRetention | quote }} - - - name: QUERIES_RETENTION - value: {{ $pmm.server.env.queriesRetention | quote }} - - - name: METRICS_MEMORY - value: {{ $pmm.server.env.metricsMemory | quote }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - - if [ ! 
-f /pmmdata/vitess-init ]; then - # the PV hasn't been initialized, so copy over default - # pmm-server directories before symlinking - mkdir -p /pmmdata - - mv /opt/prometheus/data /pmmdata/data - mv /opt/consul-data /pmmdata - mv /var/lib/mysql /pmmdata - mv /var/lib/grafana /pmmdata - - # initialize the PV and then mark it complete - touch /pmmdata/vitess-init - else - # remove the default directories so we can symlink the - # existing PV directories - rm -Rf /opt/prometheus/data - rm -Rf /opt/consul-data - rm -Rf /var/lib/mysql - rm -Rf /var/lib/grafana - fi - - # symlink pmm-server paths to point to our PV - ln -s /pmmdata/data /opt/prometheus/ - ln -s /pmmdata/consul-data /opt/ - ln -s /pmmdata/mysql /var/lib/ - ln -s /pmmdata/grafana /var/lib/ - - /opt/entrypoint.sh - - volumeClaimTemplates: - - metadata: - name: pmmdata - annotations: -{{ toYaml $pmm.server.dataVolumeClaimAnnotations | indent 10 }} - spec: -{{ toYaml $pmm.server.dataVolumeClaimSpec | indent 8 }} - -{{- end -}} - -################################### -# sidecar container running pmm-client -################################### -{{ define "cont-pmm-client" -}} -{{- $pmm := index . 0 -}} -{{- $namespace := index . 1 -}} -{{- $keyspace := index . 2 }} - -- name: "pmm-client" - image: "vitess/pmm-client:{{ $pmm.pmmTag }}" - imagePullPolicy: IfNotPresent - volumeMounts: - - name: vtdataroot - mountPath: "/vtdataroot" -{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }} - - name: config - mountPath: "/vt-pmm-config" -{{ end }}{{ end }} - - ports: - - containerPort: 42001 - name: query-data - - containerPort: 42002 - name: mysql-metrics - - securityContext: - # PMM requires root privileges - runAsUser: 0 - - resources: -{{ toYaml $pmm.client.resources | indent 4 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - # Redirect pmm-client data to persistent volume - if [ ! -d /vtdataroot/pmm ]; then - FIRST_RUN=1 - mkdir -p /vtdataroot/pmm/percona - mkdir -p /vtdataroot/pmm/init.d - fi - - mv /usr/local/percona /usr/local/percona_tmp - mv /etc/init.d /etc/init.d_tmp - ln -s /vtdataroot/pmm/percona /usr/local/percona - ln -s /vtdataroot/pmm/init.d /etc/init.d - ln -s /vtdataroot/pmm/pmm-mysql-metrics-42002.log /var/log/pmm-mysql-metrics-42002.log - - if [ ! 
-z "$FIRST_RUN" ]; then - cp -r /usr/local/percona_tmp/* /vtdataroot/pmm/percona || : - cp -r /etc/init.d_tmp/* /vtdataroot/pmm/init.d || : - fi - -{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }} - # link all the configmap files into their expected file locations - for filename in /vt-pmm-config/*; do - DEST_FILE=/vtdataroot/pmm/percona/pmm-client/$(basename "$filename") - rm -f $DEST_FILE - ln -s "$filename" $DEST_FILE - done -{{ end }}{{ end }} - - # if this doesn't return an error, pmm-admin has already been configured - # and we want to stop/remove running services, in case pod ips have changed - if pmm-admin info; then - pmm-admin stop --all - pmm-admin repair - pmm-admin rm --all - fi - - pmm-admin config --server pmm.{{ $namespace }} --bind-address `hostname -I` --client-address ${HOSTNAME}.vttablet --force - pmm-admin repair - - # wait for mysql to be available before registering - until [ -e /vtdataroot/tabletdata/mysql.sock ]; do - echo "Waiting for mysql.sock file" - sleep 1 - done - - # creates systemd services - pmm-admin add linux:metrics - pmm-admin add mysql:metrics --user root --socket /vtdataroot/tabletdata/mysql.sock --force - pmm-admin add mysql:queries --user root --socket /vtdataroot/tabletdata/mysql.sock --force --query-source=perfschema - - # keep the container alive but still responsive to stop requests - trap : TERM INT; sleep infinity & wait - -- name: pmm-client-metrics-log - image: vitess/logtail:helm-2.0.0-0 - imagePullPolicy: IfNotPresent - env: - - name: TAIL_FILEPATH - value: /vtdataroot/pmm/pmm-mysql-metrics-42002.log - volumeMounts: - - name: vtdataroot - mountPath: /vtdataroot - -{{- end -}} diff --git a/helm/vitess/templates/_shard.tpl b/helm/vitess/templates/_shard.tpl deleted file mode 100644 index 3d075c24f8..0000000000 --- a/helm/vitess/templates/_shard.tpl +++ /dev/null @@ -1,203 +0,0 @@ -################################### -# shard initializations -################################### - -{{ define "shard" -}} -{{- $cell := index . 0 -}} -{{- $keyspace := index . 1 -}} -{{- $shard := index . 2 -}} -{{- $defaultVtctlclient := index . 3 -}} -{{- $namespace := index . 4 -}} -{{- $totalTabletCount := index . 
5 -}} - -{{- $cellClean := include "clean-label" $cell.name -}} -{{- $keyspaceClean := include "clean-label" $keyspace.name -}} -{{- $shardClean := include "clean-label" $shard.name -}} -{{- $shardName := printf "%s-%s-%s" $cellClean $keyspaceClean $shardClean | lower -}} - -{{- with $cell.vtctld }} -# define image to use -{{- $vitessTag := .vitessTag | default $defaultVtctlclient.vitessTag }} ---- -################################### -# InitShardMaster Job -################################### -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ $shardName }}-init-shard-master -spec: - backoffLimit: 1 - template: - metadata: - labels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - initShardMasterJob: "true" - - spec: - restartPolicy: OnFailure - containers: - - name: init-shard-master - image: "vitess/vtctlclient:{{$vitessTag}}" - volumeMounts: -{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - VTCTLD_SVC=vtctld.{{ $namespace }}:15999 - SECONDS=0 - TIMEOUT_SECONDS=600 - VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }}) - - # poll every 5 seconds to see if vtctld is ready - until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }} > /dev/null 2>&1; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for vtctlclient to be ready" - exit 1 - fi - sleep 5 - done - - until [ $TABLETS_READY ]; do - # get all the tablets in the current cell - cellTablets="$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }})" - - # filter to only the tablets in our current shard - shardTablets=$( echo "$cellTablets" | awk 'substr( $5,1,{{ len $shardName }} ) == "{{ $shardName }}" {print $0}') - - # check for a master tablet from the ListAllTablets call - masterTablet=$( echo "$shardTablets" | awk '$4 == "master" {print $1}') - if [ $masterTablet ]; then - echo "'$masterTablet' is already the master tablet, exiting without running InitShardMaster" - exit - fi - - # check for a master tablet from the GetShard call - master_alias=$(vtctlclient ${VTLCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid') - if [ "$master_alias" != "null" -a "$master_alias" != "" ]; then - echo "'$master_alias' is already the master tablet, exiting without running InitShardMaster" - exit - fi - - # count the number of newlines for the given shard to get the tablet count - tabletCount=$( echo "$shardTablets" | wc | awk '{print $1}') - - # check to see if the tablet count equals the expected tablet count - if [ $tabletCount == {{ $totalTabletCount }} ]; then - TABLETS_READY=true - else - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for tablets to be ready" - exit 1 - fi - - # wait 5 seconds for vttablets to continue getting ready - sleep 5 - fi - - done - - # find the tablet id for the "-replica-0" stateful set for a given cell, keyspace and shard - tablet_id=$( echo "$shardTablets" | awk 'substr( $5,1,{{ add (len $shardName) 10 }} ) == "{{ $shardName }}-replica-0" {print $1}') - - # initialize the shard master - until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC InitShardMaster -force {{ $keyspace.name }}/{{ $shard.name }} $tablet_id; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for InitShardMaster 
to succeed" - exit 1 - fi - sleep 5 - done - volumes: -{{ include "user-secret-volumes" (.secrets | default $defaultVtctlclient.secrets) | indent 8 }} - -{{- $copySchema := ($keyspace.copySchema | default $shard.copySchema) -}} -{{- if $copySchema }} ---- -################################### -# CopySchemaShard Job -################################### -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ $keyspaceClean }}-copy-schema-{{ $shardClean }} -spec: - backoffLimit: 1 - template: - metadata: - labels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - copySchemaShardJob: "true" - - spec: - restartPolicy: OnFailure - containers: - - name: copy-schema - image: "vitess/vtctlclient:{{$vitessTag}}" - volumeMounts: -{{ include "user-secret-volumeMounts" $defaultVtctlclient.secrets | indent 10 }} - - command: ["bash"] - args: - - "-c" - - | - set -ex - - VTCTLD_SVC=vtctld.{{ $namespace }}:15999 - SECONDS=0 - TIMEOUT_SECONDS=600 - VTCTL_EXTRA_FLAGS=({{ include "format-flags-inline" $defaultVtctlclient.extraFlags }}) - - # poll every 5 seconds to see if vtctld is ready - until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ $cell.name }} > /dev/null 2>&1; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for vtctlclient to be ready" - exit 1 - fi - sleep 5 - done - - while true; do - if (( $SECONDS > $TIMEOUT_SECONDS )); then - echo "timed out waiting for master" - exit 1 - fi - - # wait for all shards to have a master - master_alias=$(vtctlclient ${VTLCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ $keyspace.name }}/{{ $shard.name }} | jq '.master_alias.uid') - if [ "$master_alias" == "null" -o "$master_alias" == "" ]; then - echo "no master for '{{ $keyspace.name }}/{{ $shard.name }}' yet, continuing to wait" - sleep 5 - continue - fi - - break - done - - vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC CopySchemaShard {{ if $copySchema.tables -}} - -tables=' - {{- range $index, $table := $copySchema.tables -}} - {{- if $index -}},{{- end -}} - {{ $table }} - {{- end -}} - ' - {{- end }} {{ $copySchema.source }} {{ $keyspace.name }}/{{ $shard.name }} - volumes: -{{ include "user-secret-volumes" (.secrets | default $defaultVtctlclient.secrets) | indent 8 }} -{{ end }} - - -{{- end -}} -{{- end -}} diff --git a/helm/vitess/templates/_vtctld.tpl b/helm/vitess/templates/_vtctld.tpl deleted file mode 100644 index be70e1f4d4..0000000000 --- a/helm/vitess/templates/_vtctld.tpl +++ /dev/null @@ -1,167 +0,0 @@ -################################### -# vtctld Service + Deployment -################################### -{{ define "vtctld" -}} -# set tuple values to more recognizable variables -{{- $topology := index . 0 -}} -{{- $cell := index . 1 -}} -{{- $defaultVtctld := index . 2 -}} -{{- $namespace := index . 3 -}} -{{- $config := index . 
4 -}} - -{{- with $cell.vtctld -}} - -# define image to use -{{- $vitessTag := .vitessTag | default $defaultVtctld.vitessTag -}} -{{- $cellClean := include "clean-label" $cell.name }} - -################################### -# vtctld Service -################################### -kind: Service -apiVersion: v1 -metadata: - name: vtctld - labels: - component: vtctld - app: vitess -spec: - ports: - - name: web - port: 15000 - - name: grpc - port: 15999 - selector: - component: vtctld - app: vitess - type: {{.serviceType | default $defaultVtctld.serviceType}} ---- - -################################### -# vtctld ServiceAccount -################################### -apiVersion: v1 -kind: ServiceAccount -metadata: - name: vtctld - labels: - app: vitess ---- - -################################### -# vtctld RoleBinding -################################### -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: vtctld-topo-member -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: vt-topo-member -subjects: -- kind: ServiceAccount - name: vtctld - namespace: {{ $namespace }} ---- - -################################### -# vtctld Service + Deployment -################################### -apiVersion: apps/v1 -kind: Deployment -metadata: - name: vtctld -spec: - replicas: {{.replicas | default $defaultVtctld.replicas}} - selector: - matchLabels: - app: vitess - component: vtctld - template: - metadata: - labels: - app: vitess - component: vtctld - spec: - serviceAccountName: vtctld -{{ include "pod-security" . | indent 6 }} -{{ include "vtctld-affinity" (tuple $cellClean $cell.region) | indent 6 }} - containers: - - name: vtctld - image: vitess/vtctld:{{$vitessTag}} - imagePullPolicy: IfNotPresent - readinessProbe: - httpGet: - path: /debug/health - port: 15000 - initialDelaySeconds: 30 - timeoutSeconds: 5 - livenessProbe: - httpGet: - path: /debug/status - port: 15000 - initialDelaySeconds: 30 - timeoutSeconds: 5 - env: -{{ include "backup-env" $config.backup | indent 12 }} - volumeMounts: -{{ include "backup-volumeMount" $config.backup | indent 12 }} -{{ include "user-secret-volumeMounts" (.secrets | default $defaultVtctld.secrets) | indent 12 }} - resources: -{{ toYaml (.resources | default $defaultVtctld.resources) | indent 12 }} - command: - - bash - - "-c" - - | - set -ex; - -{{ include "backup-exec" $config.backup | indent 14 }} - - eval exec /vt/bin/vtctld $(cat < /mysqlcreds/creds.json - -{{- end -}} -{{- end -}} diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl deleted file mode 100644 index 4c168dc157..0000000000 --- a/helm/vitess/templates/_vttablet.tpl +++ /dev/null @@ -1,710 +0,0 @@ -################################### -# vttablet Service -################################### -{{ define "vttablet-service" -}} -# set tuple values to more recognizable variables -{{- $pmm := index . 
0 }} -apiVersion: v1 -kind: Service -metadata: - name: vttablet - labels: - app: vitess - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -spec: - publishNotReadyAddresses: true - ports: - - port: 15002 - name: web - - port: 16002 - name: grpc -{{ if $pmm.enabled }} - - port: 42001 - name: query-data - - port: 42002 - name: mysql-metrics -{{ end }} - clusterIP: None - selector: - app: vitess - component: vttablet ---- -{{- end -}} - -################################### -# vttablet ServiceAccount -################################### -{{ define "vttablet-serviceaccount" -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: vttablet - labels: - app: vitess ---- -{{ end }} - -################################### -# vttablet RoleBinding -################################### -{{ define "vttablet-topo-role-binding" -}} -{{- $namespace := index . 0 -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: vttablet-topo-member -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: vt-topo-member -subjects: -- kind: ServiceAccount - name: vttablet - namespace: {{ $namespace }} ---- -{{ end }} - -################################### -# vttablet -################################### -{{ define "vttablet" -}} -# set tuple values to more recognizable variables -{{- $topology := index . 0 -}} -{{- $cell := index . 1 -}} -{{- $keyspace := index . 2 -}} -{{- $shard := index . 3 -}} -{{- $tablet := index . 4 -}} -{{- $defaultVttablet := index . 5 -}} -{{- $defaultVtctlclient := index . 6 -}} -{{- $namespace := index . 7 -}} -{{- $config := index . 8 -}} -{{- $pmm := index . 9 -}} -{{- $orc := index . 10 -}} - -# sanitize inputs for labels -{{- $cellClean := include "clean-label" $cell.name -}} -{{- $keyspaceClean := include "clean-label" $keyspace.name -}} -{{- $shardClean := include "clean-label" $shard.name -}} -{{- $uid := "$(cat /vtdataroot/tabletdata/tablet-uid)" }} -{{- $setName := printf "%s-%s-%s-%s" $cellClean $keyspaceClean $shardClean $tablet.type | lower -}} -{{- $shardName := printf "%s-%s-%s" $cellClean $keyspaceClean $shardClean | lower -}} - -{{- with $tablet.vttablet -}} - -# define images to use -{{- $vitessTag := .vitessTag | default $defaultVttablet.vitessTag -}} -{{- $image := .image | default $defaultVttablet.image -}} -{{- $mysqlImage := .mysqlImage | default $defaultVttablet.mysqlImage -}} -{{- $mysqlImage := .mysqlImage | default $defaultVttablet.mysqlImage }} ---- -################################### -# vttablet StatefulSet -################################### -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ $setName | quote }} -spec: - serviceName: vttablet - replicas: {{ .replicas | default $defaultVttablet.replicas }} - podManagementPolicy: Parallel - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - type: {{ $tablet.type | quote }} - template: - metadata: - labels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - type: {{ $tablet.type | quote }} - spec: - serviceAccountName: vttablet - terminationGracePeriodSeconds: {{ $defaultVttablet.terminationGracePeriodSeconds | default 60000000 }} -{{ include "pod-security" . 
| indent 6 }} - -{{ if eq ($topology.deploymentType | default "prod") "prod" }} -{{ include "vttablet-affinity" (tuple $cellClean $keyspaceClean $shardClean $cell.region) | indent 6 }} -{{ end }} - - initContainers: -{{ include "init-mysql" (tuple $topology $vitessTag $cellClean) | indent 8 }} -{{ include "init-vttablet" (tuple $topology $vitessTag $cell $cellClean $namespace) | indent 8 }} - - containers: -{{ include "cont-mysql" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $uid) | indent 8 }} -{{ include "cont-vttablet" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $defaultVtctlclient $vitessTag $uid $namespace $config $orc) | indent 8 }} - -{{ if eq ($topology.deploymentType | default "prod") "prod" }} -{{ include "cont-logrotate" . | indent 8 }} -{{ include "cont-mysql-generallog" . | indent 8 }} -{{ include "cont-mysql-errorlog" . | indent 8 }} -{{ include "cont-mysql-slowlog" . | indent 8 }} -{{ end }} - -{{ if $pmm.enabled }}{{ include "cont-pmm-client" (tuple $pmm $namespace $keyspace) | indent 8 }}{{ end }} - - volumes: - - name: vt - emptyDir: {} -{{ include "backup-volume" $config.backup | indent 8 }} -{{ include "user-config-volume" (.extraMyCnf | default $defaultVttablet.extraMyCnf) | indent 8 }} -{{ include "user-secret-volumes" (.secrets | default $defaultVttablet.secrets) | indent 8 }} -{{ if $keyspace.pmm }}{{if $keyspace.pmm.config }} - - name: config - configMap: - name: {{ $keyspace.pmm.config }} -{{ end }}{{ end }} - - volumeClaimTemplates: - - metadata: - name: vtdataroot - annotations: -{{ toYaml (.dataVolumeClaimAnnotations | default $defaultVttablet.dataVolumeClaimAnnotations) | indent 10 }} - spec: -{{ toYaml (.dataVolumeClaimSpec | default $defaultVttablet.dataVolumeClaimSpec) | indent 8 }} - ---- -################################### -# vttablet PodDisruptionBudget -################################### -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ $setName | quote }} -spec: - maxUnavailable: 1 - selector: - matchLabels: - app: vitess - component: vttablet - cell: {{ $cellClean | quote }} - keyspace: {{ $keyspaceClean | quote }} - shard: {{ $shardClean | quote }} - type: {{ $tablet.type | quote }} - -# conditionally add cron job -{{ include "vttablet-backup-cron" (tuple $cellClean $keyspaceClean $shardClean $shardName $keyspace $shard $vitessTag $config.backup $namespace $defaultVtctlclient) }} - -{{- end -}} -{{- end -}} - -################################### -# init-container to copy binaries for mysql -################################### -{{ define "init-mysql" -}} -{{- $topology := index . 0 -}} -{{- $vitessTag := index . 1 -}} -{{- $cellClean := index . 
2 }} - -- name: "init-mysql" - image: "vitess/mysqlctld:{{$vitessTag}}" - imagePullPolicy: IfNotPresent - volumeMounts: - - name: vtdataroot - mountPath: "/vtdataroot" - - name: vt - mountPath: "/vttmp" - - command: ["bash"] - args: - - "-c" - - | - set -ex - # set up the directories vitess needs - mkdir -p /vttmp/bin - mkdir -p /vtdataroot/tabletdata - - # copy necessary assets to the volumeMounts - cp /vt/bin/mysqlctld /vttmp/bin/ - cp /bin/busybox /vttmp/bin/ - cp -R /vt/config /vttmp/ - - # make sure the log files exist - touch /vtdataroot/tabletdata/error.log - touch /vtdataroot/tabletdata/slow-query.log - touch /vtdataroot/tabletdata/general.log - - # remove the old socket file if it is still around - rm -f /vtdataroot/tabletdata/mysql.sock - rm -f /vtdataroot/tabletdata/mysql.sock.lock - -{{- end -}} - -################################### -# init-container to set tablet uid + register tablet with global topo -# This converts the unique identity assigned by StatefulSet (pod name) -# into a 31-bit unsigned integer for use as a Vitess tablet UID. -################################### -{{ define "init-vttablet" -}} -{{- $topology := index . 0 -}} -{{- $vitessTag := index . 1 -}} -{{- $cell := index . 2 -}} -{{- $cellClean := index . 3 -}} -{{- $namespace := index . 4 }} - -- name: init-vttablet - image: "vitess/vtctl:{{$vitessTag}}" - imagePullPolicy: IfNotPresent - volumeMounts: - - name: vtdataroot - mountPath: "/vtdataroot" - command: ["bash"] - args: - - "-c" - - | - set -ex - # Split pod name (via hostname) into prefix and ordinal index. - hostname=$(hostname -s) - [[ $hostname =~ ^(.+)-([0-9]+)$ ]] || exit 1 - pod_prefix=${BASH_REMATCH[1]} - pod_index=${BASH_REMATCH[2]} - # Prepend cell name since tablet UIDs must be globally unique. - uid_name={{$cell.name | replace "_" "-" | lower}}-$pod_prefix - # Take MD5 hash of cellname-podprefix. - uid_hash=$(echo -n $uid_name | md5sum | awk "{print \$1}") - # Take first 24 bits of hash, convert to decimal. - # Shift left 2 decimal digits, add in index. - tablet_uid=$((16#${uid_hash:0:6} * 100 + $pod_index)) - # Save UID for other containers to read. - echo $tablet_uid > /vtdataroot/tabletdata/tablet-uid - # Tell MySQL what hostname to report in SHOW SLAVE HOSTS. - echo report-host=$hostname.vttablet > /vtdataroot/tabletdata/report-host.cnf - # Orchestrator looks there, so it should match -tablet_hostname above. - - # make sure that etcd is initialized - eval exec /vt/bin/vtctl $(cat <= 10.3) - # the flavor determines the base my.cnf file for vitess to function - flavor: mysql56 - - mysqlImage: percona:5.7.26 - # mysqlImage: mysql:5.7.24 - # mysqlImage: mariadb:10.3.11 - - enableHeartbeat: false - - # This requires at least 2 instances of "replica" tablet types, otherwise semi-sync - # will block forever. "rdonly" tablets do not ACK. - enableSemisync: false - - # This sets the vttablet flag "-init_db_name_override" to be the keyspace name, rather - # than "vt_keyspace". This works better with many MySQL client applications - useKeyspaceNameAsDbName: true - - # The name of a config map with N files inside of it. Each file will be added - # to $EXTRA_MY_CNF, overriding any default my.cnf settings - extraMyCnf: "" - # extraMyCnf: extra-my-cnf - - # mysqlSize can be "test" or "prod". Default is "prod". - # If the value is "test", then mysql is instanitated with a smaller footprint. - mysqlSize: prod - - # Additional flags that will be appended to the vttablet command. 
- # The options below are the most commonly adjusted, but any flag can be put here. - # run vttablet --help to see all available flags - extraFlags: - # query server max result size, maximum number of rows allowed to return - # from vttablet for non-streaming queries. - queryserver-config-max-result-size: 10000 - - # query server query timeout (in seconds), this is the query timeout in vttablet side. - # If a query takes more than this timeout, it will be killed. - queryserver-config-query-timeout: 30 - - # query server connection pool size, connection pool is used by - # regular queries (non streaming, not in a transaction) - queryserver-config-pool-size: 24 - - # query server stream connection pool size, stream pool is used by stream queries: - # queries that return results to client in a streaming fashion - queryserver-config-stream-pool-size: 100 - - # query server transaction cap is the maximum number of transactions allowed to - # happen at any given point of a time for a single vttablet. - # e.g. by setting transaction cap to 100, there are at most 100 transactions - # will be processed by a vttablet and the 101th transaction will be blocked - # (and fail if it cannot get connection within specified timeout) - queryserver-config-transaction-cap: 300 - - # Size of the connection pool for app connections - app_pool_size: 40 - - # Size of the connection pool for dba connections - dba_pool_size: 20 - - # User secrets that will be mounted under /vt/usersecrets/{secretname}/ - secrets: [] - - resources: - # common production values 2-4CPU/4-8Gi RAM - # requests: - # cpu: 2 - # memory: 4Gi - - mysqlResources: - # common production values 4CPU/8-16Gi RAM - # requests: - # cpu: 4 - # memory: 8Gi - - # PVC for mysql - dataVolumeClaimAnnotations: - dataVolumeClaimSpec: - # pd-ssd (Google Cloud) - # managed-premium (Azure) - # standard (AWS) - not sure what the default class is for ssd - # Note: Leave storageClassName unset to use cluster-specific default class. - #storageClassName: pd-ssd - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi - -# Default values for pmm -pmm: - enabled: false - pmmTag: 1.17.0 - client: - resources: - requests: - cpu: 50m - memory: 128Mi - - server: - resources: - # requests: - # cpu: 500m - # memory: 1Gi - - # PVC for pmm - dataVolumeClaimAnnotations: - dataVolumeClaimSpec: - # storageClassName: pd-ssd - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi - env: - # DISABLE_TELEMETRY - # With telemetry enabled, your PMM Server sends some statistics to v.percona.com every 24 hours - disableTelemetry: true - - # METRICS_RESOLUTION (Option) - # This option sets the minimum resolution for checking metrics. You should set it if the latency is higher than 1 second - metricsResolution: 1s - - # METRICS_RETENTION (Option) - # This option determines how long metrics are stored at PMM Server. - # The value is passed as a combination of hours, minutes, and seconds, such as 720h0m0s. - # The minutes (a number followed by m) and seconds (a number followed by s) are optional. 
- metricsRetention: 720h - - # QUERIES_RETENTION - # This option determines how many days queries are stored at PMM Server - queriesRetention: 8 - - # METRICS_MEMORY (Option) -- TODO: automatically calculate based on resource limits - # NOTE: The value must be passed in kilobytes - # NOTE: Make sure to quote this value so it isn't converted into scientific notation - - # By default, Prometheus in PMM Server uses up to 768 MB of memory for storing the most recently used data chunks. - # Depending on the amount of data coming into Prometheus, you may require a higher limit to avoid throttling data ingestion, - # or allow less memory consumption if it is needed for other processes. - # The limit affects only memory reserved for data chunks. Actual RAM usage by Prometheus is higher. - # It is recommended to set this limit to roughly 2/3 of the total memory that you are planning to allow for Prometheus. - metricsMemory: "600000" - -# Orchestrator requires at least version >= 3.0.9 and Kubernetes 1.9 to work -# Default values for orchestrator resources -orchestrator: - enabled: false - image: vitess/orchestrator:3.1.1 - replicas: 3 - resources: - requests: - cpu: 50m - memory: 350Mi
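
Editor's note: for anyone still carrying a local copy of the removed chart, the defaults above were meant to be overridden with a user-supplied values file rather than edited in place. The sketch below is illustrative only, not part of the removed tree: it assumes the vttablet defaults shown sit under a top-level `vttablet:` key (that key falls outside the visible hunk), the file name `my-values.yaml` and the specific numbers are hypothetical, and only keys that appear in the removed `helm/vitess/values.yaml` are used.

```yaml
# my-values.yaml -- hypothetical override file for the removed helm/vitess chart.
# The top-level `vttablet:` key is assumed; `pmm:` and `orchestrator:` appear in
# the removed values.yaml above.
vttablet:
  mysqlSize: test                      # smaller MySQL footprint, per the mysqlSize comment
  extraMyCnf: extra-my-cnf             # ConfigMap whose files are appended to $EXTRA_MY_CNF
  extraFlags:
    queryserver-config-pool-size: 48   # illustrative values; any vttablet flag may go here
    queryserver-config-query-timeout: 60
  resources:
    requests:                          # "common production values 2-4CPU/4-8Gi RAM"
      cpu: 2
      memory: 4Gi
  dataVolumeClaimSpec:
    accessModes: ["ReadWriteOnce"]
    resources:
      requests:
        storage: 50Gi                  # default shown above is 10Gi
pmm:
  enabled: true
orchestrator:
  enabled: true                        # per the comment above: orchestrator >= 3.0.9, Kubernetes 1.9
```

Such a file would have been passed via `-f my-values.yaml` to `helm install` / `helm upgrade` against the chart before its removal; with the chart deleted, the equivalent settings must be expressed in whatever deployment tooling replaces it.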