Mirror of https://github.com/github/vitess-gh.git

First pass at Kubernetes support.

Parent: e6f8e85d8e
Commit: 559e598458
examples/kubernetes/README.md
@@ -0,0 +1,45 @@
# Vitess on Kubernetes

This directory contains an example configuration for running Vitess on
[Kubernetes](https://github.com/GoogleCloudPlatform/kubernetes/). Refer to the
appropriate [Getting Started Guide](https://github.com/GoogleCloudPlatform/kubernetes/#contents)
to get Kubernetes up and running if you haven't already. We currently test
against HEAD, so you may want to build Kubernetes from the latest source.

## ZooKeeper

Once you have a running Kubernetes deployment, make sure
*kubernetes/cluster/kubecfg.sh* is in your path, and then run:

```
vitess$ examples/kubernetes/zk-up.sh
```

This will create a quorum of ZooKeeper servers. Clients can connect to port 2181
of any [minion](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/DESIGN.md#cluster-architecture)
(assuming the firewall is set to allow it), and the Kubernetes proxy will
load-balance the connection to any of the servers.
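
For a quick sanity check, you can list what was just created (a minimal sketch,
assuming the usual list action of *kubecfg.sh*, still in your path):

```
$ kubecfg.sh list pods
$ kubecfg.sh list services
```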

A simple way to test out your ZooKeeper deployment is by logging into one of
your minions and running the *zk* client utility inside Docker. For example, if
you are running [Kubernetes on Google Compute Engine](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/getting-started-guides/gce.md):

```
# log in to a minion
$ gcloud compute ssh kubernetes-minion-1

# show zk command usage
kubernetes-minion-1:~$ sudo docker run -ti --rm vitess/root zk

# create a test node in ZooKeeper
kubernetes-minion-1:~$ sudo docker run -ti --rm vitess/root zk -zk.addrs $HOSTNAME:2181 touch -p /zk/test_cell/vt

# check that the node is there
kubernetes-minion-1:~$ sudo docker run -ti --rm vitess/root zk -zk.addrs $HOSTNAME:2181 ls /zk/test_cell
```

To tear down the ZooKeeper deployment (again, with *kubecfg.sh* in your path):

```
vitess$ examples/kubernetes/zk-down.sh
```
examples/kubernetes/zk-client-service.yaml
@@ -0,0 +1,9 @@
apiVersion: v1beta1
kind: Service
id: zk-client
port: 2181
containerPort: 2181
selector:
  name: zk
labels:
  name: zk
examples/kubernetes/zk-down.sh
@@ -0,0 +1,23 @@
#!/bin/bash

# This is an example script that tears down the ZooKeeper servers started by
# zk-up.sh. It assumes that kubernetes/cluster/kubecfg.sh is in the path.

# Delete pods.
for zkid in 1 2 3; do
  echo "Deleting zk$zkid pod..."
  kubecfg.sh delete pods/zk$zkid
done

# Delete client service.
echo "Deleting zk-client service..."
kubecfg.sh delete services/zk-client

# Delete leader and election services.
for zkid in 1 2 3; do
  echo "Deleting zk$zkid-leader service..."
  kubecfg.sh delete services/zk$zkid-leader

  echo "Deleting zk$zkid-election service..."
  kubecfg.sh delete services/zk$zkid-election
done
examples/kubernetes/zk-pod-template.yaml
@@ -0,0 +1,24 @@
apiVersion: v1beta1
kind: Pod
id: zk{{zkid}}
desiredState:
  manifest:
    version: v1beta1
    id: zk{{zkid}}
    containers:
      - name: zk{{zkid}}
        image: vitess/root
        volumeMounts:
          - name: syslog
            mountPath: /dev/log
          - name: vtdataroot
            mountPath: /vt/vtdataroot
        command: [sh, "-c", "chown vitess /vt/vtdataroot && su -p -c \"/vt/bin/zkctl -follow -zk.myid {{zkid}} -zk.cfg {{zkcfg}} init\" vitess"]
    volumes:
      - name: syslog
        source: {hostDir: {path: /dev/log}}
      - name: vtdataroot
        source: {emptyDir: {}}
labels:
  name: zk
  zkid: {{zkid}}
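
The `{{zkid}}` and `{{zkcfg}}` placeholders above are plain-text markers that
zk-up.sh fills in with *sed* before piping the result to *kubecfg.sh*. As a
rough illustration, rendering the template for replica 1 with the zkcfg string
that zk-up.sh builds would look like:

```
# zkcfg as constructed by zk-up.sh; $SERVICE_HOST is kept literal on purpose
# so it expands inside the container, not here.
zkcfg='1@$SERVICE_HOST:28881:38881:2181,2@$SERVICE_HOST:28882:38882:2181,3@$SERVICE_HOST:28883:38883:2181'
sed -e "s/{{zkid}}/1/g" -e "s/{{zkcfg}}/$zkcfg/g" zk-pod-template.yaml
```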
examples/kubernetes/zk-service-template.yaml
@@ -0,0 +1,11 @@
apiVersion: v1beta1
kind: Service
id: zk{{zkid}}-{{svc}}
port: {{port}}{{zkid}}
containerPort: {{port}}{{zkid}}
selector:
  name: zk
  zkid: {{zkid}}
labels:
  name: zk
  zkid: {{zkid}}
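
zk-up.sh renders this template once per replica for each of the leader and
election ports. As a rough illustration, the leader service for replica 1 comes
out with id zk1-leader on port 28881 ({{port}} = 2888 followed by {{zkid}} = 1),
which is the leader port listed for server 1 in zk-up.sh's zkcfg string:

```
sed -e "s/{{zkid}}/1/g" -e "s/{{port}}/2888/g" -e "s/{{svc}}/leader/g" zk-service-template.yaml
```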
examples/kubernetes/zk-up.sh
@@ -0,0 +1,47 @@
#!/bin/bash

# This is an example script that creates a quorum of ZooKeeper servers.
# It assumes that kubernetes/cluster/kubecfg.sh is in the path.

set -e

# List of all servers in the quorum.
zkcfg=(\
    '1@$SERVICE_HOST:28881:38881:2181' \
    '2@$SERVICE_HOST:28882:38882:2181' \
    '3@$SERVICE_HOST:28883:38883:2181' \
    )
printf -v zkcfg ",%s" "${zkcfg[@]}"
zkcfg=${zkcfg:1}
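# At this point zkcfg holds:
#   1@$SERVICE_HOST:28881:38881:2181,2@$SERVICE_HOST:28882:38882:2181,3@$SERVICE_HOST:28883:38883:2181
# $SERVICE_HOST is deliberately left unexpanded; it is evaluated by sh inside
# each container (see the command in zk-pod-template.yaml), not by this script.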

# Create the pods.
echo "Creating zk pods..."
for zkid in 1 2 3; do
  cat zk-pod-template.yaml | \
    sed -e "s/{{zkid}}/$zkid/g" -e "s/{{zkcfg}}/$zkcfg/g" | \
    kubecfg.sh -c - create pods
done

# Create the client service, which will load-balance across all replicas.
echo "Creating zk services..."
kubecfg.sh -c zk-client-service.yaml create services

# Create a service for the leader and election ports of each replica.
# This is necessary because ZooKeeper servers need to know how to specifically
# contact replica N (not just "any replica") in order to create a quorum.
# We also have to append the zkid of each server to the port number, because
# every service in Kubernetes needs a unique port number (for now).
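# For example, replica 1 gets a zk1-leader service on port 28881 and a
# zk1-election service on port 38881, matching how it is addressed in the
# zkcfg string above.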

ports=( 2888 3888 )
svcs=( leader election )

for zkid in 1 2 3; do
  for i in 0 1; do
    port=${ports[$i]}
    svc=${svcs[$i]}

    cat zk-service-template.yaml | \
      sed -e "s/{{zkid}}/$zkid/g" -e "s/{{port}}/$port/g" -e "s/{{svc}}/$svc/g" | \
      kubecfg.sh -c - create services
  done
done