Mirror of https://github.com/github/vitess-gh.git
Merge branch 'master' into resharding
Commit 4df0c2b603
@ -59,6 +59,12 @@ $ gcloud alpha container clusters create example --machine-type n1-standard-1 --

If prompted, install the alpha commands.

Update the configuration with the cluster name:

```
$ gcloud config set container/cluster example
```

## Start an etcd cluster for Vitess

Once you have a running Kubernetes deployment, make sure to set `KUBECTL`
@ -103,8 +109,18 @@ $ gcloud compute firewall-rules create vtctld --allow tcp:15000

# get the address of the load balancer for vtctld
$ gcloud compute forwarding-rules list
NAME REGION IP_ADDRESS IP_PROTOCOL TARGET
vtctld us-central1 12.34.56.78 TCP us-central1/targetPools/vtctld
NAME REGION IP_ADDRESS IP_PROTOCOL TARGET
aa6f47950f5a011e4b8f242010af0fe1 us-central1 12.34.56.78 TCP us-central1/targetPools/aa6f47950f5a011e4b8f242010af0fe1
```

Note that Kubernetes will generate the name of the forwarding-rule and
target-pool based on a hash of source/target IP addresses. If there are
multiple rules (perhaps due to running other services on GKE), use the following
to determine the correct target pool:

```
$ util/get_forwarded_pool.sh example us-central1 15000
aa6f47950f5a011e4b8f242010af0fe1
```

In the example above, you would then access vtctld at
@ -9,15 +9,15 @@ GKE_CLUSTER_NAME=${GKE_CLUSTER_NAME:-'example'}
gke_region=`echo $GKE_ZONE | sed "s/-[^-]*$//"`
base_ssd_name="$GKE_CLUSTER_NAME-vt-ssd-"

gcloud alpha container clusters delete $GKE_CLUSTER_NAME
gcloud alpha container clusters delete $GKE_CLUSTER_NAME -z $GKE_ZONE -q

num_ssds=`gcloud compute disks list | awk -v name="$base_ssd_name" -v zone=$GKE_ZONE '$1~name && $2==zone' | wc -l`
for i in `seq 1 $num_ssds`; do
  gcloud compute disks delete $base_ssd_name$i --zone $GKE_ZONE -q
done

vtctld="k8s-${GKE_CLUSTER_NAME}-default-vtctld"
vtgate="k8s-${GKE_CLUSTER_NAME}-default-vtgate"
vtctld=`util/get_forwarded_pool.sh $GKE_CLUSTER_NAME $gke_region 15000`
vtgate=`util/get_forwarded_pool.sh $GKE_CLUSTER_NAME $gke_region 15001`

gcloud compute forwarding-rules delete $vtctld -q --region=$gke_region
gcloud compute forwarding-rules delete $vtgate -q --region=$gke_region
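Since `get_forwarded_pool.sh` prints nothing when no matching rule exists, a defensive variant of the two deletes above might guard the lookup first (a sketch, not part of this change):

```
for pool in "$vtctld" "$vtgate"; do
  # Skip the delete if the pool lookup came back empty.
  if [ -n "$pool" ]; then
    gcloud compute forwarding-rules delete $pool -q --region=$gke_region
  fi
done
```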
@ -17,7 +17,6 @@ GKE_ZONE=${GKE_ZONE:-'us-central1-b'}
GKE_MACHINE_TYPE=${GKE_MACHINE_TYPE:-'n1-standard-1'}
GKE_CLUSTER_NAME=${GKE_CLUSTER_NAME:-'example'}
GKE_SSD_SIZE_GB=${GKE_SSD_SIZE_GB:-0}
GKE_NUM_NODES=${GKE_NUM_NODES:-0}
SHARDS=${SHARDS:-'-80,80-'}
TABLETS_PER_SHARD=${TABLETS_PER_SHARD:-3}
MAX_TASK_WAIT_RETRIES=${MAX_TASK_WAIT_RETRIES:-300}
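Each of these `${VAR:-default}` assignments can be overridden from the environment at invocation time; a hypothetical example (the enclosing script's filename is assumed here, since the diff does not show it):

```
GKE_MACHINE_TYPE=n1-standard-4 SHARDS='-80,80-' TABLETS_PER_SHARD=5 ./cluster-up.sh
```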
@ -25,11 +24,13 @@ MAX_VTTABLET_TOPO_WAIT_RETRIES=${MAX_VTTABLET_TOPO_WAIT_RETRIES:-180}
BENCHMARK_CLUSTER=${BENCHMARK_CLUSTER:-true}
VTGATE_COUNT=${VTGATE_COUNT:-0}

# Get region from zone (everything to last dash)
gke_region=`echo $GKE_ZONE | sed "s/-[^-]*$//"`
vttablet_template='vttablet-pod-template.yaml'
vtgate_template='vtgate-controller-template.yaml'
vtgate_script='vtgate-up.sh'
if $BENCHMARK_CLUSTER; then
  vttablet_template='vttablet-pod-benchmarking-template.yaml'
  vtgate_template='vtgate-controller-benchmarking-template.yaml'
  vtgate_script='vtgate-benchmarking-up.sh'
fi

# export for vttablet scripts
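The `sed "s/-[^-]*$//"` expression above derives the region by stripping the zone's trailing dash-suffix, for example:

```
$ echo us-central1-b | sed "s/-[^-]*$//"
us-central1
```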
@ -94,15 +95,11 @@ gcloud config set compute/zone $GKE_ZONE
project_id=`gcloud config list project | sed -n 2p | cut -d " " -f 3`
num_shards=`echo $SHARDS | tr "," " " | wc -w`
total_tablet_count=$(($num_shards*$TABLETS_PER_SHARD))
num_nodes=$GKE_NUM_NODES
if [ $num_nodes -eq 0 ]; then
  num_nodes=$(($total_tablet_count>3?$total_tablet_count:3))
fi
vtgate_count=$VTGATE_COUNT
if [ $vtgate_count -eq 0 ]; then
  vtgate_count=$(($total_tablet_count/4>3?$total_tablet_count/4:3))
fi

num_nodes=$(($total_tablet_count+$vtgate_count))

echo "****************************"
echo "*Creating cluster:"
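With the defaults shown earlier (SHARDS='-80,80-', TABLETS_PER_SHARD=3, VTGATE_COUNT=0), the sizing arithmetic works out as in this small sketch:

```
num_shards=2                                    # '-80,80-' splits into two shards
total_tablet_count=$((num_shards*3))            # 2 shards * 3 tablets = 6
vtgate_count=$((total_tablet_count/4>3 ? total_tablet_count/4 : 3))  # max(6/4, 3) = 3
num_nodes=$((total_tablet_count+vtgate_count))  # 6 tablets + 3 vtgates = 9 nodes
```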
@ -117,11 +114,12 @@ echo "* Cluster name: $GKE_CLUSTER_NAME"
echo "* Project ID: $project_id"
echo "****************************"
gcloud alpha container clusters create $GKE_CLUSTER_NAME --machine-type $GKE_MACHINE_TYPE --num-nodes $num_nodes
gcloud config set container/cluster $GKE_CLUSTER_NAME

# We label the nodes so that we can force a 1:1 relationship between vttablets and nodes
for i in `seq 1 $num_nodes`; do
  for j in `seq 0 2`; do
    $KUBECTL label nodes k8s-$GKE_CLUSTER_NAME-node-${i}.c.${project_id}.internal id=$i
    $KUBECTL label nodes k8s-$GKE_CLUSTER_NAME-node-${i} id=$i
    result=`$KUBECTL get nodes | grep id=$i`
    if [ -n "$result" ]; then
      break
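After the labeling loop finishes, a quick sanity check (a sketch reusing the same grep the retry loop relies on) can confirm every node picked up its id label:

```
for i in `seq 1 $num_nodes`; do
  # Warn if no node reports the expected label.
  if [ -z "`$KUBECTL get nodes | grep id=$i`" ]; then
    echo "warning: no node is labeled id=$i"
  fi
done
```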
@ -149,17 +147,18 @@ wait_for_running_tasks etcd 6

run_script vtctld-up.sh
run_script vttablet-up.sh FORCE_NODE=true VTTABLET_TEMPLATE=$vttablet_template
run_script vtgate-up.sh VTGATE_REPLICAS=$vtgate_count VTGATE_TEMPLATE=$vtgate_template
run_script $vtgate_script STARTING_INDEX=$total_tablet_count VTGATE_REPLICAS=$vtgate_count

wait_for_running_tasks vtctld 1
wait_for_running_tasks vttablet $total_tablet_count
wait_for_running_tasks vtgate $vtgate_count

echo Creating firewall rule for vtctld...
vtctl_port=15000
gcloud compute firewall-rules create ${GKE_CLUSTER_NAME}-vtctld --allow tcp:$vtctl_port
vtctl_ip=`gcloud compute forwarding-rules list | grep $GKE_CLUSTER_NAME | grep vtctld | awk '{print $3}'`
vtctl_server="$vtctl_ip:$vtctl_port"
vtctld_port=15000
gcloud compute firewall-rules create ${GKE_CLUSTER_NAME}-vtctld --allow tcp:$vtctld_port
vtctld_pool=`util/get_forwarded_pool.sh $GKE_CLUSTER_NAME $gke_region $vtctld_port`
vtctld_ip=`gcloud compute forwarding-rules list | grep $vtctld_pool | awk '{print $3}'`
vtctl_server="$vtctld_ip:$vtctld_port"
kvtctl="$GOPATH/bin/vtctlclient -server $vtctl_server"

echo Waiting for tablets to be visible in the topology
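With `$kvtctl` defined as above, the cluster can be inspected from the workstation; one hedged example (exact vtctl subcommands depend on the Vitess build in use), using the `test` cell configured in the templates:

```
$kvtctl ListAllTablets test
```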
@ -208,15 +207,11 @@ echo Done
echo Creating firewall rule for vtgate
vtgate_port=15001
gcloud compute firewall-rules create ${GKE_CLUSTER_NAME}-vtgate --allow tcp:$vtgate_port
vtgate_ip=`gcloud compute forwarding-rules list | grep $GKE_CLUSTER_NAME | grep vtgate | awk '{print $3}'`
if [ -z "$vtgate_ip" ]
then
  vtgate_server="No firewall rules created for vtgate. Add createExternalLoadBalancer: true if access to vtgate is desired"
else
  vtgate_server="$vtgate_ip:$vtgate_port"
fi
vtgate_pool=`util/get_forwarded_pool.sh $GKE_CLUSTER_NAME $gke_region $vtgate_port`
vtgate_ip=`gcloud compute forwarding-rules list | grep $vtgate_pool | awk '{print $3}'`
vtgate_server="$vtgate_ip:$vtgate_port"
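The new lookup drops the old empty-address branch; a minimal sketch of keeping an equivalent guard alongside the pool-based lookup (illustrative only, not part of the change):

```
if [ -z "$vtgate_ip" ]; then
  echo "No forwarding rule found for vtgate on port $vtgate_port"
else
  vtgate_server="$vtgate_ip:$vtgate_port"
fi
```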

if ! [ -z $NEWRELIC_LICENSE_KEY ]; then
if [ -n "$NEWRELIC_LICENSE_KEY" -a $GKE_SSD_SIZE_GB -gt 0 ]; then
  for i in `seq 1 $num_nodes`; do
    nodename=k8s-$GKE_CLUSTER_NAME-node-${i}
    gcloud compute copy-files newrelic.sh $nodename:~/
@ -0,0 +1,16 @@
#!/bin/bash

cluster_name=$1
region=$2
port=$3

target_pools=`gcloud compute target-pools list | awk 'NR>1 {print $1}'`
for pool in $target_pools; do
  if [ -n "`gcloud compute target-pools describe $pool --region $region | grep k8s-$cluster_name-node`" ]; then
    if [ -n "`gcloud compute forwarding-rules describe $pool --region $region | grep "portRange: $port"`" ]; then
      echo $pool
      exit 0
    fi
  fi
done
exit -1
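A usage sketch for the helper above, reusing the cluster name, region, and pool name from the earlier example output:

```
$ util/get_forwarded_pool.sh example us-central1 15000
aa6f47950f5a011e4b8f242010af0fe1
$ gcloud compute forwarding-rules list | grep aa6f47950f5a011e4b8f242010af0fe1 | awk '{print $3}'
12.34.56.78
```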
@ -0,0 +1,18 @@
#!/bin/bash

# This is an example script that stops vtgate.

set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

VTGATE_REPLICAS=${VTGATE_REPLICAS:-3}

echo "Stopping vtgate replicationController..."
for uid in `seq 1 $VTGATE_REPLICAS`; do
  $KUBECTL stop replicationController vtgate-$uid
done

echo "Deleting vtgate service..."
$KUBECTL delete service vtgate
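A hedged invocation example, matching the replica count used when the corresponding up script was run (the filename is assumed from context):

```
VTGATE_REPLICAS=5 ./vtgate-benchmarking-down.sh
```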
@ -0,0 +1,33 @@
#!/bin/bash

# This is an example script that starts a vtgate replicationController.

#set -e

script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

VTGATE_REPLICAS=${VTGATE_REPLICAS:-3}
VTDATAROOT_VOLUME=${VTDATAROOT_VOLUME:-''}
STARTING_INDEX=${STARTING_INDEX:--1}

vtdataroot_volume='{emptyDir: {}}'
if [ -n "$VTDATAROOT_VOLUME" ]; then
  vtdataroot_volume="{hostDir: {path: ${VTDATAROOT_VOLUME}}}"
fi

echo "Creating vtgate service..."
$KUBECTL create -f vtgate-service.yaml

echo "Creating vtgate pods..."
for uid in `seq 1 $VTGATE_REPLICAS`; do
  sed_script=""
  for var in uid vtdataroot_volume; do
    sed_script+="s,{{$var}},${!var},g;"
  done
  if [ "$STARTING_INDEX" -gt -1 ]; then
    vtgate_index=$(($STARTING_INDEX+$uid))
    sed_script+="\$anodeSelector:\n id: \"$vtgate_index\""
  fi
  cat vtgate-pod-benchmarking-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
done
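To make the templating concrete, a small illustration of what the accumulated sed script does to the pod template's placeholders (the substituted value is hypothetical):

```
$ echo 'id: vtgate-{{uid}}' | sed -e 's,{{uid}},1,g;'
id: vtgate-1
```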
@ -1,44 +0,0 @@
apiVersion: v1beta1
kind: ReplicationController
id: vtgate
desiredState:
  replicas: {{replicas}}
  replicaSelector: {name: vtgate}
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: vtgate
        containers:
          - name: vtgate
            image: vitess/root
            volumeMounts:
              - name: syslog
                mountPath: /dev/log
              - name: vtdataroot
                mountPath: /vt/vtdataroot
            command:
              - sh
              - "-c"
              - >-
                mkdir -p $VTDATAROOT/tmp &&
                chown -R vitess /vt &&
                su -p -c "/vt/bin/vtgate
                -topo_implementation etcd
                -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
                -log_dir $VTDATAROOT/tmp
                -alsologtostderr
                -port 15001
                -cell test" vitess
            env:
              - name: GOMAXPROCS
                value: 16
        volumes:
          - name: syslog
            source: {hostDir: {path: /dev/log}}
          - name: vtdataroot
            source: {{vtdataroot_volume}}
    labels:
      name: vtgate
labels:
  name: vtgate
@ -0,0 +1,38 @@
apiVersion: v1beta1
kind: Pod
id: vtgate-{{uid}}
desiredState:
  manifest:
    version: v1beta1
    id: vt
    containers:
      - name: vtgate
        image: vitess/root
        volumeMounts:
          - name: syslog
            mountPath: /dev/log
          - name: vtdataroot
            mountPath: /vt/vtdataroot
        command:
          - sh
          - "-c"
          - >-
            mkdir -p $VTDATAROOT/tmp &&
            chown -R vitess /vt &&
            su -p -c "/vt/bin/vtgate
            -topo_implementation etcd
            -etcd_global_addrs http://$ETCD_GLOBAL_SERVICE_HOST:$ETCD_GLOBAL_SERVICE_PORT
            -log_dir $VTDATAROOT/tmp
            -alsologtostderr
            -port 15001
            -cell test" vitess
        env:
          - name: GOMAXPROCS
            value: "16"
    volumes:
      - name: syslog
        source: {hostDir: {path: /dev/log}}
      - name: vtdataroot
        source: {{vtdataroot_volume}}
labels:
  name: vtgate
@ -8,7 +8,6 @@ script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh

VTGATE_REPLICAS=${VTGATE_REPLICAS:-3}
VTGATE_TEMPLATE=${VTGATE_TEMPLATE:-'vtgate-controller-template.yaml'}
VTDATAROOT_VOLUME=${VTDATAROOT_VOLUME:-''}

vtdataroot_volume='{emptyDir: {}}'
@ -27,4 +26,4 @@ for var in replicas vtdataroot_volume; do
done

echo "Creating vtgate replicationController..."
cat $VTGATE_TEMPLATE | sed -e "$sed_script" | $KUBECTL create -f -
cat vtgate-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
@ -65,7 +65,7 @@ desiredState:
            -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock" vitess
        env:
          - name: GOMAXPROCS
            value: 16
            value: "16"
      - name: mysql
        image: vitess/root
        volumeMounts:
@ -15,7 +15,7 @@ import (
	"time"

	"github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -9,7 +9,7 @@ import (
	"time"

	"github.com/youtube/vitess/go/sqldb"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -12,7 +12,7 @@ import (
	mproto "github.com/youtube/vitess/go/mysql/proto"
	"github.com/youtube/vitess/go/sqldb"
	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -16,9 +16,9 @@ import (
	"github.com/youtube/vitess/go/vt/tableacl"
	"github.com/youtube/vitess/go/vt/tableacl/simpleacl"
	"github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/tabletserver/planbuilder"
	"github.com/youtube/vitess/go/vt/tabletserver/proto"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -19,7 +19,7 @@ import (
	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/vt/schema"
	"github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -13,8 +13,8 @@ import (

	mproto "github.com/youtube/vitess/go/mysql/proto"
	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/tabletserver/proto"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -14,7 +14,7 @@ import (
	"github.com/youtube/vitess/go/sqldb"
	"github.com/youtube/vitess/go/sqltypes"
	"github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)
@ -12,7 +12,7 @@ import (

	"github.com/youtube/vitess/go/mysql/proto"
	"github.com/youtube/vitess/go/sqldb"
	"github.com/youtube/vitess/go/vt/tabletserver/fakesqldb"
	"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
	"golang.org/x/net/context"
)