Mirror of https://github.com/github/vitess-gh.git
Merge branch 'master' into jacques_dottablenames
Signed-off-by: Jacques Grove <aquarapid@gmail.com>
Commit f144e9b967
@@ -13,11 +13,11 @@ jobs:
go-version: 1.13

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo service mysql stop
sudo service etcd stop
@@ -13,11 +13,11 @@ jobs:
go-version: 1.13

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo service mysql stop
sudo service etcd stop
@@ -13,11 +13,11 @@ jobs:
go-version: 1.13

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo service mysql stop
sudo service etcd stop
@@ -13,11 +13,11 @@ jobs:
go-version: 1.13

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo service mysql stop
sudo service etcd stop
@@ -7,7 +7,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
os: [ubuntu-latest]

steps:
@@ -17,12 +17,12 @@ jobs:
go-version: 1.13

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
if [ ${{matrix.os}} = "ubuntu-latest" ]; then
sudo apt-get update || echo "update failed"
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo service mysql stop
sudo service etcd stop
@@ -3,28 +3,75 @@ on: [push, pull_request]
jobs:

build:
name: Unit Test
runs-on: ubuntu-latest
steps:
strategy:
matrix:
name: [mysql57, mysql80]

steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.12

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get install -y mysql-server mysql-client make unzip g++ curl git wget ant openjdk-8-jdk
sudo apt-get update

if [ ${{matrix.name}} = "mysql57" ]; then
sudo apt-get install -y mysql-server mysql-client
else
# Uninstall likely installed MySQL first
sudo apt-get remove -y mysql-server mysql-client

if [ ${{matrix.name}} = "percona56" ]; then
# Currently this fails on vt/vttablet/tabletserver/vstreamer
# Once we fix issue #5571 we can enable it.
sudo rm -rf /var/lib/mysql
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt update
sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y percona-server-server-5.6 percona-server-client-5.6
elif [ ${{matrix.name}} = "mysql80" ]; then
wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
elif [ ${{matrix.name}} = "mariadb101" ]; then
sudo apt install -y mariadb-server mariadb-client
elif [ ${{matrix.name}} = "mariadb102" ]; then
# Currently this fails on vitess.io/vitess/go/mysql
# Once we fix issue #5569 we can enable it.
sudo apt-get install -y software-properties-common
sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu bionic main'
sudo apt update
sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server
elif [ ${{matrix.name}} = "mariadb103" ]; then
# Currently this fails on vitess.io/vitess/go/mysql
# Once we fix issue #5569 we can enable it.
sudo apt-get install -y software-properties-common
sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main'
sudo apt update
sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server
fi
fi

sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk
sudo service mysql stop
sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile"

mkdir -p dist bin
curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist
mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/

go mod download

- name: Run make tools
@@ -1,5 +1,5 @@
name: unit_race
on: [push]
on: [repository_dispatch] # disable for now
jobs:

build:
@@ -13,16 +13,20 @@ jobs:
go-version: 1.12

- name: Check out code
uses: actions/checkout@v1
uses: actions/checkout@v2

- name: Get dependencies
run: |
sudo apt-get update || echo "update failed"
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ curl git wget
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld

mkdir -p dist bin
curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist
mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/

go mod download

- name: Run make tools
Makefile (49 changed lines)
@@ -22,7 +22,7 @@ export GODEBUG=tls13=0
# Since we are not using this Makefile for compilation, limiting parallelism will not increase build time.
.NOTPARALLEL:

.PHONY: all build build_web test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools
.PHONY: all build build_web install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools

all: build

@@ -56,6 +56,20 @@ endif
bash ./build.env
go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" -gcflags -'N -l' ./go/...

# install copies the files needed to run Vitess into the given directory tree.
# Usage: make install PREFIX=/path/to/install/root
install: build
# binaries
mkdir -p "$${PREFIX}/bin"
cp "$${VTROOT}/bin/"{mysqlctld,vtctld,vtctlclient,vtgate,vttablet,vtworker,vtbackup} "$${PREFIX}/bin/"
# config files
cp -R config "$${PREFIX}/"
# vtctld web UI files
mkdir -p "$${PREFIX}/src/vitess.io/vitess/web"
cp -R web/vtctld "$${PREFIX}/src/vitess.io/vitess/web/"
mkdir -p "$${PREFIX}/src/vitess.io/vitess/web/vtctld2"
cp -R web/vtctld2/app "$${PREFIX}/src/vitess.io/vitess/web/vtctld2/"

parser:
make -C go/vt/sqlparser

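A minimal usage sketch for the new install target added above; the PREFIX path is only an example, and the binaries listed are the ones the target copies (mysqlctld, vtctld, vtctlclient, vtgate, vttablet, vtworker, vtbackup):

# Build the Go binaries, then stage a runnable Vitess tree under an example prefix.
make build
make install PREFIX=/tmp/vitess-install
ls /tmp/vitess-install/bin     # binaries copied by the install target
ls /tmp/vitess-install/config  # config files copied alongside them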
@@ -206,38 +220,45 @@ docker_base_percona80:
chmod -R o=g *
docker build -f docker/base/Dockerfile.percona80 -t vitess/base:percona80 .

# Run "make docker_lite PROMPT_NOTICE=false" to avoid that the script
# prompts you to press ENTER and confirm that the vitess/base image is not
# rebuild by this target as well.
docker_lite:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE)
chmod -R o=g *
docker build -f docker/lite/Dockerfile -t vitess/lite .

docker_lite_mysql56:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) mysql56
chmod -R o=g *
docker build -f docker/lite/Dockerfile.mysql56 -t vitess/lite:mysql56 .

docker_lite_mysql57:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) mysql57
chmod -R o=g *
docker build -f docker/lite/Dockerfile.mysql57 -t vitess/lite:mysql57 .

docker_lite_mysql80:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) mysql80
chmod -R o=g *
docker build -f docker/lite/Dockerfile.mysql80 -t vitess/lite:mysql80 .

docker_lite_mariadb:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) mariadb
chmod -R o=g *
docker build -f docker/lite/Dockerfile.mariadb -t vitess/lite:mariadb .

docker_lite_mariadb103:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) mariadb103
chmod -R o=g *
docker build -f docker/lite/Dockerfile.mariadb103 -t vitess/lite:mariadb103 .

docker_lite_percona:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) percona
chmod -R o=g *
docker build -f docker/lite/Dockerfile.percona -t vitess/lite:percona .

docker_lite_percona57:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) percona57
chmod -R o=g *
docker build -f docker/lite/Dockerfile.percona57 -t vitess/lite:percona57 .

docker_lite_percona80:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) percona80
chmod -R o=g *
docker build -f docker/lite/Dockerfile.percona80 -t vitess/lite:percona80 .

docker_lite_alpine:
cd docker/lite && ./build.sh --prompt=$(PROMPT_NOTICE) alpine
chmod -R o=g *
docker build -f docker/lite/Dockerfile.alpine -t vitess/lite:alpine .

docker_guestbook:
cd examples/kubernetes/guestbook && ./build.sh
@@ -1,74 +0,0 @@
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM vitess/base AS builder
FROM debian:stretch-slim AS staging

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/

RUN chown -R vitess:vitess /vt

FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
gnupg dirmngr ca-certificates wget libdbd-mysql-perl rsync libaio1 libatomic1 libcurl3 libev4 \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done \
&& echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.7' > /etc/apt/sources.list.d/mysql.list \
&& for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done \
&& echo 'deb http://repo.percona.com/apt stretch main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive \
apt-get install -y --no-install-recommends \
bzip2 \
libmysqlclient20 \
mysql-client \
mysql-server \
libjemalloc1 \
libtcmalloc-minimal4 \
percona-xtrabackup-24 \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess \
&& rm -rf /var/lib/mysql/

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
USER vitess
@@ -0,0 +1 @@
Dockerfile.mysql57
@@ -1,33 +1,54 @@
# This image is only meant to be built from within the build.sh script.
FROM vitess/base AS builder
FROM alpine:3.8 AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ && mkdir -p /vt/bin && mkdir -p /vt/src/vitess.io/vitess/web/vtctld2
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mariadb103 AS builder

# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM alpine:3.8

# Install dependencies
RUN echo '@edge http://nl.alpinelinux.org/alpine/edge/main' >> /etc/apk/repositories && \
apk add --no-cache mariadb@edge mariadb-client@edge bzip2 bash

# Set up Vitess user and directory tree.
RUN addgroup -S vitess && adduser -S -G vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH
ENV MYSQL_FLAVOR MariaDB103

# Create vitess user
RUN addgroup -S vitess && adduser -S -G vitess vitess && mkdir -p /vt
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
USER vitess
@@ -1,36 +1,44 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mariadb AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 0xF1656F24C74CD1D8 && break; done \
&& echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.2/debian stretch main' > /etc/apt/sources.list.d/mariadb.list \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bzip2 \
mariadb-server \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh mariadb

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess

@@ -38,8 +46,8 @@ ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH
ENV MYSQL_FLAVOR MariaDB

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,35 +1,44 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
RUN chown -R vitess:vitess /vt
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mariadb103 AS builder

# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 0xF1656F24C74CD1D8 && break; done \
&& echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.3/debian stretch main' > /etc/apt/sources.list.d/mariadb.list \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bzip2 \
mariadb-server \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh mariadb103

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess

@@ -37,8 +46,8 @@ ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH
ENV MYSQL_FLAVOR MariaDB103

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,47 +1,52 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mysql56 AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done \
&& echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.6' > /etc/apt/sources.list.d/mysql.list \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive \
apt-get install -y --no-install-recommends \
bzip2 \
libmysqlclient18 \
mysql-client \
mysql-server \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh mysql56

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,47 +1,52 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mysql57 AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done \
&& echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.7' > /etc/apt/sources.list.d/mysql.list \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive \
apt-get install -y --no-install-recommends \
bzip2 \
libmysqlclient20 \
mysql-client \
mysql-server \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh mysql57

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,39 +1,44 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:mysql80 AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done \
&& echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-8.0' > /etc/apt/sources.list.d/mysql.list \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive \
apt-get install -y --no-install-recommends \
bzip2 \
libmysqlclient21 \
mysql-client \
mysql-server \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh mysql80

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess

@@ -41,8 +46,8 @@ ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH
ENV MYSQL_FLAVOR MySQL80

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,49 +1,52 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:percona AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keys.gnupg.net 9334A25F8507EFA5 && break; done \
&& echo 'deb http://repo.percona.com/apt stretch main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.6 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.6 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
percona-server-server-5.6 \
bzip2 \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh percona

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,50 +1,52 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:percona57 AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keys.gnupg.net 9334A25F8507EFA5 && break; done \
&& echo 'deb http://repo.percona.com/apt stretch main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
percona-server-server-5.7 \
libperconaserverclient20 \
bzip2 \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh percona57

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,44 +1,44 @@
FROM vitess/base AS builder
FROM debian:stretch-slim AS staging
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

RUN mkdir -p /vt/vtdataroot/ \
&& mkdir -p /vt/bin \
&& mkdir -p /vt/src/vitess.io/vitess/web/vtctld2 \
&& groupadd -r vitess && useradd -r -g vitess vitess
# NOTE: We have to build the Vitess binaries from scratch instead of sharing
# a base image because Docker Hub dropped the feature we relied upon to
# ensure images contain the right binaries.

COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld /vt/src/vitess.io/vitess/web/vtctld
COPY --from=builder /vt/src/vitess.io/vitess/web/vtctld2/app /vt/src/vitess.io/vitess/web/vtctld2/app
COPY --from=builder /vt/src/vitess.io/vitess/config /vt/config
COPY --from=builder /vt/bin/mysqlctld /vt/bin/
COPY --from=builder /vt/bin/vtctld /vt/bin/
COPY --from=builder /vt/bin/vtctlclient /vt/bin/
COPY --from=builder /vt/bin/vtgate /vt/bin/
COPY --from=builder /vt/bin/vttablet /vt/bin/
COPY --from=builder /vt/bin/vtworker /vt/bin/
COPY --from=builder /vt/bin/vtbackup /vt/bin/
# Use a temporary layer for the build stage.
FROM vitess/bootstrap:percona80 AS builder

RUN chown -R vitess:vitess /vt
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0

# Re-copy sources from working tree.
COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess

# Build and install Vitess in a temporary output directory.
USER vitess
RUN make install PREFIX=/vt/install

# Start over and build the final image.
FROM debian:stretch-slim

# Install dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates \
&& for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keys.gnupg.net 9334A25F8507EFA5 && break; done \
&& echo 'deb http://repo.percona.com/ps-80/apt stretch main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
echo percona-server-server-8.0 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
percona-server-server \
libperconaserverclient21 \
percona-server-tokudb \
percona-server-rocksdb \
bzip2 \
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -r vitess && useradd -r -g vitess vitess
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
RUN /vt/dist/install_dependencies.sh percona80

# Set up Vitess user and directory tree.
RUN groupadd -r vitess && useradd -r -g vitess vitess
RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt

# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess

@@ -46,8 +46,8 @@ ENV VTDATAROOT /vt/vtdataroot
ENV PATH $VTROOT/bin:$PATH
ENV MYSQL_FLAVOR MySQL80

# Copy binaries (placed by build.sh)
COPY --from=staging /vt/ /vt/
# Copy artifacts from builder layer.
COPY --from=builder --chown=vitess:vitess /vt/install /vt

# Create mount point for actual data (e.g. MySQL data dir)
VOLUME /vt/vtdataroot
@@ -1,71 +0,0 @@
#!/bin/bash

# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is the script to build the vitess/lite Docker image by extracting
# the pre-built binaries from a vitess/base image.

set -ex

# Parse command line arguments.
prompt_notice=true
if [[ "$1" == "--prompt"* ]]; then
if [[ "$1" == "--prompt=false" ]]; then
prompt_notice=false
fi
shift
fi

flavor=$1
base_image=vitess/base
lite_image=vitess/lite
dockerfile=Dockerfile
tag=latest

if [[ -n "$flavor" ]]; then
lite_image=vitess/lite:$flavor
dockerfile=Dockerfile.$flavor
tag=$flavor
else
echo "Flavor not specified as first argument. Building default image."
fi

# Abort if base image does not exist.
if ! docker inspect $base_image &>/dev/null; then
echo "ERROR: Dependent image $base_image does not exist. Run 'make $make_target' to build it locally or 'docker pull $base_image' to fetch it from Docker Hub (if it is published)."
exit 1
fi

# Educate the user that they have to build or pull vitess/base themselves.
if [[ "$prompt_notice" = true ]]; then
cat <<END

This script is going to repack and copy the existing *local* base image '$base_image' into a smaller image '$lite_image'.

It does NOT recompile the Vitess binaries. For that you will have to rebuild or pull the base image.

The 'docker images' output below shows you how old your local base image is:

$(docker images vitess/base | grep -E "(CREATED|$tag)")

If you need a newer base image, you will have to manually run 'make $make_target' to build it locally
or 'docker pull $base_image' to fetch it from Docker Hub.

Press ENTER to continue building '$lite_image' or Ctrl-C to cancel.
END
read
fi

docker build --no-cache -f $dockerfile -t $lite_image .
@@ -0,0 +1,175 @@
#!/bin/bash

# This is a script that gets run as part of the Dockerfile build
# to install dependencies for the vitess/lite family of images.
#
# Usage: install_dependencies.sh <flavor>

set -euo pipefail

FLAVOR="$1"
export DEBIAN_FRONTEND=noninteractive

retry() {
for i in $(seq 1 10); do
if "$@"; then return; fi
done
}

# Install base packages that are common to all flavors.
BASE_PACKAGES=(
bzip2
ca-certificates
dirmngr
gnupg
libaio1
libatomic1
libcurl3
libdbd-mysql-perl
libev4
libjemalloc1
libtcmalloc-minimal4
procps
rsync
wget
)

apt-get update
apt-get install -y --no-install-recommends "${BASE_PACKAGES[@]}"

# Packages specific to certain flavors.
case "${FLAVOR}" in
mysql56)
PACKAGES=(
libmysqlclient18
mysql-client
mysql-server
percona-xtrabackup-24
)
;;
mysql57)
PACKAGES=(
libmysqlclient20
mysql-client
mysql-server
percona-xtrabackup-24
)
;;
mysql80)
PACKAGES=(
libmysqlclient21
mysql-client
mysql-server
percona-xtrabackup-80
)
;;
percona)
PACKAGES=(
percona-server-server-5.6
percona-xtrabackup-24
)
;;
percona57)
PACKAGES=(
libperconaserverclient20
percona-server-server-5.7
percona-xtrabackup-24
)
;;
percona80)
PACKAGES=(
libperconaserverclient21
percona-server-rocksdb
percona-server-server
percona-server-tokudb
percona-xtrabackup-80
)
;;
mariadb|mariadb103)
PACKAGES=(
mariadb-server
)
;;
*)
echo "Unknown flavor ${FLAVOR}"
exit 1
;;
esac

# Get GPG keys for extra apt repositories.
case "${FLAVOR}" in
mysql56|mysql57|mysql80)
# repo.mysql.com
retry apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 8C718D3B5072E1F5
;;
mariadb|mariadb103)
# digitalocean.com
retry apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys F1656F24C74CD1D8
;;
esac

# All flavors (except mariadb*) include Percona XtraBackup (from repo.percona.com).
retry apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5

# Add extra apt repositories for MySQL.
case "${FLAVOR}" in
mysql56)
echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.6' > /etc/apt/sources.list.d/mysql.list
;;
mysql57)
echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.7' > /etc/apt/sources.list.d/mysql.list
;;
mysql80)
echo 'deb http://repo.mysql.com/apt/debian/ stretch mysql-8.0' > /etc/apt/sources.list.d/mysql.list
;;
mariadb)
echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.2/debian stretch main' > /etc/apt/sources.list.d/mariadb.list
;;
mariadb103)
echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.3/debian stretch main' > /etc/apt/sources.list.d/mariadb.list
;;
esac

# Add extra apt repositories for Percona Server and/or Percona XtraBackup.
case "${FLAVOR}" in
mysql56|mysql57|mysql80|percona|percona57)
echo 'deb http://repo.percona.com/apt stretch main' > /etc/apt/sources.list.d/percona.list
;;
percona80)
echo 'deb http://repo.percona.com/apt stretch main' > /etc/apt/sources.list.d/percona.list
echo 'deb http://repo.percona.com/ps-80/apt stretch main' > /etc/apt/sources.list.d/percona80.list
;;
esac

# Pre-fill values for installation prompts that are normally interactive.
case "${FLAVOR}" in
percona)
debconf-set-selections <<EOF
debconf debconf/frontend select Noninteractive
percona-server-server-5.6 percona-server-server/root_password password 'unused'
percona-server-server-5.6 percona-server-server/root_password_again password 'unused'
EOF
;;
percona57)
debconf-set-selections <<EOF
debconf debconf/frontend select Noninteractive
percona-server-server-5.7 percona-server-server/root_password password 'unused'
percona-server-server-5.7 percona-server-server/root_password_again password 'unused'
EOF
;;
percona80)
debconf-set-selections <<EOF
debconf debconf/frontend select Noninteractive
percona-server-server-8.0 percona-server-server/root_password password 'unused'
percona-server-server-8.0 percona-server-server/root_password_again password 'unused'
EOF
;;
esac

# Install flavor-specific packages
apt-get update
apt-get install -y --no-install-recommends "${PACKAGES[@]}"

# Clean up files we won't need in the final image.
rm -rf /var/lib/apt/lists/*
rm -rf /var/lib/mysql/
@ -15,9 +15,6 @@
|
|||
# limitations under the License.

hostname=`hostname -f`
if [ $(uname) == "Darwin" ]; then
hostname="localhost"
fi
vtctld_web_port=15000
export VTDATAROOT="${VTDATAROOT:-${VTROOT}/vtdataroot}"

@ -71,11 +71,27 @@ for uid_index in $uids; do
echo " $VTDATAROOT/$tablet_dir"
|
||||
action='start'
|
||||
fi
|
||||
|
||||
set +e
|
||||
|
||||
$VTROOT/bin/mysqlctl \
|
||||
-log_dir $VTDATAROOT/tmp \
|
||||
-tablet_uid $uid \
|
||||
-mysql_port $mysql_port \
|
||||
$action &
|
||||
$action
|
||||
|
||||
err=$?
|
||||
if [[ $err -ne 0 ]]; then
|
||||
fail "This script fails to start mysqld, possibly due to apparmor or selinux protection.
|
||||
Utilities to help investigate:
|
||||
apparmor: \"sudo aa-status\"
|
||||
selinux: \"sudo sestatus\"
|
||||
Please disable if so indicated.
|
||||
You may also need to empty your \$VTDATAROOT to start clean."
|
||||
fi
|
||||
|
||||
set -e
|
||||
|
||||
done
|
||||
|
||||
# Wait for all mysqld to start up.
|
||||
|
|
go.mod

@ -25,7 +25,6 @@ require (
github.com/golang/mock v1.3.1
github.com/golang/protobuf v1.3.2
github.com/golang/snappy v0.0.0-20170215233205-553a64147049
github.com/google/btree v1.0.0 // indirect
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf // indirect
github.com/gorilla/websocket v0.0.0-20160912153041-2d1e4548da23
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0

@ -50,8 +49,6 @@ require (
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/olekukonko/tablewriter v0.0.0-20160115111002-cca8bbc07984
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02
github.com/opentracing/opentracing-go v1.1.0

@ -289,11 +289,11 @@ func TestAlias(t *testing.T) {
func waitTillAllTabletsAreHealthyInVtgate(t *testing.T, vtgateInstance cluster.VtgateProcess, shards ...string) {
for _, shard := range shards {
err := vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspaceName, shard))
err := vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspaceName, shard), 1)
assert.Nil(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard))
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1)
assert.Nil(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard))
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1)
assert.Nil(t, err)
}
}

@ -423,10 +423,10 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error
for _, keyspace := range cluster.Keyspaces {
for _, shard := range keyspace.Shards {
isRdOnlyPresent = false
if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspace.Name, shard.Name)); err != nil {
if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspace.Name, shard.Name), 1); err != nil {
return err
}
if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name)); err != nil {
if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), 1); err != nil {
return err
}
for _, tablet := range shard.Vttablets {

@ -435,7 +435,7 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error
}
}
if isRdOnlyPresent {
err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name))
err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), 1)
}
if err != nil {
return err

@ -25,6 +25,7 @@ import (
"os/exec"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -131,7 +132,8 @@ func (vtgate *VtgateProcess) WaitForStatus() bool {
|
|||
}
|
||||
|
||||
// GetStatusForTabletOfShard function gets status for a specific tablet of a shard in keyspace
|
||||
func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string) bool {
|
||||
// endPointsCount : number of endpoints
|
||||
func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string, endPointsCount int) bool {
|
||||
resp, err := http.Get(vtgate.VerifyURL)
|
||||
if err != nil {
|
||||
return false
|
||||
|
@ -149,9 +151,9 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string) bool {
|
|||
for _, key := range object.MapKeys() {
if key.String() == name {
value := fmt.Sprintf("%v", object.MapIndex(key))
return value == "1"
countStr := strconv.Itoa(endPointsCount)
return value == countStr
}

}
}
return masterConnectionExist

@ -160,10 +162,11 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string) {
}

// WaitForStatusOfTabletInShard function waits till status of a tablet in shard is 1
func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string) error {
// endPointsCount: how many endpoints to wait for
func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int) error {
timeout := time.Now().Add(10 * time.Second)
for time.Now().Before(timeout) {
if vtgate.GetStatusForTabletOfShard(name) {
if vtgate.GetStatusForTabletOfShard(name, endPointsCount) {
return nil
}
select {

@ -226,3 +229,22 @@ func VtgateProcessInstance(port int, grpcPort int, mySQLServerPort int, cell str
return vtgate
}

// GetVars returns map of vars
func (vtgate *VtgateProcess) GetVars() (map[string]interface{}, error) {
resultMap := make(map[string]interface{})
resp, err := http.Get(vtgate.VerifyURL)
if err != nil {
return nil, fmt.Errorf("error getting response from %s", vtgate.VerifyURL)
}
if resp.StatusCode == 200 {
respByte, _ := ioutil.ReadAll(resp.Body)
err := json.Unmarshal(respByte, &resultMap)
if err != nil {
return nil, fmt.Errorf("not able to parse response body")
}
return resultMap, nil
} else {
return nil, fmt.Errorf("unsuccessful response")
}
}
@ -118,6 +118,7 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
clusterInstance.VtTabletExtraArgs,
clusterInstance.EnableSemiSync)
tablet.Alias = tablet.VttabletProcess.TabletPath

shard.Vttablets = append(shard.Vttablets, tablet)
}
for _, proc := range mysqlCtlProcessList {
@ -233,6 +233,7 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
clusterInstance.VtTabletExtraArgs,
clusterInstance.EnableSemiSync)
tablet.Alias = tablet.VttabletProcess.TabletPath

shard.Vttablets = append(shard.Vttablets, tablet)
}
for _, proc := range mysqlCtlProcessList {
@ -239,18 +239,23 @@ func checkStreamHealthEqualsBinlogPlayerVars(t *testing.T, vttablet cluster.Vtta
}

// CheckBinlogServerVars checks the binlog server variables are correctly exported.
func CheckBinlogServerVars(t *testing.T, vttablet cluster.Vttablet, minStatement int, minTxn int) {
func CheckBinlogServerVars(t *testing.T, vttablet cluster.Vttablet, minStatement int, minTxn int, isVerticalSplit bool) {
resultMap := vttablet.VttabletProcess.GetVars()
assert.Contains(t, resultMap, "UpdateStreamKeyRangeStatements")
assert.Contains(t, resultMap, "UpdateStreamKeyRangeTransactions")
skey := "UpdateStreamKeyRangeStatements"
tkey := "UpdateStreamKeyRangeTransactions"
if isVerticalSplit {
skey = "UpdateStreamTablesStatements"
tkey = "UpdateStreamTablesTransactions"
}
assert.Contains(t, resultMap, skey)
assert.Contains(t, resultMap, tkey)
if minStatement > 0 {
value := fmt.Sprintf("%v", reflect.ValueOf(resultMap["UpdateStreamKeyRangeStatements"]))
value := fmt.Sprintf("%v", reflect.ValueOf(resultMap[skey]))
iValue, _ := strconv.Atoi(value)
assert.True(t, iValue >= minStatement, fmt.Sprintf("only got %d < %d statements", iValue, minStatement))
}

if minTxn > 0 {
value := fmt.Sprintf("%v", reflect.ValueOf(resultMap["UpdateStreamKeyRangeStatements"]))
value := fmt.Sprintf("%v", reflect.ValueOf(resultMap[tkey]))
iValue, _ := strconv.Atoi(value)
assert.True(t, iValue >= minTxn, fmt.Sprintf("only got %d < %d transactions", iValue, minTxn))
}
@ -341,7 +341,7 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query
assert.Nil(t, err)

for _, tabletType := range []string{"master", "replica", "rdonly"} {
if err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.%s", keyspaceName, shard1.Name, tabletType)); err != nil {
if err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.%s", keyspaceName, shard1.Name, tabletType), 1); err != nil {
assert.Fail(t, err.Error())
}
}
@ -400,12 +400,11 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query

// Wait for the endpoints, either local or remote.
for _, shard := range []cluster.Shard{shard1, shard21, shard22} {

err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspaceName, shard.Name))
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", keyspaceName, shard.Name), 1)
assert.Nil(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard.Name))
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard.Name), 1)
assert.Nil(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard.Name))
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard.Name), 1)
assert.Nil(t, err)
}

@ -493,7 +492,7 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query
sharding.CheckDestinationMaster(t, *shard22.MasterTablet(), []string{shard1Ks}, *ClusterInstance)

// check that binlog server exported the stats vars
sharding.CheckBinlogServerVars(t, *shard1.Replica(), 0, 0)
sharding.CheckBinlogServerVars(t, *shard1.Replica(), 0, 0, false)

for _, tablet := range []cluster.Vttablet{*shard21.Rdonly(), *shard22.Rdonly()} {
err = ClusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias)
@ -510,7 +509,7 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query

sharding.CheckDestinationMaster(t, *shard21.MasterTablet(), []string{shard1Ks}, *ClusterInstance)
sharding.CheckDestinationMaster(t, *shard22.MasterTablet(), []string{shard1Ks}, *ClusterInstance)
sharding.CheckBinlogServerVars(t, *shard1.Replica(), 1000, 1000)
sharding.CheckBinlogServerVars(t, *shard1.Replica(), 1000, 1000, false)

err = ClusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", shard21.Rdonly().Alias)
assert.Nil(t, err)
@ -558,8 +557,8 @@ func TestInitialSharding(t *testing.T, keyspace *cluster.Keyspace, keyType query
_ = shard21.Rdonly().VttabletProcess.WaitForTabletType("SERVING")
_ = shard22.Rdonly().VttabletProcess.WaitForTabletType("SERVING")

_ = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard21.Name))
_ = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard22.Name))
_ = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard21.Name), 1)
_ = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard22.Name), 1)

//then serve replica from the split shards

@ -529,7 +529,7 @@ func TestResharding(t *testing.T, useVarbinaryShardingKeyType bool) {
assert.Nil(t, err)

// check that binlog server exported the stats vars
sharding.CheckBinlogServerVars(t, *shard1Replica1, 0, 0)
sharding.CheckBinlogServerVars(t, *shard1Replica1, 0, 0, false)

// Check that the throttler was enabled.
// The stream id is hard-coded as 1, which is the first id generated through auto-inc.
@ -554,7 +554,8 @@ func TestResharding(t *testing.T, useVarbinaryShardingKeyType bool) {
checkMultiShardValues(t, keyspaceName, shardingKeyType)
sharding.CheckBinlogPlayerVars(t, *shard2Master, []string{shard1Ks}, 30)
sharding.CheckBinlogPlayerVars(t, *shard3Master, []string{shard1Ks}, 30)
sharding.CheckBinlogServerVars(t, *shard1Replica1, 100, 100)

sharding.CheckBinlogServerVars(t, *shard1Replica1, 100, 100, false)

// use vtworker to compare the data (after health-checking the destination
// rdonly tablets so discovery works)
@ -613,7 +614,8 @@ func TestResharding(t *testing.T, useVarbinaryShardingKeyType bool) {
insertLots(100, 100, *shard1Master, tableName, fixedParentID, keyspaceName)
log.Debug("Checking 100 percent of data was sent quickly")
assert.True(t, checkLotsTimeout(t, 100, 100, tableName, keyspaceName, shardingKeyType))
sharding.CheckBinlogServerVars(t, *shard1Replica2, 80, 80)

sharding.CheckBinlogServerVars(t, *shard1Replica2, 80, 80, false)

// check we can't migrate the master just yet
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedTypes", shard1Ks, "master")

@ -0,0 +1,736 @@
/*
Copyright 2019 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package verticalsplit

import (
"context"
"flag"
"fmt"
"os/exec"
"path"
"reflect"
"strings"
"testing"
"time"

"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/vt/vtgate/vtgateconn"

"github.com/stretchr/testify/assert"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/sharding"
"vitess.io/vitess/go/vt/proto/topodata"
_ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
)

var (
clusterInstance *cluster.LocalProcessCluster
sourceKeyspace = "source_keyspace"
destinationKeyspace = "destination_keyspace"
hostname = "localhost"
cellj = "test_nj"
shardName = "0"
createTabletTemplate = `
create table %s(
id bigint not null,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB;`
createViewTemplate = "create view %s(id, msg) as select id, msg from %s;"
createMoving3NoPkTable = `
create table moving3_no_pk (
id bigint not null,
msg varchar(64)
) Engine=InnoDB;`
tableArr = []string{"moving1", "moving2", "staying1", "staying2"}
insertIndex = 0
moving1First int
moving2First int
staying1First int
staying2First int
moving3NoPkFirst int
)

func TestVerticalSplit(t *testing.T) {
|
||||
flag.Parse()
|
||||
code, err := initializeCluster()
|
||||
if err != nil {
|
||||
t.Errorf("setup failed with status code %d", code)
|
||||
}
|
||||
defer teardownCluster()
|
||||
|
||||
// Adding another cell in the same cluster
|
||||
err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+"test_ca")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlProcess.AddCellInfo("test_ca")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+"test_ny")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlProcess.AddCellInfo("test_ny")
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Nil(t, err, "error should be Nil")
|
||||
|
||||
// source keyspace, with 4 tables
|
||||
sourceShard := clusterInstance.Keyspaces[0].Shards[0]
|
||||
sourceMasterTablet := *sourceShard.Vttablets[0]
|
||||
sourceReplicaTablet := *sourceShard.Vttablets[1]
|
||||
sourceRdOnlyTablet1 := *sourceShard.Vttablets[2]
|
||||
sourceRdOnlyTablet2 := *sourceShard.Vttablets[3]
|
||||
sourceKs := fmt.Sprintf("%s/%s", sourceKeyspace, shardName)
|
||||
|
||||
// source tablets init
|
||||
for _, tablet := range []cluster.Vttablet{sourceMasterTablet, sourceReplicaTablet, sourceRdOnlyTablet1, sourceRdOnlyTablet2} {
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, cellj, sourceKeyspace, hostname, sourceShard.Name)
|
||||
assert.Nil(t, err)
|
||||
err = tablet.VttabletProcess.CreateDB(sourceKeyspace)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// destination keyspace, with just two tables
|
||||
destinationShard := clusterInstance.Keyspaces[1].Shards[0]
|
||||
destinationMasterTablet := *destinationShard.Vttablets[0]
|
||||
destinationReplicaTablet := *destinationShard.Vttablets[1]
|
||||
destinationRdOnlyTablet1 := *destinationShard.Vttablets[2]
|
||||
destinationRdOnlyTablet2 := *destinationShard.Vttablets[3]
|
||||
|
||||
// destination tablets init
|
||||
for _, tablet := range []cluster.Vttablet{destinationMasterTablet, destinationReplicaTablet, destinationRdOnlyTablet1, destinationRdOnlyTablet2} {
|
||||
err = clusterInstance.VtctlclientProcess.InitTablet(&tablet, cellj, destinationKeyspace, hostname, destinationShard.Name)
|
||||
assert.Nil(t, err)
|
||||
err = tablet.VttabletProcess.CreateDB(destinationKeyspace)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// RebuildKeyspaceGraph source keyspace
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", sourceKeyspace)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// RebuildKeyspaceGraph destination keyspace
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", destinationKeyspace)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// source schema
|
||||
for _, tablet := range []cluster.Vttablet{sourceMasterTablet, sourceReplicaTablet, sourceRdOnlyTablet1, sourceRdOnlyTablet2} {
|
||||
for _, tableName := range tableArr {
|
||||
_, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf(createTabletTemplate, tableName), sourceKeyspace, true)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
_, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf(createViewTemplate, "view1", "moving1"), sourceKeyspace, true)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// RBR (default behaviour) only because Vitess requires the primary key for query rewrites if
|
||||
// it is running with statement based replication.
|
||||
_, err = tablet.VttabletProcess.QueryTablet(createMoving3NoPkTable, sourceKeyspace, true)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// destination schema
|
||||
// Insert data directly because vtgate would redirect us.
|
||||
for _, tablet := range []cluster.Vttablet{destinationMasterTablet, destinationReplicaTablet, destinationRdOnlyTablet1, destinationRdOnlyTablet2} {
|
||||
_, err = tablet.VttabletProcess.QueryTablet(fmt.Sprintf(createTabletTemplate, "extra1"), destinationKeyspace, true)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// source and destination master and replica tablets will be started
|
||||
for _, tablet := range []cluster.Vttablet{sourceMasterTablet, sourceReplicaTablet, destinationMasterTablet, destinationReplicaTablet} {
|
||||
_ = tablet.VttabletProcess.Setup()
|
||||
}
|
||||
|
||||
// rdonly tablets will be started
|
||||
for _, tablet := range []cluster.Vttablet{sourceRdOnlyTablet1, sourceRdOnlyTablet2, destinationRdOnlyTablet1, destinationRdOnlyTablet2} {
|
||||
_ = tablet.VttabletProcess.Setup()
|
||||
}
|
||||
|
||||
// check SrvKeyspace
|
||||
ksServedFrom := "ServedFrom(master): source_keyspace\nServedFrom(rdonly): source_keyspace\nServedFrom(replica): source_keyspace\n"
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, ksServedFrom, *clusterInstance)
|
||||
|
||||
// reparent to make the tablets work (we use health check, fix their types)
|
||||
err = clusterInstance.VtctlclientProcess.InitShardMaster(sourceKeyspace, shardName, cellj, sourceMasterTablet.TabletUID)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.InitShardMaster(destinationKeyspace, shardName, cellj, destinationMasterTablet.TabletUID)
|
||||
assert.Nil(t, err)
|
||||
|
||||
sourceMasterTablet.Type = "master"
|
||||
destinationMasterTablet.Type = "master"
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{sourceReplicaTablet, destinationReplicaTablet} {
|
||||
_ = tablet.VttabletProcess.WaitForTabletType("SERVING")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
for _, tablet := range []cluster.Vttablet{sourceRdOnlyTablet1, sourceRdOnlyTablet2, destinationRdOnlyTablet1, destinationRdOnlyTablet2} {
|
||||
_ = tablet.VttabletProcess.WaitForTabletType("SERVING")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
for _, tablet := range []cluster.Vttablet{sourceMasterTablet, destinationMasterTablet, sourceReplicaTablet, destinationReplicaTablet, sourceRdOnlyTablet1, sourceRdOnlyTablet2, destinationRdOnlyTablet1, destinationRdOnlyTablet2} {
|
||||
assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "SERVING")
|
||||
}
|
||||
|
||||
err = clusterInstance.StartVtgate()
|
||||
assert.Nil(t, err)
|
||||
|
||||
vtParams := mysql.ConnParams{
|
||||
Host: clusterInstance.Hostname,
|
||||
Port: clusterInstance.VtgateMySQLPort,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
conn, err := mysql.Connect(ctx, &vtParams)
|
||||
assert.Nil(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", sourceKeyspace, shardName), 1)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", sourceKeyspace, shardName), 1)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", sourceKeyspace, shardName), 2)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", destinationKeyspace, shardName), 1)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", destinationKeyspace, shardName), 1)
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", destinationKeyspace, shardName), 2)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// create the schema on the source keyspace, add some values
|
||||
insertInitialValues(t, conn, sourceMasterTablet, destinationMasterTablet)
|
||||
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("CopySchemaShard", "--tables", "/moving/,view1", sourceRdOnlyTablet1.Alias, "destination_keyspace/0")
|
||||
assert.Nil(t, err, "CopySchemaShard failed")
|
||||
|
||||
// starting vtworker
|
||||
httpPort := clusterInstance.GetAndReservePort()
|
||||
grpcPort := clusterInstance.GetAndReservePort()
|
||||
clusterInstance.VtworkerProcess = *cluster.VtworkerProcessInstance(
|
||||
httpPort,
|
||||
grpcPort,
|
||||
clusterInstance.TopoPort,
|
||||
clusterInstance.Hostname,
|
||||
clusterInstance.TmpDirectory)
|
||||
|
||||
err = clusterInstance.VtworkerProcess.ExecuteVtworkerCommand(httpPort, grpcPort,
|
||||
"--cell", cellj,
|
||||
"--command_display_interval", "10ms",
|
||||
"--use_v3_resharding_mode=true",
|
||||
"VerticalSplitClone",
|
||||
"--tables", "/moving/,view1",
|
||||
"--chunk_count", "10",
|
||||
"--min_rows_per_chunk", "1",
|
||||
"--min_healthy_tablets", "1", "destination_keyspace/0")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// test Cancel first
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("CancelResharding", "destination_keyspace/0")
|
||||
assert.Nil(t, err)
|
||||
err = destinationMasterTablet.VttabletProcess.WaitForBinLogPlayerCount(0)
|
||||
assert.Nil(t, err)
|
||||
// master should be in serving state after cancel
|
||||
sharding.CheckTabletQueryServices(t, []cluster.Vttablet{destinationMasterTablet}, "SERVING", false, *clusterInstance)
|
||||
|
||||
// redo VerticalSplitClone
|
||||
err = clusterInstance.VtworkerProcess.ExecuteVtworkerCommand(clusterInstance.GetAndReservePort(), clusterInstance.GetAndReservePort(),
|
||||
"--cell", cellj,
|
||||
"--command_display_interval", "10ms",
|
||||
"--use_v3_resharding_mode=true",
|
||||
"VerticalSplitClone",
|
||||
"--tables", "/moving/,view1",
|
||||
"--chunk_count", "10",
|
||||
"--min_rows_per_chunk", "1",
|
||||
"--min_healthy_tablets", "1", "destination_keyspace/0")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// check values are present
|
||||
checkValues(t, &destinationMasterTablet, destinationKeyspace, "vt_destination_keyspace", "moving1", moving1First, 100)
|
||||
checkValues(t, &destinationMasterTablet, destinationKeyspace, "vt_destination_keyspace", "moving2", moving2First, 100)
|
||||
checkValues(t, &destinationMasterTablet, destinationKeyspace, "vt_destination_keyspace", "view1", moving1First, 100)
|
||||
checkValues(t, &destinationMasterTablet, destinationKeyspace, "vt_destination_keyspace", "moving3_no_pk", moving3NoPkFirst, 100)
|
||||
|
||||
// Verify vreplication table entries
|
||||
dbParams := mysql.ConnParams{
|
||||
Uname: "vt_dba",
|
||||
UnixSocket: path.Join(destinationMasterTablet.VttabletProcess.Directory, "mysql.sock"),
|
||||
}
|
||||
dbParams.DbName = "_vt"
|
||||
dbConn, err := mysql.Connect(ctx, &dbParams)
|
||||
assert.Nil(t, err)
|
||||
qr, err := dbConn.ExecuteFetch("select * from vreplication", 1000, true)
|
||||
assert.Nil(t, err, "error should be Nil")
|
||||
assert.Equal(t, 1, len(qr.Rows))
|
||||
assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "SplitClone")
|
||||
assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `keyspace:\"source_keyspace\" shard:\"0\" tables:\"/moving/\" tables:\"view1\`)
|
||||
dbConn.Close()
|
||||
|
||||
// check the binlog player is running and exporting vars
|
||||
sharding.CheckDestinationMaster(t, destinationMasterTablet, []string{sourceKs}, *clusterInstance)
|
||||
|
||||
// check that binlog server exported the stats vars
|
||||
sharding.CheckBinlogServerVars(t, sourceReplicaTablet, 0, 0, true)
|
||||
|
||||
// add values to source, make sure they're replicated
|
||||
moving1FirstAdd1 := insertValues(t, conn, sourceKeyspace, "moving1", 100)
|
||||
_ = insertValues(t, conn, sourceKeyspace, "staying1", 100)
|
||||
moving2FirstAdd1 := insertValues(t, conn, sourceKeyspace, "moving2", 100)
|
||||
checkValuesTimeout(t, destinationMasterTablet, "moving1", moving1FirstAdd1, 100, 30)
|
||||
checkValuesTimeout(t, destinationMasterTablet, "moving2", moving2FirstAdd1, 100, 30)
|
||||
sharding.CheckBinlogPlayerVars(t, destinationMasterTablet, []string{sourceKs}, 30)
|
||||
sharding.CheckBinlogServerVars(t, sourceReplicaTablet, 100, 100, true)
|
||||
|
||||
// use vtworker to compare the data
|
||||
t.Log("Running vtworker VerticalSplitDiff")
|
||||
err = clusterInstance.VtworkerProcess.ExecuteVtworkerCommand(clusterInstance.GetAndReservePort(),
|
||||
clusterInstance.GetAndReservePort(),
|
||||
"--use_v3_resharding_mode=true",
|
||||
"--cell", "test_nj",
|
||||
"VerticalSplitDiff",
|
||||
"--min_healthy_rdonly_tablets", "1",
|
||||
"destination_keyspace/0")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// get status for destination master tablet, make sure we have it all
|
||||
sharding.CheckRunningBinlogPlayer(t, destinationMasterTablet, 700, 300)
|
||||
|
||||
// check query service is off on destination master, as filtered
|
||||
// replication is enabled. Even health check should not interfere.
|
||||
destinationMasterTabletVars := destinationMasterTablet.VttabletProcess.GetVars()
|
||||
assert.NotNil(t, destinationMasterTabletVars)
|
||||
assert.Contains(t, reflect.ValueOf(destinationMasterTabletVars["TabletStateName"]).String(), "NOT_SERVING")
|
||||
|
||||
// check we can't migrate the master just yet
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "destination_keyspace/0", "master")
|
||||
assert.NotNil(t, err)
|
||||
|
||||
// migrate rdonly only in test_ny cell, make sure nothing is migrated
|
||||
// in test_nj
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "--cells=test_ny", "destination_keyspace/0", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// check SrvKeyspace
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, ksServedFrom, *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, nil)
|
||||
|
||||
// migrate test_nj only, using command line manual fix command,
|
||||
// and restore it back.
|
||||
keyspaceJSON, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "destination_keyspace")
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateKeyspaceJSON(t, keyspaceJSON, []string{"test_ca", "test_nj"})
|
||||
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetKeyspaceServedFrom", "-source=source_keyspace", "-remove", "-cells=test_nj,test_ca", "destination_keyspace", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
|
||||
// again validating keyspaceJSON
|
||||
keyspaceJSON, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "destination_keyspace")
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateKeyspaceJSON(t, keyspaceJSON, nil)
|
||||
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetKeyspaceServedFrom", "-source=source_keyspace", "destination_keyspace", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
|
||||
keyspaceJSON, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "destination_keyspace")
|
||||
assert.Nil(t, err)
|
||||
|
||||
validateKeyspaceJSON(t, keyspaceJSON, []string{})
|
||||
|
||||
// now serve rdonly from the destination shards
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "destination_keyspace/0", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, "ServedFrom(master): source_keyspace\nServedFrom(replica): source_keyspace\n", *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
|
||||
grpcAddress := fmt.Sprintf("%s:%d", "localhost", clusterInstance.VtgateProcess.GrpcPort)
|
||||
gconn, err := vtgateconn.Dial(ctx, grpcAddress)
|
||||
assert.Nil(t, err)
|
||||
defer gconn.Close()
|
||||
|
||||
checkClientConnRedirectionExecuteKeyrange(ctx, t, gconn, destinationKeyspace, []topodata.TabletType{topodata.TabletType_MASTER, topodata.TabletType_REPLICA}, []string{"moving1", "moving2"})
|
||||
|
||||
// then serve replica from the destination shards
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "destination_keyspace/0", "replica")
|
||||
assert.Nil(t, err)
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, "ServedFrom(master): source_keyspace\n", *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkClientConnRedirectionExecuteKeyrange(ctx, t, gconn, destinationKeyspace, []topodata.TabletType{topodata.TabletType_MASTER}, []string{"moving1", "moving2"})
|
||||
|
||||
// move replica back and forth
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "-reverse", "destination_keyspace/0", "replica")
|
||||
assert.Nil(t, err)
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, "ServedFrom(master): source_keyspace\nServedFrom(replica): source_keyspace\n", *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "destination_keyspace/0", "replica")
|
||||
assert.Nil(t, err)
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, "ServedFrom(master): source_keyspace\n", *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, nil)
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkClientConnRedirectionExecuteKeyrange(ctx, t, gconn, destinationKeyspace, []topodata.TabletType{topodata.TabletType_MASTER}, []string{"moving1", "moving2"})
|
||||
|
||||
// Cancel should fail now
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("CancelResharding", "destination_keyspace/0")
|
||||
assert.NotNil(t, err)
|
||||
|
||||
// then serve master from the destination shards
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("MigrateServedFrom", "destination_keyspace/0", "master")
|
||||
checkSrvKeyspaceServedFrom(t, cellj, destinationKeyspace, "", *clusterInstance)
|
||||
checkBlacklistedTables(t, sourceMasterTablet, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceReplicaTablet, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet1, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
checkBlacklistedTables(t, sourceRdOnlyTablet2, sourceKeyspace, []string{"/moving/", "view1"})
|
||||
|
||||
// check the binlog player is gone now
|
||||
_ = destinationMasterTablet.VttabletProcess.WaitForBinLogPlayerCount(0)
|
||||
|
||||
// check the stats are correct
|
||||
checkStats(t)
|
||||
|
||||
// now remove the tables on the source shard. The blacklisted tables
|
||||
// in the source shard won't match any table, make sure that works.
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ApplySchema", "-sql=drop view view1", "source_keyspace")
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, table := range []string{"moving1", "moving2"} {
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ApplySchema", "--sql=drop table "+table, "source_keyspace")
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
for _, tablet := range []cluster.Vttablet{sourceMasterTablet, sourceReplicaTablet, sourceRdOnlyTablet1, sourceRdOnlyTablet2} {
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ReloadSchema", tablet.Alias)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
qr, _ = sourceMasterTablet.VttabletProcess.QueryTablet("select count(1) from staying1", sourceKeyspace, true)
|
||||
assert.Equal(t, 1, len(qr.Rows), fmt.Sprintf("cannot read staying1: got %d", len(qr.Rows)))
|
||||
|
||||
// test SetShardTabletControl
|
||||
verifyVtctlSetShardTabletControl(t)
|
||||
|
||||
}
|
||||
|
||||
func verifyVtctlSetShardTabletControl(t *testing.T) {
|
||||
// clear the rdonly entry:
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("SetShardTabletControl", "--remove", "source_keyspace/0", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
assertTabletControls(t, clusterInstance, []topodata.TabletType{topodata.TabletType_MASTER, topodata.TabletType_REPLICA})
|
||||
|
||||
// re-add rdonly:
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetShardTabletControl", "--blacklisted_tables=/moving/,view1", "source_keyspace/0", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
assertTabletControls(t, clusterInstance, []topodata.TabletType{topodata.TabletType_MASTER, topodata.TabletType_REPLICA, topodata.TabletType_RDONLY})
|
||||
|
||||
//and then clear all entries:
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetShardTabletControl", "--remove", "source_keyspace/0", "rdonly")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetShardTabletControl", "--remove", "source_keyspace/0", "replica")
|
||||
assert.Nil(t, err)
|
||||
err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetShardTabletControl", "--remove", "source_keyspace/0", "master")
|
||||
assert.Nil(t, err)
|
||||
|
||||
shardJSON, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", "source_keyspace/0")
|
||||
var shardJSONData topodata.Shard
|
||||
err = json2.Unmarshal([]byte(shardJSON), &shardJSONData)
|
||||
assert.Empty(t, shardJSONData.TabletControls)
|
||||
|
||||
}
|
||||
|
||||
func assertTabletControls(t *testing.T, clusterInstance *cluster.LocalProcessCluster, aliases []topodata.TabletType) {
|
||||
shardJSON, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", "source_keyspace/0")
|
||||
assert.Nil(t, err)
|
||||
var shardJSONData topodata.Shard
|
||||
err = json2.Unmarshal([]byte(shardJSON), &shardJSONData)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(shardJSONData.TabletControls), len(aliases))
|
||||
for _, tc := range shardJSONData.TabletControls {
|
||||
assert.Contains(t, aliases, tc.TabletType)
|
||||
assert.Equal(t, []string{"/moving/", "view1"}, tc.BlacklistedTables)
|
||||
}
|
||||
}
|
||||
|
||||
func checkStats(t *testing.T) {
|
||||
|
||||
resultMap, err := clusterInstance.VtgateProcess.GetVars()
|
||||
assert.Nil(t, err)
|
||||
resultVtTabletCall := resultMap["VttabletCall"]
|
||||
resultVtTabletCallMap := resultVtTabletCall.(map[string]interface{})
|
||||
resultHistograms := resultVtTabletCallMap["Histograms"]
|
||||
resultHistogramsMap := resultHistograms.(map[string]interface{})
|
||||
resultTablet := resultHistogramsMap["Execute.source_keyspace.0.replica"]
|
||||
resultTableMap := resultTablet.(map[string]interface{})
|
||||
resultCountStr := fmt.Sprintf("%v", reflect.ValueOf(resultTableMap["Count"]))
|
||||
assert.Equal(t, "2", resultCountStr, fmt.Sprintf("unexpected value for VttabletCall(Execute.source_keyspace.0.replica) inside %s", resultCountStr))
|
||||
|
||||
// Verify master reads done by self._check_client_conn_redirection().
|
||||
resultVtgateAPI := resultMap["VtgateApi"]
|
||||
resultVtgateAPIMap := resultVtgateAPI.(map[string]interface{})
|
||||
resultAPIHistograms := resultVtgateAPIMap["Histograms"]
|
||||
resultAPIHistogramsMap := resultAPIHistograms.(map[string]interface{})
|
||||
resultTabletDestination := resultAPIHistogramsMap["ExecuteKeyRanges.destination_keyspace.master"]
|
||||
resultTabletDestinationMap := resultTabletDestination.(map[string]interface{})
|
||||
resultCountStrDestination := fmt.Sprintf("%v", reflect.ValueOf(resultTabletDestinationMap["Count"]))
|
||||
assert.Equal(t, "6", resultCountStrDestination, fmt.Sprintf("unexpected value for VtgateApi(ExecuteKeyRanges.destination_keyspace.master) inside %s)", resultCountStrDestination))
|
||||
|
||||
assert.Empty(t, resultMap["VtgateApiErrorCounts"])
|
||||
|
||||
}
|
||||
|
||||
func insertInitialValues(t *testing.T, conn *mysql.Conn, sourceMasterTablet cluster.Vttablet, destinationMasterTablet cluster.Vttablet) {
|
||||
moving1First = insertValues(t, conn, sourceKeyspace, "moving1", 100)
|
||||
moving2First = insertValues(t, conn, sourceKeyspace, "moving2", 100)
|
||||
staying1First = insertValues(t, conn, sourceKeyspace, "staying1", 100)
|
||||
staying2First = insertValues(t, conn, sourceKeyspace, "staying2", 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "moving1", moving1First, 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "moving2", moving2First, 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "staying1", staying1First, 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "staying2", staying2First, 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "view1", moving1First, 100)
|
||||
|
||||
moving3NoPkFirst = insertValues(t, conn, sourceKeyspace, "moving3_no_pk", 100)
|
||||
checkValues(t, &sourceMasterTablet, sourceKeyspace, "vt_source_keyspace", "moving3_no_pk", moving3NoPkFirst, 100)
|
||||
|
||||
// Insert data directly because vtgate would redirect us.
|
||||
_, err := destinationMasterTablet.VttabletProcess.QueryTablet(fmt.Sprintf("insert into %s (id, msg) values(%d, 'value %d')", "extra1", 1, 1), destinationKeyspace, true)
|
||||
assert.Nil(t, err)
|
||||
checkValues(t, &destinationMasterTablet, destinationKeyspace, "vt_destination_keyspace", "extra1", 1, 1)
|
||||
}
|
||||
|
||||
func checkClientConnRedirectionExecuteKeyrange(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn, keyspace string, servedFromDbTypes []topodata.TabletType, movedTables []string) {
|
||||
var testKeyRange = &topodata.KeyRange{
|
||||
Start: []byte{},
|
||||
End: []byte{},
|
||||
}
|
||||
keyRanges := []*topodata.KeyRange{testKeyRange}
|
||||
// check that the ServedFrom indirection worked correctly.
|
||||
for _, tableType := range servedFromDbTypes {
|
||||
for _, table := range movedTables {
|
||||
_, err := conn.ExecuteKeyRanges(ctx, fmt.Sprintf("select * from %s", table), keyspace, keyRanges, nil, tableType, nil)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkValues(t *testing.T, tablet *cluster.Vttablet, keyspace string, dbname string, table string, first int, count int) {
|
||||
t.Logf("Checking %d values from %s/%s starting at %d", count, dbname, table, first)
|
||||
qr, _ := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("select id, msg from %s where id>=%d order by id limit %d", table, first, count), keyspace, true)
|
||||
assert.Equal(t, count, len(qr.Rows), fmt.Sprintf("got wrong number of rows: %d != %d", len(qr.Rows), count))
|
||||
i := 0
|
||||
for i < count {
|
||||
result, _ := sqltypes.ToInt64(qr.Rows[i][0])
|
||||
assert.Equal(t, int64(first+i), result, fmt.Sprintf("got wrong number of rows: %d != %d", len(qr.Rows), first+i))
|
||||
assert.Contains(t, qr.Rows[i][1].String(), fmt.Sprintf("value %d", first+i), fmt.Sprintf("invalid msg[%d]: 'value %d' != '%s'", i, first+i, qr.Rows[i][1].String()))
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// checkValuesTimeout polls once per second until the expected rows show up, giving up after timeoutInSec attempts.
func checkValuesTimeout(t *testing.T, tablet cluster.Vttablet, table string, first int, count int, timeoutInSec int) bool {
for i := 0; i < timeoutInSec; i++ {
qr, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("select id, msg from %s where id>=%d order by id limit %d", table, first, count), destinationKeyspace, true)
if err != nil {
assert.Nil(t, err, "select failed on master tablet")
}
if len(qr.Rows) == count {
return true
}
time.Sleep(1 * time.Second)
}
return true
}
|
||||
|
||||
func checkBlacklistedTables(t *testing.T, tablet cluster.Vttablet, keyspace string, expected []string) {
|
||||
tabletStatus := tablet.VttabletProcess.GetStatus()
|
||||
if expected != nil {
|
||||
assert.Contains(t, tabletStatus, fmt.Sprintf("BlacklistedTables: %s", strings.Join(expected, " ")))
|
||||
} else {
|
||||
assert.NotContains(t, tabletStatus, "BlacklistedTables")
|
||||
}
|
||||
|
||||
// check we can or cannot access the tables
|
||||
for _, table := range []string{"moving1", "moving2"} {
|
||||
if expected != nil && strings.Contains(strings.Join(expected, " "), "moving") {
|
||||
// table is blacklisted, should get error
|
||||
err := clusterInstance.VtctlclientProcess.ExecuteCommand("VtTabletExecute", "-json", tablet.Alias, fmt.Sprintf("select count(1) from %s", table))
|
||||
assert.NotNil(t, err, "disallowed due to rule: enforce blacklisted tables")
|
||||
} else {
|
||||
// table is not blacklisted, should just work
|
||||
_, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("select count(1) from %s", table), keyspace, true)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func insertValues(t *testing.T, conn *mysql.Conn, keyspace string, table string, count int) int {
|
||||
result := insertIndex
|
||||
i := 0
|
||||
for i < count {
|
||||
execQuery(t, conn, "begin")
|
||||
execQuery(t, conn, "use `"+keyspace+":0`")
|
||||
execQuery(t, conn, fmt.Sprintf("insert into %s (id, msg) values(%d, 'value %d')", table, insertIndex, insertIndex))
|
||||
execQuery(t, conn, "commit")
|
||||
insertIndex++
|
||||
i++
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func teardownCluster() {
|
||||
clusterInstance.Teardown()
|
||||
}
|
||||
|
||||
func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
|
||||
t.Helper()
|
||||
qr, err := conn.ExecuteFetch(query, 1000, true)
|
||||
assert.Nil(t, err)
|
||||
return qr
|
||||
}
|
||||
|
||||
// CheckSrvKeyspaceServedFrom verifies the servedFrom with expected
|
||||
func checkSrvKeyspaceServedFrom(t *testing.T, cell string, ksname string, expected string, ci cluster.LocalProcessCluster) {
|
||||
srvKeyspace := sharding.GetSrvKeyspace(t, cell, ksname, ci)
|
||||
tabletTypeKeyspaceMap := make(map[string]string)
|
||||
result := ""
|
||||
if srvKeyspace.GetServedFrom() != nil {
|
||||
for _, servedFrom := range srvKeyspace.GetServedFrom() {
|
||||
tabletTy := strings.ToLower(servedFrom.GetTabletType().String())
|
||||
tabletTypeKeyspaceMap[tabletTy] = servedFrom.GetKeyspace()
|
||||
}
|
||||
}
|
||||
if tabletTypeKeyspaceMap["master"] != "" {
|
||||
result = result + fmt.Sprintf("ServedFrom(%s): %s\n", "master", tabletTypeKeyspaceMap["master"])
|
||||
}
|
||||
if tabletTypeKeyspaceMap["rdonly"] != "" {
|
||||
result = result + fmt.Sprintf("ServedFrom(%s): %s\n", "rdonly", tabletTypeKeyspaceMap["rdonly"])
|
||||
}
|
||||
if tabletTypeKeyspaceMap["replica"] != "" {
|
||||
result = result + fmt.Sprintf("ServedFrom(%s): %s\n", "replica", tabletTypeKeyspaceMap["replica"])
|
||||
}
|
||||
assert.Equal(t, expected, result, fmt.Sprintf("Mismatch in srv keyspace for cell %s keyspace %s, expected:\n %s\ngot:\n%s", cell, ksname, expected, result))
|
||||
assert.Equal(t, "", srvKeyspace.GetShardingColumnName(), fmt.Sprintf("Got a sharding_column_name in SrvKeyspace: %s", srvKeyspace.GetShardingColumnName()))
|
||||
assert.Equal(t, "UNSET", srvKeyspace.GetShardingColumnType().String(), fmt.Sprintf("Got a sharding_column_type in SrvKeyspace: %s", srvKeyspace.GetShardingColumnType().String()))
|
||||
}
|
||||
|
||||
func initializeCluster() (int, error) {
|
||||
var mysqlProcesses []*exec.Cmd
|
||||
clusterInstance = &cluster.LocalProcessCluster{Cell: cellj, Hostname: hostname}
|
||||
|
||||
// Start topo server
|
||||
if err := clusterInstance.StartTopo(); err != nil {
|
||||
return 1, err
|
||||
}
|
||||
|
||||
for _, keyspaceStr := range []string{sourceKeyspace, destinationKeyspace} {
|
||||
KeyspacePtr := &cluster.Keyspace{Name: keyspaceStr}
|
||||
keyspace := *KeyspacePtr
|
||||
if keyspaceStr == sourceKeyspace {
|
||||
if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspace.Name); err != nil {
|
||||
return 1, err
|
||||
}
|
||||
} else {
|
||||
if err := clusterInstance.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "--served_from", "master:source_keyspace,replica:source_keyspace,rdonly:source_keyspace", "destination_keyspace"); err != nil {
|
||||
return 1, err
|
||||
}
|
||||
}
|
||||
shard := &cluster.Shard{
|
||||
Name: shardName,
|
||||
}
|
||||
for i := 0; i < 4; i++ {
|
||||
// instantiate vttablet object with reserved ports
|
||||
tabletUID := clusterInstance.GetAndReserveTabletUID()
|
||||
var tablet *cluster.Vttablet = nil
|
||||
if i == 0 {
|
||||
tablet = clusterInstance.GetVttabletInstance("replica", tabletUID, cellj)
|
||||
} else if i == 1 {
|
||||
tablet = clusterInstance.GetVttabletInstance("replica", tabletUID, cellj)
|
||||
} else {
|
||||
tablet = clusterInstance.GetVttabletInstance("rdonly", tabletUID, cellj)
|
||||
}
|
||||
// Start Mysqlctl process
|
||||
tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory)
|
||||
if proc, err := tablet.MysqlctlProcess.StartProcess(); err != nil {
|
||||
return 1, err
|
||||
} else {
|
||||
mysqlProcesses = append(mysqlProcesses, proc)
|
||||
}
|
||||
// start vttablet process
|
||||
tablet.VttabletProcess = cluster.VttabletProcessInstance(tablet.HTTPPort,
|
||||
tablet.GrpcPort,
|
||||
tablet.TabletUID,
|
||||
clusterInstance.Cell,
|
||||
shardName,
|
||||
keyspace.Name,
|
||||
clusterInstance.VtctldProcess.Port,
|
||||
tablet.Type,
|
||||
clusterInstance.TopoProcess.Port,
|
||||
clusterInstance.Hostname,
|
||||
clusterInstance.TmpDirectory,
|
||||
clusterInstance.VtTabletExtraArgs,
|
||||
clusterInstance.EnableSemiSync)
|
||||
tablet.Alias = tablet.VttabletProcess.TabletPath
|
||||
|
||||
shard.Vttablets = append(shard.Vttablets, tablet)
|
||||
}
|
||||
keyspace.Shards = append(keyspace.Shards, *shard)
|
||||
clusterInstance.Keyspaces = append(clusterInstance.Keyspaces, keyspace)
|
||||
}
|
||||
for _, proc := range mysqlProcesses {
|
||||
err := proc.Wait()
|
||||
if err != nil {
|
||||
return 1, err
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func validateKeyspaceJSON(t *testing.T, keyspaceJSON string, cellsArr []string) {
|
||||
var keyspace topodata.Keyspace
|
||||
err := json2.Unmarshal([]byte(keyspaceJSON), &keyspace)
|
||||
assert.Nil(t, err)
|
||||
found := false
|
||||
for _, servedFrom := range keyspace.GetServedFroms() {
|
||||
if strings.ToLower(servedFrom.GetTabletType().String()) == "rdonly" {
|
||||
found = true
|
||||
if cellsArr != nil {
|
||||
if len(cellsArr) > 0 {
|
||||
for _, eachCell := range cellsArr {
|
||||
assert.Contains(t, strings.Join(servedFrom.GetCells(), " "), eachCell)
|
||||
}
|
||||
} else {
|
||||
assert.Equal(t, []string{}, servedFrom.GetCells())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cellsArr != nil {
|
||||
assert.Equal(t, true, found)
|
||||
} else {
|
||||
assert.Equal(t, false, found)
|
||||
}
|
||||
}
|
|
@ -54,6 +54,7 @@ func TestTabletReshuffle(t *testing.T) {

//Create new tablet
rTablet := clusterInstance.GetVttabletInstance("replica", 0, "")

//Init Tablets
err = clusterInstance.VtctlclientProcess.InitTablet(rTablet, cell, keyspaceName, hostname, shardName)
require.NoError(t, err)
@ -356,6 +356,7 @@ func externalReparenting(ctx context.Context, t *testing.T, clusterInstance *clu

// Wait for replica to catch up to master.
waitForReplicationPos(ctx, t, master, replica, 60.0)

duration := time.Since(start)
minUnavailabilityInS := 1.0
if duration.Seconds() < minUnavailabilityInS {
@ -372,7 +373,9 @@
}

// Configure old master to replicate from new master.

_, gtID := cluster.GetMasterPosition(t, *newMaster, hostname)

// Use 'localhost' as hostname because Travis CI worker hostnames
// are too long for MySQL replication.
changeMasterCommands := fmt.Sprintf("RESET SLAVE;SET GLOBAL gtid_slave_pos = '%s';CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d ,MASTER_USER='vt_repl', MASTER_USE_GTID = slave_pos;START SLAVE;", gtID, "localhost", newMaster.MySQLPort)
@ -33,6 +33,7 @@ type uvindex struct{ matchid, matchkr bool }
func (*uvindex) String() string { return "uvindex" }
func (*uvindex) Cost() int { return 1 }
func (*uvindex) IsUnique() bool { return true }
func (*uvindex) NeedsVCursor() bool { return false }
func (*uvindex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
panic("unimplemented")
}

@ -62,6 +63,7 @@ type nvindex struct{ matchid, matchkr bool }
func (*nvindex) String() string { return "nvindex" }
func (*nvindex) Cost() int { return 1 }
func (*nvindex) IsUnique() bool { return false }
func (*nvindex) NeedsVCursor() bool { return false }
func (*nvindex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
panic("unimplemented")
}
@ -285,6 +285,7 @@ type keyRangeLookuper struct {
|
|||
func (v *keyRangeLookuper) String() string { return "keyrange_lookuper" }
|
||||
func (*keyRangeLookuper) Cost() int { return 0 }
|
||||
func (*keyRangeLookuper) IsUnique() bool { return false }
|
||||
func (*keyRangeLookuper) NeedsVCursor() bool { return false }
|
||||
func (*keyRangeLookuper) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
@ -309,6 +310,7 @@ type keyRangeLookuperUnique struct {
|
|||
func (v *keyRangeLookuperUnique) String() string { return "keyrange_lookuper" }
|
||||
func (*keyRangeLookuperUnique) Cost() int { return 0 }
|
||||
func (*keyRangeLookuperUnique) IsUnique() bool { return true }
|
||||
func (*keyRangeLookuperUnique) NeedsVCursor() bool { return false }
|
||||
func (*keyRangeLookuperUnique) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@ type hashIndex struct{ name string }
|
|||
func (v *hashIndex) String() string { return v.name }
|
||||
func (*hashIndex) Cost() int { return 1 }
|
||||
func (*hashIndex) IsUnique() bool { return true }
|
||||
func (*hashIndex) NeedsVCursor() bool { return false }
|
||||
func (*hashIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
@ -59,6 +60,7 @@ type lookupIndex struct{ name string }
|
|||
func (v *lookupIndex) String() string { return v.name }
|
||||
func (*lookupIndex) Cost() int { return 2 }
|
||||
func (*lookupIndex) IsUnique() bool { return true }
|
||||
func (*lookupIndex) NeedsVCursor() bool { return false }
|
||||
func (*lookupIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
@ -83,6 +85,7 @@ type multiIndex struct{ name string }
|
|||
func (v *multiIndex) String() string { return v.name }
|
||||
func (*multiIndex) Cost() int { return 3 }
|
||||
func (*multiIndex) IsUnique() bool { return false }
|
||||
func (*multiIndex) NeedsVCursor() bool { return false }
|
||||
func (*multiIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
@ -108,6 +111,7 @@ type costlyIndex struct{ name string }
|
|||
func (v *costlyIndex) String() string { return v.name }
|
||||
func (*costlyIndex) Cost() int { return 10 }
|
||||
func (*costlyIndex) IsUnique() bool { return false }
|
||||
func (*costlyIndex) NeedsVCursor() bool { return false }
|
||||
func (*costlyIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
|
||||
return []bool{}, nil
|
||||
}
|
||||
|
|
|
@ -54,6 +54,11 @@ func (vind *Binary) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *Binary) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify returns true if ids maps to ksids.
|
||||
func (vind *Binary) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) {
|
||||
out := make([]bool, len(ids))
|
||||
|
|
|
@ -19,9 +19,9 @@ package vindexes
|
|||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -33,16 +33,11 @@ func init() {
|
|||
binOnlyVindex = vindex.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestBinaryCost(t *testing.T) {
|
||||
if binOnlyVindex.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", binOnlyVindex.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryString(t *testing.T) {
|
||||
if strings.Compare("binary_varchar", binOnlyVindex.String()) != 0 {
|
||||
t.Errorf("String(): %s, want binary_varchar", binOnlyVindex.String())
|
||||
}
|
||||
func TestBinaryInfo(t *testing.T) {
|
||||
assert.Equal(t, 1, binOnlyVindex.Cost())
|
||||
assert.Equal(t, "binary_varchar", binOnlyVindex.String())
|
||||
assert.True(t, binOnlyVindex.IsUnique())
|
||||
assert.False(t, binOnlyVindex.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestBinaryMap(t *testing.T) {
|
||||
|
|
|
@ -53,6 +53,11 @@ func (vind *BinaryMD5) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *BinaryMD5) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify returns true if ids maps to ksids.
|
||||
func (vind *BinaryMD5) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) {
|
||||
out := make([]bool, len(ids))
|
||||
|
|
|
@ -20,8 +20,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -33,16 +32,11 @@ func init() {
|
|||
binVindex = vindex.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestBinaryMD5Cost(t *testing.T) {
|
||||
if binVindex.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", binVindex.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryMD5String(t *testing.T) {
|
||||
if strings.Compare("binary_md5_varchar", binVindex.String()) != 0 {
|
||||
t.Errorf("String(): %s, want binary_md5_varchar", binVindex.String())
|
||||
}
|
||||
func TestBinaryMD5Info(t *testing.T) {
|
||||
assert.Equal(t, 1, binVindex.Cost())
|
||||
assert.Equal(t, "binary_md5_varchar", binVindex.String())
|
||||
assert.True(t, binVindex.IsUnique())
|
||||
assert.False(t, binVindex.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestBinaryMD5Map(t *testing.T) {
|
||||
|
|
|
@ -75,6 +75,11 @@ func (lu *ConsistentLookup) IsUnique() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lu *ConsistentLookup) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lu *ConsistentLookup) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
@ -129,6 +134,11 @@ func (lu *ConsistentLookupUnique) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lu *ConsistentLookupUnique) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lu *ConsistentLookupUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
querypb "vitess.io/vitess/go/vt/proto/query"
|
||||
|
@ -46,28 +47,18 @@ func TestConsistentLookupInit(t *testing.T) {
|
|||
|
||||
func TestConsistentLookupInfo(t *testing.T) {
|
||||
lookup := createConsistentLookup(t, "consistent_lookup")
|
||||
if lookup.Cost() != 20 {
|
||||
t.Errorf("Cost(): %d, want 20", lookup.Cost())
|
||||
}
|
||||
if strings.Compare("consistent_lookup", lookup.String()) != 0 {
|
||||
t.Errorf("String(): %s, want consistent_lookup", lookup.String())
|
||||
}
|
||||
if lookup.IsUnique() {
|
||||
t.Errorf("IsUnique(): %v, want false", lookup.IsUnique())
|
||||
}
|
||||
assert.Equal(t, 20, lookup.Cost())
|
||||
assert.Equal(t, "consistent_lookup", lookup.String())
|
||||
assert.False(t, lookup.IsUnique())
|
||||
assert.True(t, lookup.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestConsistentLookupUniqueInfo(t *testing.T) {
|
||||
lookup := createConsistentLookup(t, "consistent_lookup_unique")
|
||||
if lookup.Cost() != 10 {
|
||||
t.Errorf("Cost(): %d, want 10", lookup.Cost())
|
||||
}
|
||||
if strings.Compare("consistent_lookup_unique", lookup.String()) != 0 {
|
||||
t.Errorf("String(): %s, want consistent_lookup_unique", lookup.String())
|
||||
}
|
||||
if !lookup.IsUnique() {
|
||||
t.Errorf("IsUnique(): %v, want true", lookup.IsUnique())
|
||||
}
|
||||
assert.Equal(t, 10, lookup.Cost())
|
||||
assert.Equal(t, "consistent_lookup_unique", lookup.String())
|
||||
assert.True(t, lookup.IsUnique())
|
||||
assert.True(t, lookup.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestConsistentLookupMap(t *testing.T) {
|
||||
|
|
|
@ -64,6 +64,11 @@ func (vind *Hash) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *Hash) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (vind *Hash) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, len(ids))
|
||||
|
|
|
@ -18,9 +18,9 @@ package vindexes
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -35,16 +35,11 @@ func init() {
|
|||
hash = hv.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestHashCost(t *testing.T) {
|
||||
if hash.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", hash.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashString(t *testing.T) {
|
||||
if strings.Compare("nn", hash.String()) != 0 {
|
||||
t.Errorf("String(): %s, want hash", hash.String())
|
||||
}
|
||||
func TestHashInfo(t *testing.T) {
|
||||
assert.Equal(t, 1, hash.Cost())
|
||||
assert.Equal(t, "nn", hash.String())
|
||||
assert.True(t, hash.IsUnique())
|
||||
assert.False(t, hash.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestHashMap(t *testing.T) {
|
||||
|
|
|
@ -61,6 +61,11 @@ func (ln *LookupNonUnique) IsUnique() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (ln *LookupNonUnique) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (ln *LookupNonUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
@ -211,6 +216,11 @@ func (lu *LookupUnique) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lu *LookupUnique) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lu *LookupUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
|
|
@ -93,6 +93,11 @@ func (lh *LookupHash) IsUnique() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lh *LookupHash) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lh *LookupHash) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
@ -244,6 +249,11 @@ func (lhu *LookupHashUnique) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lhu *LookupHashUnique) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lhu *LookupHashUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
|
|
@ -18,9 +18,9 @@ package vindexes
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
|
||||
|
@ -49,28 +49,18 @@ func TestLookupHashNew(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLookupHashCost(t *testing.T) {
|
||||
func TestLookupHashInfo(t *testing.T) {
|
||||
lookuphash := createLookup(t, "lookup_hash", false)
|
||||
assert.Equal(t, 20, lookuphash.Cost())
|
||||
assert.Equal(t, "lookup_hash", lookuphash.String())
|
||||
assert.False(t, lookuphash.IsUnique())
|
||||
assert.True(t, lookuphash.NeedsVCursor())
|
||||
|
||||
lookuphashunique := createLookup(t, "lookup_hash_unique", false)
|
||||
|
||||
if lookuphash.Cost() != 20 {
|
||||
t.Errorf("Cost(): %d, want 20", lookuphash.Cost())
|
||||
}
|
||||
if lookuphashunique.Cost() != 10 {
|
||||
t.Errorf("Cost(): %d, want 10", lookuphashunique.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupHashString(t *testing.T) {
|
||||
lookuphash := createLookup(t, "lookup_hash", false)
|
||||
lookuphashunique := createLookup(t, "lookup_hash_unique", false)
|
||||
|
||||
if strings.Compare("lookup_hash", lookuphash.String()) != 0 {
|
||||
t.Errorf("String(): %s, want lookup_hash", lookuphash.String())
|
||||
}
|
||||
if strings.Compare("lookup_hash_unique", lookuphashunique.String()) != 0 {
|
||||
t.Errorf("String(): %s, want lookup_hash_unique", lookuphashunique.String())
|
||||
}
|
||||
assert.Equal(t, 10, lookuphashunique.Cost())
|
||||
assert.Equal(t, "lookup_hash_unique", lookuphashunique.String())
|
||||
assert.True(t, lookuphashunique.IsUnique())
|
||||
assert.True(t, lookuphashunique.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestLookupHashMap(t *testing.T) {
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
|
||||
|
@ -54,11 +55,12 @@ func TestLookupHashUniqueNew(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLookupHashUniqueCost(t *testing.T) {
|
||||
func TestLookupHashUniqueInfo(t *testing.T) {
|
||||
lhu := createLookup(t, "lookup_hash_unique", false)
|
||||
if lhu.Cost() != 10 {
|
||||
t.Errorf("Cost(): %d, want 10", lhu.Cost())
|
||||
}
|
||||
assert.Equal(t, 10, lhu.Cost())
|
||||
assert.Equal(t, "lookup_hash_unique", lhu.String())
|
||||
assert.True(t, lhu.IsUnique())
|
||||
assert.True(t, lhu.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestLookupHashUniqueMap(t *testing.T) {
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
"strings"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
|
@ -113,18 +114,12 @@ func TestLookupNonUniqueNew(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLookupNonUniqueCost(t *testing.T) {
|
||||
func TestLookupNonUniqueInfo(t *testing.T) {
|
||||
lookupNonUnique := createLookup(t, "lookup", false)
|
||||
if lookupNonUnique.Cost() != 20 {
|
||||
t.Errorf("Cost(): %d, want 20", lookupNonUnique.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupNonUniqueString(t *testing.T) {
|
||||
lookupNonUnique := createLookup(t, "lookup", false)
|
||||
if strings.Compare("lookup", lookupNonUnique.String()) != 0 {
|
||||
t.Errorf("String(): %s, want lookup", lookupNonUnique.String())
|
||||
}
|
||||
assert.Equal(t, 20, lookupNonUnique.Cost())
|
||||
assert.Equal(t, "lookup", lookupNonUnique.String())
|
||||
assert.False(t, lookupNonUnique.IsUnique())
|
||||
assert.True(t, lookupNonUnique.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestLookupNilVCursor(t *testing.T) {
|
||||
|
|
|
@ -94,6 +94,11 @@ func (lh *LookupUnicodeLooseMD5Hash) IsUnique() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lh *LookupUnicodeLooseMD5Hash) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lh *LookupUnicodeLooseMD5Hash) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
@ -256,6 +261,11 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (lhu *LookupUnicodeLooseMD5HashUnique) NeedsVCursor() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (lhu *LookupUnicodeLooseMD5HashUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
|
|
@ -18,9 +18,9 @@ package vindexes
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
querypb "vitess.io/vitess/go/vt/proto/query"
|
||||
|
@ -56,18 +56,11 @@ func TestLookupUniqueNew(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLookupUniqueCost(t *testing.T) {
|
||||
func TestLookupUniqueInfo(t *testing.T) {
|
||||
lookupUnique := createLookup(t, "lookup_unique", false)
|
||||
if lookupUnique.Cost() != 10 {
|
||||
t.Errorf("Cost(): %d, want 10", lookupUnique.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupUniqueString(t *testing.T) {
|
||||
lookupUnique := createLookup(t, "lookup_unique", false)
|
||||
if strings.Compare("lookup_unique", lookupUnique.String()) != 0 {
|
||||
t.Errorf("String(): %s, want lookup_unique", lookupUnique.String())
|
||||
}
|
||||
assert.Equal(t, 10, lookupUnique.Cost())
|
||||
assert.Equal(t, "lookup_unique", lookupUnique.String())
|
||||
assert.True(t, lookupUnique.IsUnique())
|
||||
}
|
||||
|
||||
func TestLookupUniqueMap(t *testing.T) {
|
||||
|
|
|
@ -48,9 +48,9 @@ func (vind *Null) String() string {
|
|||
return vind.name
|
||||
}
|
||||
|
||||
// Cost returns the cost of this index as 0.
|
||||
// Cost returns the cost of this index as 100.
|
||||
func (vind *Null) Cost() int {
|
||||
return 0
|
||||
return 100
|
||||
}
|
||||
|
||||
// IsUnique returns true since the Vindex is unique.
|
||||
|
@ -58,6 +58,11 @@ func (vind *Null) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *Null) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (vind *Null) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, 0, len(ids))
|
||||
|
|
|
@ -18,9 +18,9 @@ package vindexes
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -35,16 +35,11 @@ func init() {
|
|||
null = hv.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestNullCost(t *testing.T) {
|
||||
if null.Cost() != 0 {
|
||||
t.Errorf("Cost(): %d, want 0", null.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNullString(t *testing.T) {
|
||||
if strings.Compare("nn", null.String()) != 0 {
|
||||
t.Errorf("String(): %s, want null", null.String())
|
||||
}
|
||||
func TestNullInfo(t *testing.T) {
|
||||
assert.Equal(t, 100, null.Cost())
|
||||
assert.Equal(t, "nn", null.String())
|
||||
assert.True(t, null.IsUnique())
|
||||
assert.False(t, null.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestNullMap(t *testing.T) {
|
||||
|
|
|
@ -57,6 +57,11 @@ func (*Numeric) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (*Numeric) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify returns true if ids and ksids match.
|
||||
func (*Numeric) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) {
|
||||
out := make([]bool, len(ids))
|
||||
|
|
|
@ -80,6 +80,11 @@ func (vind *NumericStaticMap) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *NumericStaticMap) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify returns true if ids and ksids match.
|
||||
func (vind *NumericStaticMap) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) {
|
||||
out := make([]bool, len(ids))
|
||||
|
|
|
@ -20,8 +20,8 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -38,24 +38,13 @@ func createVindex() (SingleColumn, error) {
|
|||
return vindex.(SingleColumn), nil
|
||||
}
|
||||
|
||||
func TestNumericStaticMapCost(t *testing.T) {
|
||||
func TestNumericStaticMapInfo(t *testing.T) {
|
||||
numericStaticMap, err := createVindex()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create vindex: %v", err)
|
||||
}
|
||||
if numericStaticMap.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", numericStaticMap.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNumericStaticMapString(t *testing.T) {
|
||||
numericStaticMap, err := createVindex()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create vindex: %v", err)
|
||||
}
|
||||
if strings.Compare("numericStaticMap", numericStaticMap.String()) != 0 {
|
||||
t.Errorf("String(): %s, want num", numericStaticMap.String())
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, numericStaticMap.Cost())
|
||||
assert.Equal(t, "numericStaticMap", numericStaticMap.String())
|
||||
assert.True(t, numericStaticMap.IsUnique())
|
||||
assert.False(t, numericStaticMap.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestNumericStaticMapMap(t *testing.T) {
|
||||
|
|
|
@ -20,8 +20,7 @@ import (
|
|||
"reflect"
|
||||
"testing"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -33,16 +32,11 @@ func init() {
|
|||
numeric = vindex.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestNumericCost(t *testing.T) {
|
||||
if numeric.Cost() != 0 {
|
||||
t.Errorf("Cost(): %d, want 0", numeric.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNumericString(t *testing.T) {
|
||||
if strings.Compare("num", numeric.String()) != 0 {
|
||||
t.Errorf("String(): %s, want num", numeric.String())
|
||||
}
|
||||
func TestNumericInfo(t *testing.T) {
|
||||
assert.Equal(t, 0, numeric.Cost())
|
||||
assert.Equal(t, "num", numeric.String())
|
||||
assert.True(t, numeric.IsUnique())
|
||||
assert.False(t, numeric.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestNumericMap(t *testing.T) {
|
||||
|
|
|
@ -79,6 +79,11 @@ func (ge *RegionExperimental) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (ge *RegionExperimental) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Map satisfies MultiColumn.
|
||||
func (ge *RegionExperimental) Map(vcursor VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) {
|
||||
destinations := make([]key.Destination, 0, len(rowsColValues))
|
||||
|
|
|
@ -21,16 +21,18 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
||||
func TestRegionExperimentalMisc(t *testing.T) {
|
||||
ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, ge.Cost())
|
||||
assert.Equal(t, "region_experimental", ge.String())
|
||||
assert.True(t, ge.IsUnique())
|
||||
assert.False(t, ge.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestRegionExperimentalMap(t *testing.T) {
|
||||
|
|
|
@ -59,6 +59,11 @@ func (vind *ReverseBits) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *ReverseBits) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Map returns the corresponding KeyspaceId values for the given ids.
|
||||
func (vind *ReverseBits) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, len(ids))
|
||||
|
|
|
@ -18,9 +18,9 @@ package vindexes
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -35,16 +35,11 @@ func init() {
|
|||
reverseBits = hv.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestReverseBitsCost(t *testing.T) {
|
||||
if reverseBits.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", reverseBits.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReverseBitsString(t *testing.T) {
|
||||
if strings.Compare("rr", reverseBits.String()) != 0 {
|
||||
t.Errorf("String(): %s, want hash", reverseBits.String())
|
||||
}
|
||||
func TestReverseBitsInfo(t *testing.T) {
|
||||
assert.Equal(t, 1, reverseBits.Cost())
|
||||
assert.Equal(t, "rr", reverseBits.String())
|
||||
assert.True(t, reverseBits.IsUnique())
|
||||
assert.False(t, reverseBits.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestReverseBitsMap(t *testing.T) {
|
||||
|
|
|
@ -62,6 +62,11 @@ func (vind *UnicodeLooseMD5) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *UnicodeLooseMD5) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify returns true if ids maps to ksids.
|
||||
func (vind *UnicodeLooseMD5) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) {
|
||||
out := make([]bool, len(ids))
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
)
|
||||
|
@ -33,16 +34,11 @@ func init() {
|
|||
charVindex = vindex.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestUnicodeLooseMD5Cost(t *testing.T) {
|
||||
if charVindex.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", charVindex.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnicodeLooseMD5String(t *testing.T) {
|
||||
if strings.Compare("utf8ch", charVindex.String()) != 0 {
|
||||
t.Errorf("String(): %s, want utf8ch", charVindex.String())
|
||||
}
|
||||
func TestUnicodeLooseMD5Info(t *testing.T) {
|
||||
assert.Equal(t, 1, charVindex.Cost())
|
||||
assert.Equal(t, "utf8ch", charVindex.String())
|
||||
assert.True(t, charVindex.IsUnique())
|
||||
assert.False(t, charVindex.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestUnicodeLooseMD5Map(t *testing.T) {
|
||||
|
|
|
@ -56,8 +56,15 @@ type Vindex interface {
	Cost() int

	// IsUnique returns true if the Vindex is unique.
	// Which means Map() maps to either a KeyRange or a single KeyspaceID.
	// A Unique Vindex is allowed to return non-unique values like
	// a keyrange. This is in situations where the vindex does not
	// have enough information to map to a keyspace id. If so, such
	// a vindex cannot be primary.
	IsUnique() bool

	// NeedsVCursor returns true if the Vindex makes calls into the
	// VCursor. Such vindexes cannot be used by vreplication.
	NeedsVCursor() bool
}

// SingleColumn defines the interface for a single column vindex.
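For orientation: the stFU/stLN-style test doubles in the hunks that follow show how little is needed to satisfy this interface. A consolidated minimal sketch (type name illustrative, not part of this change; it assumes it lives in the vindexes package, where VCursor, sqltypes and key are already in scope):

// stubVindex is a hypothetical, functional, unique vindex that never calls
// back into the VCursor, so vreplication could use it.
type stubVindex struct{ name string }

func (v *stubVindex) String() string   { return v.name }
func (*stubVindex) Cost() int          { return 1 }     // cheap: preferred when several vindexes qualify
func (*stubVindex) IsUnique() bool     { return true }  // Map() resolves to a single KeyspaceID (or KeyRange)
func (*stubVindex) NeedsVCursor() bool { return false } // no VCursor calls
func (*stubVindex) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
	return []bool{}, nil
}
func (*stubVindex) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
	return nil, nil
}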
|
|
|
@ -45,6 +45,7 @@ type stFU struct {
|
|||
func (v *stFU) String() string { return v.name }
|
||||
func (*stFU) Cost() int { return 1 }
|
||||
func (*stFU) IsUnique() bool { return true }
|
||||
func (*stFU) NeedsVCursor() bool { return false }
|
||||
func (*stFU) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil }
|
||||
func (*stFU) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) { return nil, nil }
|
||||
|
||||
|
@ -63,6 +64,7 @@ type stLN struct {
|
|||
func (v *stLN) String() string { return v.name }
|
||||
func (*stLN) Cost() int { return 0 }
|
||||
func (*stLN) IsUnique() bool { return false }
|
||||
func (*stLN) NeedsVCursor() bool { return false }
|
||||
func (*stLN) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil }
|
||||
func (*stLN) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) { return nil, nil }
|
||||
func (*stLN) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil }
|
||||
|
@ -85,6 +87,7 @@ type stLU struct {
|
|||
func (v *stLU) String() string { return v.name }
|
||||
func (*stLU) Cost() int { return 2 }
|
||||
func (*stLU) IsUnique() bool { return true }
|
||||
func (*stLU) NeedsVCursor() bool { return false }
|
||||
func (*stLU) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil }
|
||||
func (*stLU) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) { return nil, nil }
|
||||
func (*stLU) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil }
|
||||
|
@ -110,6 +113,7 @@ type stLO struct {
|
|||
func (v *stLO) String() string { return v.name }
|
||||
func (*stLO) Cost() int { return 2 }
|
||||
func (*stLO) IsUnique() bool { return true }
|
||||
func (*stLO) NeedsVCursor() bool { return false }
|
||||
func (*stLO) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil }
|
||||
func (*stLO) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) { return nil, nil }
|
||||
func (*stLO) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil }
|
||||
|
|
|
@ -56,6 +56,11 @@ func (vind *XXHash) IsUnique() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// NeedsVCursor satisfies the Vindex interface.
|
||||
func (vind *XXHash) NeedsVCursor() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Map can map ids to key.Destination objects.
|
||||
func (vind *XXHash) Map(cursor VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
|
||||
out := make([]key.Destination, len(ids))
|
||||
|
|
|
@ -20,10 +20,10 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"vitess.io/vitess/go/sqltypes"
|
||||
"vitess.io/vitess/go/vt/key"
|
||||
|
@ -39,16 +39,11 @@ func init() {
|
|||
xxHash = hv.(SingleColumn)
|
||||
}
|
||||
|
||||
func TestXXHashCost(t *testing.T) {
|
||||
if xxHash.Cost() != 1 {
|
||||
t.Errorf("Cost(): %d, want 1", xxHash.Cost())
|
||||
}
|
||||
}
|
||||
|
||||
func TestXXHashString(t *testing.T) {
|
||||
if strings.Compare("xxhash_name", xxHash.String()) != 0 {
|
||||
t.Errorf("String(): %s, want xxhash_name", xxHash.String())
|
||||
}
|
||||
func TestXXHashInfo(t *testing.T) {
|
||||
assert.Equal(t, 1, xxHash.Cost())
|
||||
assert.Equal(t, "xxhash_name", xxHash.String())
|
||||
assert.True(t, xxHash.IsUnique())
|
||||
assert.False(t, xxHash.NeedsVCursor())
|
||||
}
|
||||
|
||||
func TestXXHashMap(t *testing.T) {
|
||||
|
|
|
@ -31,16 +31,12 @@ type localVSchema struct {
|
|||
vschema *vindexes.VSchema
|
||||
}
|
||||
|
||||
func (lvs *localVSchema) FindTable(tablename string) (*vindexes.Table, error) {
|
||||
ks, ok := lvs.vschema.Keyspaces[lvs.keyspace]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("keyspace %s not found in vschema", lvs.keyspace)
|
||||
func (lvs *localVSchema) FindColVindex(tablename string) (*vindexes.ColumnVindex, error) {
|
||||
table, err := lvs.findTable(tablename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
table := ks.Tables[tablename]
|
||||
if table == nil {
|
||||
return nil, fmt.Errorf("table %s not found", tablename)
|
||||
}
|
||||
return table, nil
|
||||
return identifyColVindex(table)
|
||||
}
|
||||
|
||||
func (lvs *localVSchema) FindOrCreateVindex(qualifiedName string) (vindexes.Vindex, error) {

@ -66,3 +62,37 @@ func (lvs *localVSchema) FindOrCreateVindex(qualifiedName string) (vindexes.Vind
	}
	return vindexes.CreateVindex(name, name, map[string]string{})
}

func (lvs *localVSchema) findTable(tablename string) (*vindexes.Table, error) {
	ks, ok := lvs.vschema.Keyspaces[lvs.keyspace]
	if !ok {
		return nil, fmt.Errorf("keyspace %s not found in vschema", lvs.keyspace)
	}
	table := ks.Tables[tablename]
	if table == nil {
		return nil, fmt.Errorf("table %s not found", tablename)
	}
	return table, nil
}

func identifyColVindex(table *vindexes.Table) (*vindexes.ColumnVindex, error) {
	if len(table.ColumnVindexes) == 0 {
		return nil, fmt.Errorf("table %s has no vindex", table.Name.String())
	}
	var result *vindexes.ColumnVindex
	for _, cv := range table.ColumnVindexes {
		if cv.Vindex.NeedsVCursor() {
			continue
		}
		if !cv.Vindex.IsUnique() {
			continue
		}
		if result == nil || result.Vindex.Cost() > cv.Vindex.Cost() {
			result = cv
		}
	}
	if result == nil {
		return nil, fmt.Errorf("could not find a vindex to compute keyspace id for table %v", table.Name.String())
	}
	return result, nil
}
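Put together, FindColVindex returns the lowest-cost column vindex that is both unique and VCursor-free. A minimal usage sketch, assuming the "cheapest" table from the TestFindColVindex vschema further down (it carries both a hash vindex, cost 1, and a numeric vindex, cost 0):

	// Sketch only; vschema is the one built from testSrvVSchema in the test below.
	lvs := &localVSchema{keyspace: "ks1", vschema: vschema}
	cv, err := lvs.FindColVindex("cheapest")
	// err is nil and cv.Name is "numeric": both candidates are unique and
	// need no VCursor, so the cheaper one (cost 0) wins.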
|
|
|
@ -27,7 +27,121 @@ import (
|
|||
"vitess.io/vitess/go/vt/vtgate/vindexes"
|
||||
)
|
||||
|
||||
var testSrvVSchema = &vschemapb.SrvVSchema{
|
||||
func TestFindColVindex(t *testing.T) {
|
||||
testSrvVSchema := &vschemapb.SrvVSchema{
|
||||
Keyspaces: map[string]*vschemapb.Keyspace{
|
||||
"ks1": {
|
||||
Sharded: true,
|
||||
Vindexes: map[string]*vschemapb.Vindex{
|
||||
"hash": {
|
||||
Type: "hash",
|
||||
},
|
||||
"lookup_unique": {
|
||||
Type: "lookup_unique",
|
||||
Params: map[string]string{
|
||||
"table": "t",
|
||||
"from": "fromc",
|
||||
"to": "toc",
|
||||
},
|
||||
},
|
||||
"lookup": {
|
||||
Type: "lookup",
|
||||
Params: map[string]string{
|
||||
"table": "t",
|
||||
"from": "fromc",
|
||||
"to": "toc",
|
||||
},
|
||||
},
|
||||
"numeric": {
|
||||
Type: "numeric",
|
||||
},
|
||||
},
|
||||
Tables: map[string]*vschemapb.Table{
|
||||
"t1": {
|
||||
ColumnVindexes: []*vschemapb.ColumnVindex{{
|
||||
Name: "hash",
|
||||
Columns: []string{"id"},
|
||||
}},
|
||||
},
|
||||
"nogoodvindex1": {
|
||||
ColumnVindexes: []*vschemapb.ColumnVindex{{
|
||||
Name: "lookup_unique",
|
||||
Columns: []string{"id"},
|
||||
}},
|
||||
},
|
||||
"nogoodvindex2": {
|
||||
ColumnVindexes: []*vschemapb.ColumnVindex{{
|
||||
Name: "lookup_unique",
|
||||
Columns: []string{"id"},
|
||||
}, {
|
||||
Name: "lookup",
|
||||
Columns: []string{"id"},
|
||||
}},
|
||||
},
|
||||
"cheapest": {
|
||||
ColumnVindexes: []*vschemapb.ColumnVindex{{
|
||||
Name: "hash",
|
||||
Columns: []string{"id"},
|
||||
}, {
|
||||
Name: "numeric",
|
||||
Columns: []string{"id"},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"unsharded": {
|
||||
Tables: map[string]*vschemapb.Table{
|
||||
"t1": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcases := []struct {
|
||||
keyspace string
|
||||
tablename string
|
||||
vindexname string
|
||||
err string
|
||||
}{{
|
||||
keyspace: "ks1",
|
||||
tablename: "t1",
|
||||
vindexname: "hash",
|
||||
}, {
|
||||
keyspace: "ks1",
|
||||
tablename: "nogoodvindex1",
|
||||
err: "could not find a vindex to compute keyspace id for table nogoodvindex1",
|
||||
}, {
|
||||
keyspace: "ks1",
|
||||
tablename: "nogoodvindex2",
|
||||
err: "could not find a vindex to compute keyspace id for table nogoodvindex2",
|
||||
}, {
|
||||
keyspace: "ks1",
|
||||
tablename: "cheapest",
|
||||
vindexname: "numeric",
|
||||
}, {
|
||||
keyspace: "unsharded",
|
||||
tablename: "t1",
|
||||
err: "table t1 has no vindex",
|
||||
}}
|
||||
for _, tcase := range testcases {
|
||||
lvs := &localVSchema{
|
||||
keyspace: tcase.keyspace,
|
||||
vschema: vschema,
|
||||
}
|
||||
cv, err := lvs.FindColVindex(tcase.tablename)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, tcase.err, tcase.tablename)
|
||||
continue
|
||||
}
|
||||
assert.NoError(t, err, tcase.tablename)
|
||||
assert.Equal(t, cv.Name, tcase.vindexname, tcase.tablename)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindOrCreateVindex(t *testing.T) {
|
||||
testSrvVSchema := &vschemapb.SrvVSchema{
|
||||
Keyspaces: map[string]*vschemapb.Keyspace{
|
||||
"ks1": {
|
||||
Sharded: true,
|
||||
|
@ -36,14 +150,6 @@ var testSrvVSchema = &vschemapb.SrvVSchema{
|
|||
Type: "hash",
|
||||
},
|
||||
},
|
||||
Tables: map[string]*vschemapb.Table{
|
||||
"t1": {
|
||||
ColumnVindexes: []*vschemapb.ColumnVindex{{
|
||||
Name: "duphash",
|
||||
Columns: []string{"id"},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"ks2": {
|
||||
Sharded: true,
|
||||
|
@ -58,46 +164,13 @@ var testSrvVSchema = &vschemapb.SrvVSchema{
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestFindTable(t *testing.T) {
|
||||
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcases := []struct {
|
||||
keyspace string
|
||||
tablename string
|
||||
err string
|
||||
}{{
|
||||
keyspace: "ks1",
|
||||
tablename: "t1",
|
||||
err: "",
|
||||
}, {
|
||||
keyspace: "ks1",
|
||||
tablename: "t2",
|
||||
err: "table t2 not found",
|
||||
}, {
|
||||
keyspace: "noks",
|
||||
tablename: "t2",
|
||||
err: "keyspace noks not found in vschema",
|
||||
}}
|
||||
for _, tcase := range testcases {
|
||||
lvs := &localVSchema{
|
||||
keyspace: tcase.keyspace,
|
||||
keyspace: "ks1",
|
||||
vschema: vschema,
|
||||
}
|
||||
table, err := lvs.FindTable(tcase.tablename)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, tcase.err, tcase.keyspace, tcase.tablename)
|
||||
continue
|
||||
}
|
||||
assert.NoError(t, err, tcase.keyspace, tcase.tablename)
|
||||
assert.Equal(t, table.Name.String(), tcase.tablename, tcase.keyspace, tcase.tablename)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindOrCreateVindex(t *testing.T) {
|
||||
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
|
@ -125,10 +198,6 @@ func TestFindOrCreateVindex(t *testing.T) {
|
|||
err: `vindexType "none" not found`,
|
||||
}}
|
||||
for _, tcase := range testcases {
|
||||
lvs := &localVSchema{
|
||||
keyspace: "ks1",
|
||||
vschema: vschema,
|
||||
}
|
||||
vindex, err := lvs.FindOrCreateVindex(tcase.name)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, tcase.err, tcase.name)
|
||||
|
@ -140,3 +209,48 @@ func TestFindOrCreateVindex(t *testing.T) {
|
|||
assert.Equal(t, vindex.String(), want, tcase.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindTable(t *testing.T) {
|
||||
testSrvVSchema := &vschemapb.SrvVSchema{
|
||||
Keyspaces: map[string]*vschemapb.Keyspace{
|
||||
"ks1": {
|
||||
Tables: map[string]*vschemapb.Table{
|
||||
"t1": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcases := []struct {
|
||||
keyspace string
|
||||
tablename string
|
||||
err string
|
||||
}{{
|
||||
keyspace: "ks1",
|
||||
tablename: "t1",
|
||||
err: "",
|
||||
}, {
|
||||
keyspace: "ks1",
|
||||
tablename: "t2",
|
||||
err: "table t2 not found",
|
||||
}, {
|
||||
keyspace: "noks",
|
||||
tablename: "t2",
|
||||
err: "keyspace noks not found in vschema",
|
||||
}}
|
||||
for _, tcase := range testcases {
|
||||
lvs := &localVSchema{
|
||||
keyspace: tcase.keyspace,
|
||||
vschema: vschema,
|
||||
}
|
||||
table, err := lvs.findTable(tcase.tablename)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, tcase.err, tcase.keyspace, tcase.tablename)
|
||||
continue
|
||||
}
|
||||
assert.NoError(t, err, tcase.keyspace, tcase.tablename)
|
||||
assert.Equal(t, table.Name.String(), tcase.tablename, tcase.keyspace, tcase.tablename)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -230,16 +230,12 @@ func buildREPlan(ti *Table, vschema *localVSchema, filter string) (*Plan, error)
|
|||
|
||||
// We need to additionally set VindexColumn, Vindex and KeyRange
|
||||
// based on the Primary Vindex of the table.
|
||||
table, err := vschema.FindTable(ti.Name)
|
||||
cv, err := vschema.FindColVindex(ti.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Get Primary Vindex.
|
||||
if len(table.ColumnVindexes) == 0 {
|
||||
return nil, fmt.Errorf("table %s has no primary vindex", ti.Name)
|
||||
}
|
||||
plan.Vindex = table.ColumnVindexes[0].Vindex
|
||||
plan.VindexColumns, err = buildVindexColumns(plan.Table, table.ColumnVindexes[0].Columns)
|
||||
plan.Vindex = cv.Vindex
|
||||
plan.VindexColumns, err = buildVindexColumns(plan.Table, cv.Columns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -365,20 +361,16 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp
|
|||
if len(inner.Exprs) != 0 {
|
||||
return ColExpr{}, fmt.Errorf("unexpected: %v", sqlparser.String(inner))
|
||||
}
|
||||
table, err := vschema.FindTable(plan.Table.Name)
|
||||
cv, err := vschema.FindColVindex(plan.Table.Name)
|
||||
if err != nil {
|
||||
return ColExpr{}, err
|
||||
}
|
||||
// Get Primary Vindex.
|
||||
if len(table.ColumnVindexes) == 0 {
|
||||
return ColExpr{}, fmt.Errorf("table %s has no primary vindex", plan.Table.Name)
|
||||
}
|
||||
vindexColumns, err := buildVindexColumns(plan.Table, table.ColumnVindexes[0].Columns)
|
||||
vindexColumns, err := buildVindexColumns(plan.Table, cv.Columns)
|
||||
if err != nil {
|
||||
return ColExpr{}, err
|
||||
}
|
||||
return ColExpr{
|
||||
Vindex: table.ColumnVindexes[0].Vindex,
|
||||
Vindex: cv.Vindex,
|
||||
VindexColumns: vindexColumns,
|
||||
Alias: sqlparser.NewColIdent("keyspace_id"),
|
||||
Type: sqltypes.VarBinary,
|
||||
|
@ -393,16 +385,12 @@ func (plan *Plan) analyzeInKeyRange(vschema *localVSchema, exprs sqlparser.Selec
|
|||
var krExpr sqlparser.SelectExpr
|
||||
switch {
|
||||
case len(exprs) == 1:
|
||||
table, err := vschema.FindTable(plan.Table.Name)
|
||||
cv, err := vschema.FindColVindex(plan.Table.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Get Primary Vindex.
|
||||
if len(table.ColumnVindexes) == 0 {
|
||||
return fmt.Errorf("table %s has no primary vindex", plan.Table.Name)
|
||||
}
|
||||
colnames = table.ColumnVindexes[0].Columns
|
||||
plan.Vindex = table.ColumnVindexes[0].Vindex
|
||||
colnames = cv.Columns
|
||||
plan.Vindex = cv.Vindex
|
||||
krExpr = exprs[0]
|
||||
case len(exprs) >= 3:
|
||||
for _, expr := range exprs[:len(exprs)-2] {
|
||||
|
|
|
@ -481,7 +481,7 @@
|
|||
"Args": [],
|
||||
"Command": [],
|
||||
"Manual": false,
|
||||
"Shard": 4,
|
||||
"Shard": 5,
|
||||
"RetryMax": 0,
|
||||
"Tags": [
|
||||
"worker_test"
|
||||
|
@ -492,7 +492,7 @@
|
|||
"Args": [],
|
||||
"Command": [],
|
||||
"Manual": false,
|
||||
"Shard": 4,
|
||||
"Shard": 5,
|
||||
"RetryMax": 0,
|
||||
"Tags": [
|
||||
"worker_test"
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
set -euo pipefail
|
||||
|
||||
# sudo gem install --no-ri --no-rdoc fpm
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
source build.env
|
||||
|
||||
SHORT_REV="$(git rev-parse --short HEAD)"
|
||||
|
@ -43,6 +43,8 @@ done;
|
|||
# But resolving symlinks
|
||||
cp -rpfL {config,vthook,web,examples} "${RELEASE_DIR}/"
|
||||
|
||||
echo "Follow the binary installation instructions at: https://vitess.io/docs/get-started/local/" > "${RELEASE_DIR}"/README.md
|
||||
|
||||
cd "${RELEASE_DIR}/.."
|
||||
tar -czf "${TAR_FILE}" "${RELEASE_ID}"
|
||||
|
||||
|
|
|
@ -27,15 +27,39 @@ fi
|
|||
|
||||
packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/vt/... | sort)
|
||||
|
||||
# endtoend tests should be in a directory called endtoend
|
||||
all_except_e2e_tests=$(echo "$packages_with_tests" | cut -d" " -f1 | grep -v "endtoend")
|
||||
# exclude end to end tests
|
||||
packages_to_test=$(echo "$packages_with_tests" | cut -d" " -f1 | grep -v "endtoend")
|
||||
all_except_flaky_tests=$(echo "$packages_to_test" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend")
|
||||
flaky_tests=$(echo "$packages_to_test" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1)
|
||||
|
||||
# Run non endtoend tests.
|
||||
echo "$all_except_e2e_tests" | xargs go test $VT_GO_PARALLEL -race
|
||||
# Flaky tests have the suffix "_flaky_test.go".
|
||||
# Exclude endtoend tests
|
||||
all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend")
|
||||
flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1)
|
||||
|
||||
# Run non-flaky tests.
|
||||
echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -race -count=1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "WARNING: POSSIBLE DATA RACE"
|
||||
echo "ERROR: Go unit tests failed. See above for errors."
|
||||
echo
|
||||
echo "ERROR: go test -race failed. See log above."
|
||||
echo "This should NOT happen. Did you introduce a flaky unit test?"
|
||||
echo "If so, please rename it to the suffix _flaky_test.go."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo '# Flaky tests (3 attempts permitted)'
|
||||
|
||||
# Run flaky tests sequentially. Retry when necessary.
|
||||
for pkg in $flaky_tests; do
|
||||
max_attempts=3
|
||||
attempt=1
|
||||
# Set a timeout because some tests may deadlock when they flake.
|
||||
until go test -timeout 30s $VT_GO_PARALLEL $pkg -race -count=1; do
|
||||
echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors."
|
||||
if [ $((++attempt)) -gt $max_attempts ]; then
|
||||
echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness."
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
|
|
|
@ -46,7 +46,7 @@ all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_tes
|
|||
flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1)
|
||||
|
||||
# Run non-flaky tests.
|
||||
echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL
|
||||
echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -count=1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: Go unit tests failed. See above for errors."
|
||||
echo
|
||||
|
@ -62,7 +62,7 @@ for pkg in $flaky_tests; do
|
|||
max_attempts=3
|
||||
attempt=1
|
||||
# Set a timeout because some tests may deadlock when they flake.
|
||||
until go test -timeout 30s $VT_GO_PARALLEL $pkg; do
|
||||
until go test -timeout 30s $VT_GO_PARALLEL $pkg -count=1; do
|
||||
echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors."
|
||||
if [ $((++attempt)) -gt $max_attempts ]; then
|
||||
echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness."
|
||||
|
|