Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
This commit is contained in:
Shlomi Noach 2021-06-13 12:21:28 +03:00
Parents 88d509ead7 b25b34ee3d
Commit 913a0904a6
111 changed files with 4823 additions and 618 deletions

50
.github/workflows/cluster_endtoend_resharding.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (resharding)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (resharding)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard resharding

50
.github/workflows/cluster_endtoend_resharding_bytes.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (resharding_bytes)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (resharding_bytes)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard resharding_bytes

50
.github/workflows/cluster_endtoend_vtgate_buffer.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_buffer)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_buffer)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_buffer

50
.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_concurrentdml)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_concurrentdml)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_concurrentdml

50
.github/workflows/cluster_endtoend_vtgate_gen4.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_gen4)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_gen4)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_gen4

50
.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_readafterwrite)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_readafterwrite)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_readafterwrite

50
.github/workflows/cluster_endtoend_vtgate_reservedconn.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_reservedconn)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_reservedconn)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_reservedconn

50
.github/workflows/cluster_endtoend_vtgate_schema.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_schema)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_schema

50
.github/workflows/cluster_endtoend_vtgate_topo.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_topo)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_topo

50
.github/workflows/cluster_endtoend_vtgate_transaction.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_transaction)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_transaction)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_transaction

50
.github/workflows/cluster_endtoend_vtgate_unsharded.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_unsharded)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_unsharded)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_unsharded

50
.github/workflows/cluster_endtoend_vtgate_vindex.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_vindex)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vindex)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_vindex

50
.github/workflows/cluster_endtoend_vtgate_vschema.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (vtgate_vschema)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vschema)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_vschema

50
.github/workflows/cluster_endtoend_xb_recovery.yml Vendored Normal file

@ -0,0 +1,50 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
name: Cluster (xb_recovery)
on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery)
runs-on: ubuntu-18.04
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.15
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
# TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
run: |
echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
# DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
- name: Check out code
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt-get update
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
go mod download
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- name: Run cluster endtoend test
timeout-minutes: 30
run: |
source build.env
eatmydata -- go run test.go -docker=false -print-log -follow -shard xb_recovery


@ -232,7 +232,9 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
--go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \
--go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \
--go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \
--go-vtproto_opt=features=marshal+unmarshal+size \
--go-vtproto_opt=features=marshal+unmarshal+size+pool \
--go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/query.Row \
--go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamRowsResponse \
-I${PWD}/dist/vt-protoc-3.6.1/include:proto proto/$${name}.proto; \
done
cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto
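The two new --go-vtproto_opt=pool lines opt query.Row and binlogdata.VStreamRowsResponse into vtprotobuf's memory pooling, on top of the existing marshal/unmarshal/size code generation. A minimal sketch of how such pooled messages are typically obtained and released, assuming the generated helpers are named RowFromVTPool and ReturnToVTPool (those names come from the vtproto pool feature and are not shown in this diff):

package rowpool

import (
	querypb "vitess.io/vitess/go/vt/proto/query"
)

// buildRow fills a pooled Row; values holds one cell payload per column.
func buildRow(values [][]byte) *querypb.Row {
	row := querypb.RowFromVTPool() // assumed generated helper
	for _, v := range values {
		row.Lengths = append(row.Lengths, int64(len(v)))
		row.Values = append(row.Values, v...)
	}
	return row
}

// releaseRow hands the Row back to the pool once the response has been marshalled.
func releaseRow(row *querypb.Row) {
	row.ReturnToVTPool() // assumed generated helper
}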


@ -67,6 +67,10 @@ func main() {
logutil.LogEvent(logger, e)
})
if err != nil {
if strings.Contains(err.Error(), "flag: help requested") {
return
}
errStr := strings.Replace(err.Error(), "remote error: ", "", -1)
fmt.Printf("%s Error: %s\n", flag.Arg(0), errStr)
log.Error(err)


@ -0,0 +1,104 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"context"
"flag"
"fmt"
"strings"
"github.com/spf13/cobra"
"vitess.io/vitess/go/cmd/vtctldclient/cli"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/vtctl/vtctlclient"
logutilpb "vitess.io/vitess/go/vt/proto/logutil"
)
var (
// LegacyVtctlCommand provides a shim to make legacy ExecuteVtctlCommand
// RPCs. This allows users to use a single binary to make RPCs against both
// the new and old vtctld gRPC APIs.
LegacyVtctlCommand = &cobra.Command{
Use: "LegacyVtctlCommand -- <command> [flags ...] [args ...]",
Short: "Invoke a legacy vtctlclient command. Flag parsing is best effort.",
Args: cobra.ArbitraryArgs,
RunE: func(cmd *cobra.Command, args []string) error {
cli.FinishedParsing(cmd)
return runLegacyCommand(args)
},
Long: strings.TrimSpace(`
LegacyVtctlCommand uses the legacy vtctl grpc client to make an ExecuteVtctlCommand
rpc to a vtctld.
This command exists to support a smooth transition of any scripts that relied on
vtctlclient during the migration to the new vtctldclient, and will be removed,
following the Vitess project's standard deprecation cycle, once all commands
have been migrated to the new VtctldServer api.
To see the list of available legacy commands, run "LegacyVtctlCommand -- help".
Note that, as with the old client, this requires a running server, as the flag
parsing and help/usage text generation are done server-side.
Also note that, in order to defer that flag parsing to the server side, you must
use the double-dash ("--") after the LegacyVtctlCommand subcommand string, or
the client-side flag parsing library we are using will attempt to parse those
flags (and fail).
`),
Example: strings.TrimSpace(`
LegacyVtctlCommand help # displays this help message
LegacyVtctlCommand -- help # displays help for supported legacy vtctl commands
# When using a legacy command that takes arguments, a double dash must be used
# before the first flag argument, like in the first example. The double dash may
# be used, however, at any point after the "LegacyVtctlCommand" string, as in
# the second example.
LegacyVtctlCommand AddCellInfo -- -server_address "localhost:1234" -root "/vitess/cell1"
LegacyVtctlCommand -- AddCellInfo -server_address "localhost:5678" -root "/vitess/cell1"`),
}
)
func runLegacyCommand(args []string) error {
// Duplicated (mostly) from go/cmd/vtctlclient/main.go.
logger := logutil.NewConsoleLogger()
ctx, cancel := context.WithTimeout(context.Background(), actionTimeout)
defer cancel()
err := vtctlclient.RunCommandAndWait(ctx, server, args, func(e *logutilpb.Event) {
logutil.LogEvent(logger, e)
})
if err != nil {
if strings.Contains(err.Error(), "flag: help requested") {
// Help is caught by SetHelpFunc, so we don't want to indicate this as an error.
return nil
}
errStr := strings.Replace(err.Error(), "remote error: ", "", -1)
fmt.Printf("%s Error: %s\n", flag.Arg(0), errStr)
log.Error(err)
}
return err
}
func init() {
Root.AddCommand(LegacyVtctlCommand)
}


@ -25,7 +25,6 @@ import (
"github.com/spf13/cobra"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
)
@ -44,9 +43,7 @@ var (
// command context for every command.
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
traceCloser = trace.StartTracing("vtctldclient")
if server == "" {
err = errors.New("please specify -server <vtctld_host:vtctld_port> to specify the vtctld server to connect to")
log.Error(err)
if err := ensureServerArg(); err != nil {
return err
}
@ -75,6 +72,17 @@ var (
}
)
var errNoServer = errors.New("please specify -server <vtctld_host:vtctld_port> to specify the vtctld server to connect to")
// ensureServerArg validates that --server was passed to the CLI.
func ensureServerArg() error {
if server == "" {
return errNoServer
}
return nil
}
func init() {
Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection")
Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command")


@ -0,0 +1,23 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Imports and registers the gRPC vtctl client.
import (
_ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient"
)


@ -99,7 +99,7 @@ where table_schema = database()`
FetchUpdatedTables = `select table_name, column_name, data_type
from _vt.schemacopy
where table_schema = database() and
table_name in :tableNames
table_name in ::tableNames
order by table_name, ordinal_position`
// FetchTables queries fetches all information about tables
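The one-character change from :tableNames to ::tableNames is significant: in Vitess query text a single colon binds one scalar value, while a double colon expands a list (TUPLE) bind variable, which is what the IN clause here requires. A minimal sketch of building such a bind variable, assuming the go/sqltypes helper behaves as described (the table names are illustrative):

package main

import (
	"fmt"

	"vitess.io/vitess/go/sqltypes"
)

func main() {
	// BuildBindVariable turns a Go slice into a TUPLE bind variable; bound as
	// "::tableNames", it expands into the parenthesized list that IN (...) needs.
	bv, err := sqltypes.BuildBindVariable([]interface{}{"t1", "t2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bv.Type, len(bv.Values)) // expected: TUPLE 2
}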


@ -190,6 +190,7 @@ var stateToMysqlCode = map[vterrors.State]struct {
vterrors.WrongValueForVar: {num: ERWrongValueForVar, state: SSClientError},
vterrors.ServerNotAvailable: {num: ERServerIsntAvailable, state: SSNetError},
vterrors.CantDoThisInTransaction: {num: ERCantDoThisDuringAnTransaction, state: SSCantDoThisDuringAnTransaction},
vterrors.RequiresPrimaryKey: {num: ERRequiresPrimaryKey, state: SSClientError},
vterrors.NoSuchSession: {num: ERUnknownComError, state: SSNetError},
}


@ -0,0 +1,165 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"vitess.io/vitess/go/vt/log"
)
const (
vttestserverMysql57image = "vttestserver-e2etest/mysql57"
vttestserverMysql80image = "vttestserver-e2etest/mysql80"
)
type vttestserver struct {
dockerImage string
keyspaces []string
numShards []int
mysqlMaxConnecetions int
port int
}
func newVttestserver(dockerImage string, keyspaces []string, numShards []int, mysqlMaxConnections, port int) *vttestserver {
return &vttestserver{
dockerImage: dockerImage,
keyspaces: keyspaces,
numShards: numShards,
mysqlMaxConnecetions: mysqlMaxConnections,
port: port,
}
}
func (v *vttestserver) teardown() {
cmd := exec.Command("docker", "rm", "--force", "vttestserver-end2end-test")
err := cmd.Run()
if err != nil {
log.Errorf("docker teardown failed :- %s", err.Error())
}
}
// startDockerImage starts a docker container running the vttestserver image
func (v *vttestserver) startDockerImage() error {
cmd := exec.Command("docker", "run")
cmd.Args = append(cmd.Args, "--name=vttestserver-end2end-test")
cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:33577", v.port))
cmd.Args = append(cmd.Args, "-e", "PORT=33574")
cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("KEYSPACES=%s", strings.Join(v.keyspaces, ",")))
cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("NUM_SHARDS=%s", strings.Join(convertToStringSlice(v.numShards), ",")))
cmd.Args = append(cmd.Args, "-e", "MYSQL_BIND_HOST=0.0.0.0")
cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("MYSQL_MAX_CONNECTIONS=%d", v.mysqlMaxConnecetions))
cmd.Args = append(cmd.Args, "--health-cmd", "mysqladmin ping -h127.0.0.1 -P33577")
cmd.Args = append(cmd.Args, "--health-interval=5s")
cmd.Args = append(cmd.Args, "--health-timeout=2s")
cmd.Args = append(cmd.Args, "--health-retries=5")
cmd.Args = append(cmd.Args, v.dockerImage)
err := cmd.Start()
if err != nil {
return err
}
return nil
}
// dockerStatus is a struct used to unmarshal json output from `docker inspect`
type dockerStatus struct {
State struct {
Health struct {
Status string
}
}
}
// waitUntilDockerHealthy waits until the docker container reports healthy. It takes the number of seconds to wait before timing out.
func (v *vttestserver) waitUntilDockerHealthy(timeoutDelay int) error {
timeOut := time.After(time.Duration(timeoutDelay) * time.Second)
for {
select {
case <-timeOut:
// return error due to timeout
return fmt.Errorf("timed out waiting for docker image to start")
case <-time.After(time.Second):
cmd := exec.Command("docker", "inspect", "vttestserver-end2end-test")
out, err := cmd.Output()
if err != nil {
return err
}
var x []dockerStatus
err = json.Unmarshal(out, &x)
if err != nil {
return err
}
if len(x) > 0 {
status := x[0].State.Health.Status
if status == "healthy" {
return nil
}
}
}
}
}
// convertToStringSlice converts an integer slice to a string slice
func convertToStringSlice(intSlice []int) []string {
var stringSlice []string
for _, val := range intSlice {
str := strconv.Itoa(val)
stringSlice = append(stringSlice, str)
}
return stringSlice
}
// makeVttestserverDockerImages creates the vttestserver docker images for both MySQL57 and MySQL80
func makeVttestserverDockerImages() error {
mainVitessPath := path.Join(os.Getenv("PWD"), "../../../..")
dockerFilePath := path.Join(mainVitessPath, "docker/vttestserver/Dockerfile.mysql57")
cmd57 := exec.Command("docker", "build", "-f", dockerFilePath, "-t", vttestserverMysql57image, ".")
cmd57.Dir = mainVitessPath
err := cmd57.Start()
if err != nil {
return err
}
dockerFilePath = path.Join(mainVitessPath, "docker/vttestserver/Dockerfile.mysql80")
cmd80 := exec.Command("docker", "build", "-f", dockerFilePath, "-t", vttestserverMysql80image, ".")
cmd80.Dir = mainVitessPath
err = cmd80.Start()
if err != nil {
return err
}
err = cmd57.Wait()
if err != nil {
return err
}
err = cmd80.Wait()
if err != nil {
return err
}
return nil
}


@ -0,0 +1,196 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"fmt"
"os"
"testing"
"github.com/google/go-cmp/cmp"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/mysql"
"github.com/stretchr/testify/require"
)
func TestMain(m *testing.M) {
exitCode := func() int {
err := makeVttestserverDockerImages()
if err != nil {
return 1
}
return m.Run()
}()
os.Exit(exitCode)
}
func TestUnsharded(t *testing.T) {
dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image}
for _, image := range dockerImages {
t.Run(image, func(t *testing.T) {
vtest := newVttestserver(image, []string{"unsharded_ks"}, []int{1}, 1000, 33577)
err := vtest.startDockerImage()
require.NoError(t, err)
defer vtest.teardown()
// wait for the docker container to be set up
err = vtest.waitUntilDockerHealthy(10)
require.NoError(t, err)
ctx := context.Background()
vttestParams := mysql.ConnParams{
Host: "localhost",
Port: vtest.port,
}
conn, err := mysql.Connect(ctx, &vttestParams)
require.NoError(t, err)
defer conn.Close()
assertMatches(t, conn, "show databases", `[[VARCHAR("unsharded_ks")] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]]`)
_, err = execute(t, conn, "create table unsharded_ks.t1(id int)")
require.NoError(t, err)
_, err = execute(t, conn, "insert into unsharded_ks.t1(id) values (10),(20),(30)")
require.NoError(t, err)
assertMatches(t, conn, "select * from unsharded_ks.t1", `[[INT32(10)] [INT32(20)] [INT32(30)]]`)
})
}
}
func TestSharded(t *testing.T) {
dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image}
for _, image := range dockerImages {
t.Run(image, func(t *testing.T) {
vtest := newVttestserver(image, []string{"ks"}, []int{2}, 1000, 33577)
err := vtest.startDockerImage()
require.NoError(t, err)
defer vtest.teardown()
// wait for the docker container to be set up
err = vtest.waitUntilDockerHealthy(10)
require.NoError(t, err)
ctx := context.Background()
vttestParams := mysql.ConnParams{
Host: "localhost",
Port: vtest.port,
}
conn, err := mysql.Connect(ctx, &vttestParams)
require.NoError(t, err)
defer conn.Close()
assertMatches(t, conn, "show databases", `[[VARCHAR("ks")] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]]`)
_, err = execute(t, conn, "create table ks.t1(id int)")
require.NoError(t, err)
_, err = execute(t, conn, "alter vschema on ks.t1 add vindex `binary_md5`(id) using `binary_md5`")
require.NoError(t, err)
_, err = execute(t, conn, "insert into ks.t1(id) values (10),(20),(30)")
require.NoError(t, err)
assertMatches(t, conn, "select id from ks.t1 order by id", `[[INT32(10)] [INT32(20)] [INT32(30)]]`)
})
}
}
func TestMysqlMaxCons(t *testing.T) {
dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image}
for _, image := range dockerImages {
t.Run(image, func(t *testing.T) {
vtest := newVttestserver(image, []string{"ks"}, []int{2}, 100000, 33577)
err := vtest.startDockerImage()
require.NoError(t, err)
defer vtest.teardown()
// wait for the docker container to be set up
err = vtest.waitUntilDockerHealthy(10)
require.NoError(t, err)
ctx := context.Background()
vttestParams := mysql.ConnParams{
Host: "localhost",
Port: vtest.port,
}
conn, err := mysql.Connect(ctx, &vttestParams)
require.NoError(t, err)
defer conn.Close()
assertMatches(t, conn, "select @@max_connections", `[[UINT64(100000)]]`)
})
}
}
func TestLargeNumberOfKeyspaces(t *testing.T) {
dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image}
for _, image := range dockerImages {
t.Run(image, func(t *testing.T) {
var keyspaces []string
var numShards []int
for i := 0; i < 100; i++ {
keyspaces = append(keyspaces, fmt.Sprintf("unsharded_ks%d", i))
numShards = append(numShards, 1)
}
vtest := newVttestserver(image, keyspaces, numShards, 100000, 33577)
err := vtest.startDockerImage()
require.NoError(t, err)
defer vtest.teardown()
// wait for the docker container to be set up
err = vtest.waitUntilDockerHealthy(15)
require.NoError(t, err)
ctx := context.Background()
vttestParams := mysql.ConnParams{
Host: "localhost",
Port: vtest.port,
}
conn, err := mysql.Connect(ctx, &vttestParams)
require.NoError(t, err)
defer conn.Close()
// assert that all the keyspaces are correctly set up
for _, keyspace := range keyspaces {
_, err = execute(t, conn, "create table "+keyspace+".t1(id int)")
require.NoError(t, err)
_, err = execute(t, conn, "insert into "+keyspace+".t1(id) values (10),(20),(30)")
require.NoError(t, err)
assertMatches(t, conn, "select * from "+keyspace+".t1", `[[INT32(10)] [INT32(20)] [INT32(30)]]`)
}
})
}
}
func execute(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) {
t.Helper()
return conn.ExecuteFetch(query, 1000, true)
}
func checkedExec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
t.Helper()
qr, err := conn.ExecuteFetch(query, 1000, true)
require.NoError(t, err)
return qr
}
func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := checkedExec(t, conn, query)
got := fmt.Sprintf("%v", qr.Rows)
diff := cmp.Diff(expected, got)
if diff != "" {
t.Errorf("Query: %s (-want +got):\n%s", query, diff)
}
}


@ -127,6 +127,11 @@ func getClusterConfig(idx int, dataRootDir string) *ClusterConfig {
}
func init() {
// for local debugging set this variable so that each run uses VTDATAROOT instead of a random dir
// and also does not teardown the cluster for inspecting logs and the databases
if os.Getenv("VREPLICATION_E2E_DEBUG") != "" {
debug = true
}
rand.Seed(time.Now().UTC().UnixNano())
originalVtdataroot = os.Getenv("VTDATAROOT")
var mainVtDataRoot string


@ -6,7 +6,7 @@ create table product(pid int, description varbinary(128), primary key(pid));
create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8mb4;
create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
create table merchant(mname varchar(128), category varchar(128), primary key(mname)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
create table orders(oid int, cid int, pid int, mname varchar(128), price int, primary key(oid));
create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid));
create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
create table customer2(cid int, name varbinary(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid));
create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
@ -243,8 +243,8 @@ create table tenant(tenant_id binary(16), name varbinary(16), primary key (tenan
"targetKeyspace": "merchant",
"tableSettings": [{
"targetTable": "morders",
"sourceExpression": "select * from orders",
"create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, primary key(oid))"
"sourceExpression": "select oid, cid, mname, pid, price, qty, total from orders",
"create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total int, total2 int as (10 * total), primary key(oid))"
}]
}
`


@ -189,11 +189,11 @@ func validateDryRunResults(t *testing.T, output string, want []string) {
}
if !match {
fail = true
t.Logf("want %s, got %s\n", w, gotDryRun[i])
t.Fatalf("want %s, got %s\n", w, gotDryRun[i])
}
}
if fail {
t.Fatal("Dry run results don't match")
t.Fatalf("Dry run results don't match, want %s, got %s", want, gotDryRun)
}
}


@ -0,0 +1,98 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vreplication
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
const smSchema = `
CREATE TABLE tx (
id bigint NOT NULL,
val varbinary(10) NOT NULL,
ts timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
typ tinyint NOT NULL,
PRIMARY KEY (id),
KEY ts (ts),
KEY typ (typ)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
`
const smVSchema = `
{
"sharded": true,
"tables": {
"tx": {
"column_vindexes": [
{
"column": "id",
"name": "hash"
}
]
}
},
"vindexes": {
"hash": {
"type": "hash"
}
}
}
`
const smMaterializeSpec = `{"workflow": "wf1", "source_keyspace": "ks1", "target_keyspace": "ks2", "table_settings": [ {"target_table": "tx", "source_expression": "select * from tx where typ>=2 and val > 'abc'" }] }`
const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2, 1, 'def'), (3, 2, 'def'), (4, 2, 'abc'), (5, 3, 'def'), (6, 3, 'abc')`
// TestShardedMaterialize tests a Materialize from a sharded (single-shard) keyspace using comparison filters
func TestShardedMaterialize(t *testing.T) {
defaultCellName := "zone1"
allCells := []string{"zone1"}
allCellNames = "zone1"
vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig)
ks1 := "ks1"
ks2 := "ks2"
require.NotNil(t, vc)
defaultReplicas = 0 // because of CI resource constraints we can only run this test with master tablets
defer func() { defaultReplicas = 1 }()
defer vc.TearDown(t)
defaultCell = vc.Cells[defaultCellName]
vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "-", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", ks1, "0"), 1)
vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "-", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200)
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", ks2, "0"), 1)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
verifyClusterHealth(t, vc)
_, err := vtgateConn.ExecuteFetch(initDataQuery, 0, false)
require.NoError(t, err)
materialize(t, smMaterializeSpec)
tab := vc.Cells[defaultCell.Name].Keyspaces[ks2].Shards["-"].Tablets["zone1-200"].Vttablet
catchup(t, tab, "wf1", "Materialize")
validateCount(t, vtgateConn, ks2, "tx", 2)
validateQuery(t, vtgateConn, "ks2:-", "select id, val from tx",
`[[INT64(3) VARBINARY("def")] [INT64(5) VARBINARY("def")]]`)
}


@ -5,9 +5,9 @@ insert into merchant(mname, category) values('monoprice', 'electronics');
insert into merchant(mname, category) values('newegg', 'electronics');
insert into product(pid, description) values(1, 'keyboard');
insert into product(pid, description) values(2, 'monitor');
insert into orders(oid, cid, mname, pid, price) values(1, 1, 'monoprice', 1, 10);
insert into orders(oid, cid, mname, pid, price) values(2, 1, 'newegg', 2, 15);
insert into orders(oid, cid, mname, pid, price) values(3, 2, 'monoprice', 2, 20);
insert into orders(oid, cid, mname, pid, price, qty) values(1, 1, 'monoprice', 1, 10, 1);
insert into orders(oid, cid, mname, pid, price, qty) values(2, 1, 'newegg', 2, 15, 2);
insert into orders(oid, cid, mname, pid, price, qty) values(3, 2, 'monoprice', 2, 20, 3);
insert into customer2(cid, name, typ, sport) values(1, 'john',1,'football,baseball');
insert into customer2(cid, name, typ, sport) values(2, 'paul','soho','cricket');
insert into customer2(cid, name, typ, sport) values(3, 'ringo','enterprise','');


@ -49,10 +49,10 @@ var dryRunResultsReadCustomerShard = []string{
var dryRunResultsSwitchWritesM2m3 = []string{
"Lock keyspace merchant",
"Stop streams on keyspace merchant",
"/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select * from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ",
"/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select * from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ",
"/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select * from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ",
"/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select * from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ",
"/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ",
"/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ",
"/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ",
"/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ",
"/ Id 4 Keyspace customer Shard -80 Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '-80') group by merchant_name\"} at Position ",
"/ Id 4 Keyspace customer Shard -80 Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '80-') group by merchant_name\"} at Position ",
"/ Id 5 Keyspace customer Shard 80- Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '-80') group by merchant_name\"} at Position ",


@ -418,7 +418,7 @@ func TestMain(m *testing.M) {
VSchema: VSchema,
}
clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"}
clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal"}
clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"}
err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, true)
if err != nil {
return 1


@ -650,30 +650,6 @@ func TestQueryAndSubQWithLimit(t *testing.T) {
assert.Equal(t, 10, len(result.Rows))
}
func TestSchemaTracker(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
// this query only works if we know which table the testId belongs to. The vschema does not contain
// this info, so we are testing that the schema tracker has added column info to the vschema
_, err = conn.ExecuteFetch(`select testId from t8 join t2`, 1000, true)
require.NoError(t, err)
}
func TestVSchemaTrackerInit(t *testing.T) {
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
qr := exec(t, conn, "SHOW VSCHEMA TABLES")
got := fmt.Sprintf("%v", qr.Rows)
want := `[[VARCHAR("aggr_test")] [VARCHAR("dual")] [VARCHAR("t1")] [VARCHAR("t1_id2_idx")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t3")] [VARCHAR("t3_id7_idx")] [VARCHAR("t4")] [VARCHAR("t4_id2_idx")] [VARCHAR("t5_null_vindex")] [VARCHAR("t6")] [VARCHAR("t6_id2_idx")] [VARCHAR("t7_fk")] [VARCHAR("t7_xxhash")] [VARCHAR("t7_xxhash_idx")] [VARCHAR("t8")] [VARCHAR("vstream_test")]]`
assert.Equal(t, want, got)
}
func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)
@ -683,6 +659,7 @@ func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Errorf("Query: %s (-want +got):\n%s", query, diff)
}
}
func assertMatchesNoOrder(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)


@ -0,0 +1,299 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sharded
import (
"context"
"flag"
"fmt"
"os"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"vitess.io/vitess/go/test/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
clusterInstance *cluster.LocalProcessCluster
vtParams mysql.ConnParams
KeyspaceName = "ks"
Cell = "test"
SchemaSQL = `
create table t2(
id3 bigint,
id4 bigint,
primary key(id3)
) Engine=InnoDB;
create table t2_id4_idx(
id bigint not null auto_increment,
id4 bigint,
id3 bigint,
primary key(id),
key idx_id4(id4)
) Engine=InnoDB;
create table t8(
id8 bigint,
testId bigint,
primary key(id8)
) Engine=InnoDB;
`
VSchema = `
{
"sharded": true,
"vindexes": {
"unicode_loose_xxhash" : {
"type": "unicode_loose_xxhash"
},
"unicode_loose_md5" : {
"type": "unicode_loose_md5"
},
"hash": {
"type": "hash"
},
"xxhash": {
"type": "xxhash"
},
"t2_id4_idx": {
"type": "lookup_hash",
"params": {
"table": "t2_id4_idx",
"from": "id4",
"to": "id3",
"autocommit": "true"
},
"owner": "t2"
}
},
"tables": {
"t2": {
"column_vindexes": [
{
"column": "id3",
"name": "hash"
},
{
"column": "id4",
"name": "t2_id4_idx"
}
]
},
"t2_id4_idx": {
"column_vindexes": [
{
"column": "id4",
"name": "hash"
}
]
},
"t8": {
"column_vindexes": [
{
"column": "id8",
"name": "hash"
}
]
}
}
}`
)
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitCode := func() int {
clusterInstance = cluster.NewCluster(Cell, "localhost")
defer clusterInstance.Teardown()
// Start topo server
err := clusterInstance.StartTopo()
if err != nil {
return 1
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: KeyspaceName,
SchemaSQL: SchemaSQL,
VSchema: VSchema,
}
clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal", "-vschema_ddl_authorized_users", "%"}
clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"}
err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, true)
if err != nil {
return 1
}
// Start vtgate
err = clusterInstance.StartVtgate()
if err != nil {
return 1
}
vtParams = mysql.ConnParams{
Host: clusterInstance.Hostname,
Port: clusterInstance.VtgateMySQLPort,
}
return m.Run()
}()
os.Exit(exitCode)
}
func TestAmbiguousColumnJoin(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
// this query only works if we know which table the testId belongs to. The vschema does not contain
// this info, so we are testing that the schema tracker has added column info to the vschema
_, err = conn.ExecuteFetch(`select testId from t8 join t2`, 1000, true)
require.NoError(t, err)
}
func TestInitAndUpdate(t *testing.T) {
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
assertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`)
// Init
_ = exec(t, conn, "create table test_sc (id bigint primary key)")
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`,
100*time.Millisecond,
3*time.Second,
"test_sc not in vschema tables")
// Tables Update via health check.
_ = exec(t, conn, "create table test_sc1 (id bigint primary key)")
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`,
100*time.Millisecond,
3*time.Second,
"test_sc1 not in vschema tables")
_ = exec(t, conn, "drop table test_sc, test_sc1")
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`,
100*time.Millisecond,
3*time.Second,
"test_sc and test_sc_1 should not be in vschema tables")
}
func TestDMLOnNewTable(t *testing.T) {
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
// create a new table which is not part of the VSchema
exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
// wait for vttablet's schema reload interval to pass
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`,
100*time.Millisecond,
3*time.Second,
"test_sc not in vschema tables")
assertMatches(t, conn, "select id from new_table_tracked", `[]`) // select
assertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select
// DML on new table
// insert initial data; update and delete will fail since we have not added a primary vindex
errorMessage := "table 'new_table_tracked' does not have a primary vindex (errno 1173) (sqlstate 42000)"
assertError(t, conn, `insert into new_table_tracked(id) values(0),(1)`, errorMessage)
assertError(t, conn, `update new_table_tracked set name = "newName1"`, errorMessage)
assertError(t, conn, "delete from new_table_tracked", errorMessage)
exec(t, conn, `select name from new_table_tracked join t8`)
// add a primary vindex for the table
exec(t, conn, "alter vschema on ks.new_table_tracked add vindex hash(id) using hash")
time.Sleep(1 * time.Second)
exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`)
exec(t, conn, `insert into t8(id8) values(2)`)
defer exec(t, conn, `delete from t8`)
assertMatchesNoOrder(t, conn, `select id from new_table_tracked join t8`, `[[INT64(0)] [INT64(1)]]`)
}
func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)
got := fmt.Sprintf("%v", qr.Rows)
diff := cmp.Diff(expected, got)
if diff != "" {
t.Errorf("Query: %s (-want +got):\n%s", query, diff)
}
}
func assertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected string, r time.Duration, d time.Duration, failureMsg string) {
t.Helper()
timeout := time.After(d)
diff := "actual and expectation does not match"
for len(diff) > 0 {
select {
case <-timeout:
require.Fail(t, failureMsg, diff)
case <-time.After(r):
qr := exec(t, conn, query)
diff = cmp.Diff(expected,
fmt.Sprintf("%v", qr.Rows))
}
}
}
func assertMatchesNoOrder(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)
actual := fmt.Sprintf("%v", qr.Rows)
assert.Equal(t, utils.SortString(expected), utils.SortString(actual), "for query: [%s] expected \n%s \nbut actual \n%s", query, expected, actual)
}
func assertError(t *testing.T, conn *mysql.Conn, query, errorMessage string) {
t.Helper()
_, err := conn.ExecuteFetch(query, 1000, true)
require.Error(t, err)
assert.Contains(t, err.Error(), errorMessage)
}
func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
t.Helper()
qr, err := conn.ExecuteFetch(query, 1000, true)
require.NoError(t, err, "for query: "+query)
return qr
}

View file

@ -0,0 +1,168 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unsharded
import (
"context"
"flag"
"fmt"
"os"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
clusterInstance *cluster.LocalProcessCluster
vtParams mysql.ConnParams
keyspaceName = "ks"
cell = "zone1"
sqlSchema = `
create table main (
id bigint,
val varchar(128),
primary key(id)
) Engine=InnoDB;
`
)
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitCode := func() int {
clusterInstance = cluster.NewCluster(cell, "localhost")
defer clusterInstance.Teardown()
// Start topo server
err := clusterInstance.StartTopo()
if err != nil {
return 1
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
SchemaSQL: sqlSchema,
}
clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"}
err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false)
if err != nil {
return 1
}
// Start vtgate
clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal", "-vschema_ddl_authorized_users", "%"}
err = clusterInstance.StartVtgate()
if err != nil {
return 1
}
vtParams = mysql.ConnParams{
Host: clusterInstance.Hostname,
Port: clusterInstance.VtgateMySQLPort,
}
return m.Run()
}()
os.Exit(exitCode)
}
func TestNewUnshardedTable(t *testing.T) {
defer cluster.PanicHandler(t)
// create a sql connection
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
// ensuring our initial table "main" is in the schema
qr := exec(t, conn, "SHOW VSCHEMA TABLES")
got := fmt.Sprintf("%v", qr.Rows)
want := `[[VARCHAR("dual")] [VARCHAR("main")]]`
require.Equal(t, want, got)
// create a new table which is not part of the VSchema
exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
// waiting for the vttablet's schema_reload interval to kick in
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("new_table_tracked")]]`,
100*time.Millisecond,
3*time.Second,
"new_table_tracked not in vschema tables")
assertMatches(t, conn, "select id from new_table_tracked", `[]`) // select
assertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select
// DML on new table
// insert initial data, then update and delete rows in the new table
exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`)
exec(t, conn, `update new_table_tracked set name = "newName1"`)
exec(t, conn, "delete from new_table_tracked where id = 0")
assertMatches(t, conn, `select * from new_table_tracked`, `[[INT64(1) VARCHAR("newName1")]]`)
exec(t, conn, `drop table new_table_tracked`)
// waiting for the vttablet's schema_reload interval to kick in
assertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
`[[VARCHAR("dual")] [VARCHAR("main")]]`,
100*time.Millisecond,
3*time.Second,
"new_table_tracked not in vschema tables")
}
func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) {
t.Helper()
qr := exec(t, conn, query)
got := fmt.Sprintf("%v", qr.Rows)
diff := cmp.Diff(expected, got)
if diff != "" {
t.Errorf("Query: %s (-want +got):\n%s", query, diff)
}
}
func assertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected string, r time.Duration, d time.Duration, failureMsg string) {
t.Helper()
timeout := time.After(d)
diff := "actual and expectation does not match"
for len(diff) > 0 {
select {
case <-timeout:
require.Fail(t, failureMsg, diff)
case <-time.After(r):
qr := exec(t, conn, query)
diff = cmp.Diff(expected,
fmt.Sprintf("%v", qr.Rows))
}
}
}
func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
t.Helper()
qr, err := conn.ExecuteFetch(query, 1000, true)
require.NoError(t, err, "for query: "+query)
return qr
}

View file

@ -160,5 +160,5 @@ func TestErrorInAutocommitSession(t *testing.T) {
// if we have properly working autocommit code, both the successful inserts should be visible to a second
// connection, even if we have not done an explicit commit
assert.Equal(t, `[[INT64(1) VARCHAR("foo")] [INT64(2) VARCHAR("baz")]]`, fmt.Sprintf("%v", result.Rows))
assert.Equal(t, `[[INT64(1) VARCHAR("foo")] [INT64(2) VARCHAR("baz")] [INT64(3) VARCHAR("mark")] [INT64(4) VARCHAR("doug")]]`, fmt.Sprintf("%v", result.Rows))
}

View file

@ -98,6 +98,9 @@ type Stats struct {
CopyLoopCount *stats.Counter
ErrorCounts *stats.CountersWithMultiLabels
NoopQueryCount *stats.CountersWithSingleLabel
VReplicationLags *stats.Timings
VReplicationLagRates *stats.Rates
}
// RecordHeartbeat updates the time the last heartbeat from vstreamer was seen
@ -154,6 +157,8 @@ func NewStats() *Stats {
bps.CopyLoopCount = stats.NewCounter("", "")
bps.ErrorCounts = stats.NewCountersWithMultiLabels("", "", []string{"type"})
bps.NoopQueryCount = stats.NewCountersWithSingleLabel("", "", "Statement", "")
bps.VReplicationLags = stats.NewTimings("", "", "")
bps.VReplicationLagRates = stats.NewRates("", bps.VReplicationLags, 15*60/5, 5*time.Second)
return bps
}
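A minimal sketch of how these two new stats are meant to be fed, assuming the usual stats.Timings Record(name, startTime) signature; the helper name, the "vstream" label and the event time are illustrative:
func recordLagExample(bps *Stats, eventTime time.Time) {
	// records now - eventTime as a lag sample; "vstream" is an illustrative label
	bps.VReplicationLags.Record("vstream", eventTime)
	// VReplicationLagRates needs no explicit calls: it samples VReplicationLags every 5 seconds,
	// keeping 15*60/5 = 180 samples, i.e. roughly 15 minutes of history.
}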

View file

@ -39,9 +39,6 @@ type TabletHealth struct {
MasterTermStartTime int64
LastError error
Serving bool
// TablesUpdated contains a list of all tables that we need to fetch new schema info for
TablesUpdated []string
}
// DeepEqual compares two TabletHealth. Since we include protos, we

View file

@ -107,7 +107,7 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table
return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired")
default:
}
candidates := tp.getMatchingTablets(ctx)
candidates := tp.GetMatchingTablets(ctx)
if len(candidates) == 0 {
// if no candidates were found, sleep and try again
@ -145,9 +145,9 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table
}
}
// getMatchingTablets returns a list of TabletInfo for tablets
// GetMatchingTablets returns a list of TabletInfo for tablets
// that match the cells, keyspace, shard and tabletTypes for this TabletPicker
func (tp *TabletPicker) getMatchingTablets(ctx context.Context) []*topo.TabletInfo {
func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletInfo {
// Special handling for MASTER tablet type
// Since there is only one master, we ignore cell and find the master
aliases := make([]*topodatapb.TabletAlias, 0)
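With the method exported, callers outside this package can inspect candidates directly. A hedged sketch, assuming the existing NewTabletPicker(ts, cells, keyspace, shard, tabletTypes) constructor is unchanged; all names below are illustrative:
// pickExample is illustrative; it assumes NewTabletPicker keeps its current signature.
func pickExample(ctx context.Context, ts *topo.Server) ([]*topo.TabletInfo, error) {
	tp, err := NewTabletPicker(ts, []string{"cell1"}, "ks", "-80", "replica")
	if err != nil {
		return nil, err
	}
	return tp.GetMatchingTablets(ctx), nil // tablets matching the cells, keyspace, shard and types
}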

View file

@ -194,8 +194,30 @@ func KeyRangeEqual(left, right *topodatapb.KeyRange) bool {
if right == nil {
return len(left.Start) == 0 && len(left.End) == 0
}
return bytes.Equal(left.Start, right.Start) &&
bytes.Equal(left.End, right.End)
return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) &&
bytes.Equal(addPadding(left.End), addPadding(right.End))
}
// addPadding adds padding to make sure keyrange represents an 8 byte integer.
// From Vitess docs:
// A hash vindex produces an 8-byte number.
// This means that all numbers less than 0x8000000000000000 will fall in shard -80.
// Any number with the highest bit set will be >= 0x8000000000000000, and will therefore
// belong to shard 80-.
// This means that from a keyrange perspective -80 == 00-80 == 0000-8000 == 000000-800000
// If we don't add this padding, we could run into issues when transitioning from keyranges
// that use 2 bytes to 4 bytes.
func addPadding(kr []byte) []byte {
	// Grow from zero length (capacity 8) so the keyrange bytes land at the front,
	// then right-pad with zeros to a full 8 bytes in the loop below.
	paddedKr := make([]byte, 0, 8)
	paddedKr = append(paddedKr, kr...)
for i := len(kr); i < 8; i++ {
paddedKr = append(paddedKr, 0)
}
return paddedKr
}
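To make the padding concrete: key ranges spelled with different byte widths now compare as equal. A minimal sketch, assuming it lives in this same package (as the tests below do) and that fmt is imported:
func ExampleKeyRangeEqual_padding() {
	// "-80" written with 1, 2 and 3 end bytes; all describe the same range
	short, _ := ParseKeyRangeParts("", "80")
	medium, _ := ParseKeyRangeParts("", "8000")
	long, _ := ParseKeyRangeParts("", "800000")

	fmt.Println(KeyRangeEqual(short, medium)) // true: both ends pad to the same 8 bytes
	fmt.Println(KeyRangeEqual(medium, long))  // true
}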
// KeyRangeStartSmaller returns true if right's keyrange start is _after_ left's start
@ -217,7 +239,7 @@ func KeyRangeStartEqual(left, right *topodatapb.KeyRange) bool {
if right == nil {
return len(left.Start) == 0
}
return bytes.Equal(left.Start, right.Start)
return bytes.Equal(addPadding(left.Start), addPadding(right.Start))
}
// KeyRangeEndEqual returns true if both key ranges have the same end
@ -228,7 +250,7 @@ func KeyRangeEndEqual(left, right *topodatapb.KeyRange) bool {
if right == nil {
return len(left.End) == 0
}
return bytes.Equal(left.End, right.End)
return bytes.Equal(addPadding(left.End), addPadding(right.End))
}
// For more info on the following functions, see:

View file

@ -241,6 +241,167 @@ func TestKeyRangeAdd(t *testing.T) {
}
}
func TestKeyRangeEndEqual(t *testing.T) {
testcases := []struct {
first string
second string
out bool
}{{
first: "",
second: "",
out: true,
}, {
first: "",
second: "-80",
out: false,
}, {
first: "40-",
second: "10-",
out: true,
}, {
first: "-8000",
second: "-80",
out: true,
}, {
first: "-8000",
second: "-8000000000000000",
out: true,
}, {
first: "-80",
second: "-8000",
out: true,
}}
stringToKeyRange := func(spec string) *topodatapb.KeyRange {
if spec == "" {
return nil
}
parts := strings.Split(spec, "-")
if len(parts) != 2 {
panic("invalid spec")
}
kr, err := ParseKeyRangeParts(parts[0], parts[1])
if err != nil {
panic(err)
}
return kr
}
for _, tcase := range testcases {
first := stringToKeyRange(tcase.first)
second := stringToKeyRange(tcase.second)
out := KeyRangeEndEqual(first, second)
if out != tcase.out {
t.Fatalf("KeyRangeEndEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out)
}
}
}
func TestKeyRangeStartEqual(t *testing.T) {
testcases := []struct {
first string
second string
out bool
}{{
first: "",
second: "",
out: true,
}, {
first: "",
second: "-80",
out: true,
}, {
first: "40-",
second: "20-",
out: false,
}, {
first: "-8000",
second: "-80",
out: true,
}, {
first: "-8000",
second: "-8000000000000000",
out: true,
}, {
first: "-80",
second: "-8000",
out: true,
}}
stringToKeyRange := func(spec string) *topodatapb.KeyRange {
if spec == "" {
return nil
}
parts := strings.Split(spec, "-")
if len(parts) != 2 {
panic("invalid spec")
}
kr, err := ParseKeyRangeParts(parts[0], parts[1])
if err != nil {
panic(err)
}
return kr
}
for _, tcase := range testcases {
first := stringToKeyRange(tcase.first)
second := stringToKeyRange(tcase.second)
out := KeyRangeStartEqual(first, second)
if out != tcase.out {
t.Fatalf("KeyRangeStartEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out)
}
}
}
func TestKeyRangeEqual(t *testing.T) {
testcases := []struct {
first string
second string
out bool
}{{
first: "",
second: "",
out: true,
}, {
first: "",
second: "-80",
out: false,
}, {
first: "-8000",
second: "-80",
out: true,
}, {
first: "-8000",
second: "-8000000000000000",
out: true,
}, {
first: "-80",
second: "-8000",
out: true,
}}
stringToKeyRange := func(spec string) *topodatapb.KeyRange {
if spec == "" {
return nil
}
parts := strings.Split(spec, "-")
if len(parts) != 2 {
panic("invalid spec")
}
kr, err := ParseKeyRangeParts(parts[0], parts[1])
if err != nil {
panic(err)
}
return kr
}
for _, tcase := range testcases {
first := stringToKeyRange(tcase.first)
second := stringToKeyRange(tcase.second)
out := KeyRangeEqual(first, second)
if out != tcase.out {
t.Fatalf("KeyRangeEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out)
}
}
}
func TestEvenShardsKeyRange_Error(t *testing.T) {
testCases := []struct {
i, n int

View file

@ -9,6 +9,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
bits "math/bits"
sync "sync"
query "vitess.io/vitess/go/vt/proto/query"
topodata "vitess.io/vitess/go/vt/proto/topodata"
vtrpc "vitess.io/vitess/go/vt/proto/vtrpc"
@ -1884,6 +1885,35 @@ func encodeVarint(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
var vtprotoPool_VStreamRowsResponse = sync.Pool{
New: func() interface{} {
return &VStreamRowsResponse{}
},
}
func (m *VStreamRowsResponse) ResetVT() {
f0 := m.Fields[:0]
f1 := m.Pkfields[:0]
for _, mm := range m.Rows {
mm.ResetVT()
}
f2 := m.Rows[:0]
m.Lastpk.ReturnToVTPool()
m.Reset()
m.Fields = f0
m.Pkfields = f1
m.Rows = f2
}
func (m *VStreamRowsResponse) ReturnToVTPool() {
if m != nil {
m.ResetVT()
vtprotoPool_VStreamRowsResponse.Put(m)
}
}
func VStreamRowsResponseFromVTPool() *VStreamRowsResponse {
return vtprotoPool_VStreamRowsResponse.Get().(*VStreamRowsResponse)
}
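A minimal sketch of the borrow/reset/return cycle these helpers enable; handlePacket, packet and process are illustrative names, not generated API:
func handlePacket(packet []byte, process func(rows []*query.Row, lastpk *query.Row)) error {
	resp := VStreamRowsResponseFromVTPool() // borrow instead of allocating a fresh message
	if err := resp.UnmarshalVT(packet); err != nil {
		resp.ReturnToVTPool() // return even on error so the backing slices are reused
		return err
	}
	process(resp.Rows, resp.Lastpk) // consume the data before handing the message back
	resp.ReturnToVTPool()           // ResetVT keeps Fields/Pkfields/Rows capacity and recycles Lastpk via the Row pool
	return nil
}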
func (m *Charset) SizeVT() (n int) {
if m == nil {
return 0
@ -6322,7 +6352,14 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Fields = append(m.Fields, &query.Field{})
if len(m.Fields) == cap(m.Fields) {
m.Fields = append(m.Fields, &query.Field{})
} else {
m.Fields = m.Fields[:len(m.Fields)+1]
if m.Fields[len(m.Fields)-1] == nil {
m.Fields[len(m.Fields)-1] = &query.Field{}
}
}
if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@ -6356,7 +6393,14 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Pkfields = append(m.Pkfields, &query.Field{})
if len(m.Pkfields) == cap(m.Pkfields) {
m.Pkfields = append(m.Pkfields, &query.Field{})
} else {
m.Pkfields = m.Pkfields[:len(m.Pkfields)+1]
if m.Pkfields[len(m.Pkfields)-1] == nil {
m.Pkfields[len(m.Pkfields)-1] = &query.Field{}
}
}
if err := m.Pkfields[len(m.Pkfields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@ -6422,7 +6466,14 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rows = append(m.Rows, &query.Row{})
if len(m.Rows) == cap(m.Rows) {
m.Rows = append(m.Rows, &query.Row{})
} else {
m.Rows = m.Rows[:len(m.Rows)+1]
if m.Rows[len(m.Rows)-1] == nil {
m.Rows[len(m.Rows)-1] = &query.Row{}
}
}
if err := m.Rows[len(m.Rows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@ -6457,7 +6508,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Lastpk == nil {
m.Lastpk = &query.Row{}
m.Lastpk = query.RowFromVTPool()
}
if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err

View file

@ -11,6 +11,7 @@ import (
io "io"
math "math"
bits "math/bits"
sync "sync"
topodata "vitess.io/vitess/go/vt/proto/topodata"
vtrpc "vitess.io/vitess/go/vt/proto/vtrpc"
)
@ -4045,6 +4046,29 @@ func encodeVarint(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
var vtprotoPool_Row = sync.Pool{
New: func() interface{} {
return &Row{}
},
}
func (m *Row) ResetVT() {
f0 := m.Lengths[:0]
f1 := m.Values[:0]
m.Reset()
m.Lengths = f0
m.Values = f1
}
func (m *Row) ReturnToVTPool() {
if m != nil {
m.ResetVT()
vtprotoPool_Row.Put(m)
}
}
func RowFromVTPool() *Row {
return vtprotoPool_Row.Get().(*Row)
}
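A small sketch of what a pooled Row retains across uses; the helper name and values are illustrative:
func rowPoolExample() {
	r := RowFromVTPool()
	r.Lengths = append(r.Lengths, 3)
	r.Values = append(r.Values, "abc"...)
	// ... use r ...
	r.ReturnToVTPool() // ResetVT truncates Lengths/Values to zero length but keeps their backing arrays
}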
func (m *Target) SizeVT() (n int) {
if m == nil {
return 0
@ -7100,7 +7124,7 @@ func (m *Row) UnmarshalVT(dAtA []byte) error {
}
}
elementCount = count
if elementCount != 0 && len(m.Lengths) == 0 {
if elementCount != 0 && len(m.Lengths) == 0 && cap(m.Lengths) < elementCount {
m.Lengths = make([]int64, 0, elementCount)
}
for iNdEx < postIndex {

View file

@ -32,7 +32,7 @@ import (
)
// ParsedQuery represents a parsed query where
// bind locations are precompued for fast substitutions.
// bind locations are precomputed for fast substitutions.
type ParsedQuery struct {
Query string
bindLocations []bindLocation
@ -86,29 +86,57 @@ func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*qu
}
// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that
// the fields in the row are in the same order as the placeholders in this query.
func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row) error {
// the fields in the row are in the same order as the placeholders in this query. The fields might include generated
// columns, which are dropped by checking against skipFields before binding the variables.
// Note: there can be more fields than bind locations, since extra columns might be requested from the source if not all
// primary key columns are present in the target table, for example. Also, some values in the row may not correspond to
// values from the database on the source: sum/count for aggregation queries, for example.
func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error {
if len(fields) < len(pq.bindLocations) {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", len(fields), len(pq.bindLocations))
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ",
len(fields), len(pq.bindLocations))
}
type colInfo struct {
typ querypb.Type
length int64
offset int64
}
rowInfo := make([]*colInfo, 0)
offset := int64(0)
for i, field := range fields { // collect info required for fields to be bound
length := row.Lengths[i]
if !skipFields[strings.ToLower(field.Name)] {
rowInfo = append(rowInfo, &colInfo{
typ: field.Type,
length: length,
offset: offset,
})
}
if length > 0 {
offset += row.Lengths[i]
}
}
// bind field values to locations
var offsetQuery int
var offsetRow int64
for i, loc := range pq.bindLocations {
col := rowInfo[i]
buf.WriteString(pq.Query[offsetQuery:loc.offset])
typ := fields[i].Type
typ := col.typ
if typ == querypb.Type_TUPLE {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i)
}
length := row.Lengths[i]
length := col.length
if length < 0 {
// -1 means a null variable; serialize it directly
buf.WriteString("null")
} else {
vv := sqltypes.MakeTrusted(typ, row.Values[offsetRow:offsetRow+length])
vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length])
vv.EncodeSQLBytes2(buf)
offsetRow += length
}
offsetQuery = loc.offset + loc.length
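A hedged usage sketch of the new skipFields parameter; the helper name, pq, fields, row and "generated_col" are illustrative, and it assumes bytes2.NewBuffer from go/bytes2:
// bindRowExample is illustrative only.
func bindRowExample(pq *ParsedQuery, fields []*querypb.Field, row *querypb.Row) (*bytes2.Buffer, error) {
	// lowercased names of target-side generated columns to drop before binding
	skipFields := map[string]bool{"generated_col": true}
	buf := bytes2.NewBuffer(nil)
	if err := pq.AppendFromRow(buf, fields, row, skipFields); err != nil {
		return nil, err
	}
	return buf, nil // buf holds the query with the remaining row values bound in placeholder order
}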

View file

@ -17,12 +17,12 @@ limitations under the License.
package topo
import (
"context"
"path"
"strings"
"google.golang.org/protobuf/proto"
"context"
"k8s.io/apimachinery/pkg/util/sets"
"vitess.io/vitess/go/vt/vterrors"
@ -176,37 +176,52 @@ func (ts *Server) GetKnownCells(ctx context.Context) ([]string, error) {
// ExpandCells takes a comma-separated list of cells and returns an array of cell names
// Aliases are expanded and an empty string returns all cells
func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, error) {
var err error
var outputCells []string
inputCells := strings.Split(cells, ",")
var (
err error
inputCells []string
outputCells = sets.NewString() // Use a set to dedupe if the input cells list includes an alias and a cell in that alias.
)
if cells == "" {
inputCells, err = ts.GetCellInfoNames(ctx)
if err != nil {
return nil, err
}
} else {
inputCells = strings.Split(cells, ",")
}
expandCell := func(ctx context.Context, cell string) error {
shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout)
defer cancel()
_, err := ts.GetCellInfo(shortCtx, cell, false /* strongRead */)
if err != nil {
// Not a valid cell name. Check whether it is an alias.
shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout)
defer cancel()
alias, err2 := ts.GetCellsAlias(shortCtx, cell, false /* strongRead */)
if err2 != nil {
return err // return the original err to indicate the cell does not exist
}
// Expand the alias cells list into the final set.
outputCells.Insert(alias.Cells...)
return nil
}
// Valid cell.
outputCells.Insert(cell)
return nil
}
for _, cell := range inputCells {
cell2 := strings.TrimSpace(cell)
shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout)
defer cancel()
_, err := ts.GetCellInfo(shortCtx, cell2, false)
if err != nil {
// not a valid cell, check whether it is a cell alias
shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout)
defer cancel()
alias, err2 := ts.GetCellsAlias(shortCtx, cell2, false)
// if we get an error, either cellAlias doesn't exist or it isn't a cell alias at all. Ignore and continue
if err2 == nil {
outputCells = append(outputCells, alias.Cells...)
}
if err != nil {
return nil, err
}
} else {
// valid cell, add it to our list
outputCells = append(outputCells, cell2)
if err := expandCell(ctx, cell2); err != nil {
return nil, err
}
}
return outputCells, nil
return outputCells.List(), nil
}
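A short usage sketch mirroring the alias test further down: naming an alias together with one of its member cells yields each cell exactly once, since the set-backed output is de-duplicated and List() returns it sorted. The helper name and topology names are illustrative:
func expandExample(ctx context.Context, ts *Server) ([]string, error) {
	// "alias" is assumed to cover cell1, cell2 and cell3
	cells, err := ts.ExpandCells(ctx, "alias,cell1")
	if err != nil {
		return nil, err
	}
	return cells, nil // [cell1 cell2 cell3]
}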

View file

@ -42,3 +42,25 @@ func KeyspaceTypeString(kt topodatapb.KeyspaceType) string {
return str
}
// KeyspaceTypeLString returns the lowercased string representation of a
// KeyspaceType.
func KeyspaceTypeLString(kt topodatapb.KeyspaceType) string {
return strings.ToLower(KeyspaceTypeString(kt))
}
// KeyspaceIDTypeString returns the string representation of a KeyspaceIdType.
func KeyspaceIDTypeString(kidType topodatapb.KeyspaceIdType) string {
str, ok := topodatapb.KeyspaceIdType_name[int32(kidType)]
if !ok {
return "UNKNOWN"
}
return str
}
// KeyspaceIDTypeLString returns the lowercased string representation of a
// KeyspaceIdType.
func KeyspaceIDTypeLString(kidType topodatapb.KeyspaceIdType) string {
return strings.ToLower(KeyspaceIDTypeString(kidType))
}
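For illustration, the lowercased variants simply fold the proto enum names; this sketch assumes fmt is imported and the helper name is illustrative:
func lstringExample() {
	fmt.Println(KeyspaceTypeLString(topodatapb.KeyspaceType_SNAPSHOT))   // snapshot
	fmt.Println(KeyspaceIDTypeLString(topodatapb.KeyspaceIdType_UINT64)) // uint64
}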

View file

@ -21,6 +21,7 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"context"
@ -176,4 +177,52 @@ func TestExpandCells(t *testing.T) {
})
}
t.Run("aliases", func(t *testing.T) {
cells := []string{"cell1", "cell2", "cell3"}
ts := memorytopo.NewServer(cells...)
err := ts.CreateCellsAlias(ctx, "alias", &topodatapb.CellsAlias{Cells: cells})
require.NoError(t, err)
tests := []struct {
name string
in string
out []string
shouldErr bool
}{
{
name: "alias only",
in: "alias",
out: []string{"cell1", "cell2", "cell3"},
},
{
name: "alias and cell in alias", // test deduping logic
in: "alias,cell1",
out: []string{"cell1", "cell2", "cell3"},
},
{
name: "just cells",
in: "cell1",
out: []string{"cell1"},
},
{
name: "missing alias",
in: "not_an_alias",
shouldErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
expanded, err := ts.ExpandCells(ctx, tt.in)
if tt.shouldErr {
assert.Error(t, err)
return
}
require.NoError(t, err)
assert.ElementsMatch(t, expanded, tt.out)
})
}
})
}

View file

@ -105,6 +105,18 @@ func TestValidateForReshard(t *testing.T) {
sources: []string{"-80", "80-"},
targets: []string{"-40", "40-"},
out: "",
}, {
sources: []string{"52-53"},
targets: []string{"5200-5240", "5240-5280", "5280-52c0", "52c0-5300"},
out: "",
}, {
sources: []string{"5200-5300"},
targets: []string{"520000-524000", "524000-528000", "528000-52c000", "52c000-530000"},
out: "",
}, {
sources: []string{"-80", "80-"},
targets: []string{"-4000000000000000", "4000000000000000-8000000000000000", "8000000000000000-80c0000000000000", "80c0000000000000-"},
out: "",
}, {
sources: []string{"80-", "-80"},
targets: []string{"-40", "40-"},

View file

@ -0,0 +1,41 @@
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package grpcclientcommon defines the flags shared by both grpcvtctlclient and
// grpcvtctldclient.
package grpcclientcommon
import (
"flag"
"google.golang.org/grpc"
"vitess.io/vitess/go/vt/grpcclient"
)
var (
cert = flag.String("vtctld_grpc_cert", "", "the cert to use to connect")
key = flag.String("vtctld_grpc_key", "", "the key to use to connect")
ca = flag.String("vtctld_grpc_ca", "", "the server ca to use to validate servers when connecting")
name = flag.String("vtctld_grpc_server_name", "", "the server name to use to validate server certificate")
)
// SecureDialOption returns a grpc.DialOption configured to use TLS (or
// insecure if no flags were set) based on the vtctld_grpc_* flags declared by
// this package.
func SecureDialOption() (grpc.DialOption, error) {
return grpcclient.SecureDialOption(*cert, *key, *ca, *name)
}
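A hedged sketch of how a client factory consumes this, assuming grpcclient.Dial(addr, failFast, opts...) keeps its current shape; dialExample and addr are illustrative:
// dialExample is illustrative; it assumes the existing grpcclient.Dial helper.
func dialExample(addr string) (*grpc.ClientConn, error) {
	opt, err := grpcclientcommon.SecureDialOption()
	if err != nil {
		return nil, err
	}
	return grpcclient.Dial(addr, grpcclient.FailFast(false), opt) // insecure only if no TLS flags were set
}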

View file

@ -18,7 +18,6 @@ limitations under the License.
package grpcvtctlclient
import (
"flag"
"time"
"context"
@ -27,6 +26,7 @@ import (
"vitess.io/vitess/go/vt/grpcclient"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/vtctl/grpcclientcommon"
"vitess.io/vitess/go/vt/vtctl/vtctlclient"
logutilpb "vitess.io/vitess/go/vt/proto/logutil"
@ -34,20 +34,13 @@ import (
vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
)
var (
cert = flag.String("vtctld_grpc_cert", "", "the cert to use to connect")
key = flag.String("vtctld_grpc_key", "", "the key to use to connect")
ca = flag.String("vtctld_grpc_ca", "", "the server ca to use to validate servers when connecting")
name = flag.String("vtctld_grpc_server_name", "", "the server name to use to validate server certificate")
)
type gRPCVtctlClient struct {
cc *grpc.ClientConn
c vtctlservicepb.VtctlClient
}
func gRPCVtctlClientFactory(addr string) (vtctlclient.VtctlClient, error) {
opt, err := grpcclient.SecureDialOption(*cert, *key, *ca, *name)
opt, err := grpcclientcommon.SecureDialOption()
if err != nil {
return nil, err
}

View file

@ -19,11 +19,10 @@ limitations under the License.
package grpcvtctldclient
import (
"flag"
"google.golang.org/grpc"
"vitess.io/vitess/go/vt/grpcclient"
"vitess.io/vitess/go/vt/vtctl/grpcclientcommon"
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
@ -31,22 +30,6 @@ import (
const connClosedMsg = "grpc: the client connection is closed"
// (TODO:@amason) - These flags match exactly the flags used in grpcvtctlclient.
// If a program attempts to import both of these packages, it will panic during
// startup due to the duplicate flags.
//
// For everything else I've been doing a sed s/vtctl/vtctld, but I cannot do
// that here, since these flags are already "vtctld_*". My other options are to
// name them "vtctld2_*" or to omit them completely.
//
// Not to pitch project ideas in comments, but a nice project to solve
var (
cert = flag.String("vtctld_grpc_cert", "", "the cert to use to connect")
key = flag.String("vtctld_grpc_key", "", "the key to use to connect")
ca = flag.String("vtctld_grpc_ca", "", "the server ca to use to validate servers when connecting")
name = flag.String("vtctld_grpc_server_name", "", "the server name to use to validate server certificate")
)
type gRPCVtctldClient struct {
cc *grpc.ClientConn
c vtctlservicepb.VtctldClient
@ -56,7 +39,7 @@ type gRPCVtctldClient struct {
//go:generate grpcvtctldclient -out client_gen.go
func gRPCVtctldClientFactory(addr string) (vtctldclient.VtctldClient, error) {
opt, err := grpcclient.SecureDialOption(*cert, *key, *ca, *name)
opt, err := grpcclientcommon.SecureDialOption()
if err != nil {
return nil, err
}

View file

@ -21,12 +21,12 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"sync"
"time"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"k8s.io/apimachinery/pkg/util/sets"
"vitess.io/vitess/go/event"
@ -84,10 +84,17 @@ func NewVtctldServer(ts *topo.Server) *VtctldServer {
// AddCellInfo is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) AddCellInfo(ctx context.Context, req *vtctldatapb.AddCellInfoRequest) (*vtctldatapb.AddCellInfoResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.AddCellInfo")
defer span.Finish()
if req.CellInfo.Root == "" {
return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "CellInfo.Root must be non-empty")
}
span.Annotate("cell", req.Name)
span.Annotate("cell_root", req.CellInfo.Root)
span.Annotate("cell_address", req.CellInfo.ServerAddress)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -100,6 +107,12 @@ func (s *VtctldServer) AddCellInfo(ctx context.Context, req *vtctldatapb.AddCell
// AddCellsAlias is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) AddCellsAlias(ctx context.Context, req *vtctldatapb.AddCellsAliasRequest) (*vtctldatapb.AddCellsAliasResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.AddCellsAlias")
defer span.Finish()
span.Annotate("cells_alias", req.Name)
span.Annotate("cells", strings.Join(req.Cells, ","))
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -112,6 +125,12 @@ func (s *VtctldServer) AddCellsAlias(ctx context.Context, req *vtctldatapb.AddCe
// ApplyRoutingRules is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) ApplyRoutingRules(ctx context.Context, req *vtctldatapb.ApplyRoutingRulesRequest) (*vtctldatapb.ApplyRoutingRulesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ApplyRoutingRules")
defer span.Finish()
span.Annotate("skip_rebuild", req.SkipRebuild)
span.Annotate("rebuild_cells", strings.Join(req.RebuildCells, ","))
if err := s.ts.SaveRoutingRules(ctx, req.RoutingRules); err != nil {
return nil, err
}
@ -132,6 +151,14 @@ func (s *VtctldServer) ApplyRoutingRules(ctx context.Context, req *vtctldatapb.A
// ApplyVSchema is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyVSchemaRequest) (*vtctldatapb.ApplyVSchemaResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ApplyVSchema")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("cells", strings.Join(req.Cells, ","))
span.Annotate("skip_rebuild", req.SkipRebuild)
span.Annotate("dry_run", req.DryRun)
if _, err := s.ts.GetKeyspace(ctx, req.Keyspace); err != nil {
if topo.IsErrType(err, topo.NoNode) {
return nil, vterrors.Wrapf(err, "keyspace(%s) doesn't exist, check if the keyspace is initialized", req.Keyspace)
@ -143,10 +170,14 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV
return nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql")
}
var vs *vschemapb.Keyspace
var err error
var (
vs *vschemapb.Keyspace
err error
)
if req.Sql != "" {
span.Annotate("sql_mode", true)
stmt, err := sqlparser.Parse(req.Sql)
if err != nil {
return nil, vterrors.Wrapf(err, "Parse(%s)", req.Sql)
@ -166,6 +197,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV
return nil, vterrors.Wrapf(err, "ApplyVSchemaDDL(%s,%v,%v)", req.Keyspace, vs, ddl)
}
} else { // "jsonMode"
span.Annotate("sql_mode", false)
vs = req.VSchema
}
@ -191,6 +223,13 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV
// ChangeTabletType is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.ChangeTabletTypeRequest) (*vtctldatapb.ChangeTabletTypeResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ChangeTabletType")
defer span.Finish()
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias))
span.Annotate("dry_run", req.DryRun)
span.Annotate("tablet_type", topoproto.TabletTypeLString(req.DbType))
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -199,6 +238,8 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch
return nil, err
}
span.Annotate("before_tablet_type", topoproto.TabletTypeLString(tablet.Type))
if !topo.IsTrivialTypeChange(tablet.Type, req.DbType) {
return nil, fmt.Errorf("tablet %v type change %v -> %v is not an allowed transition for ChangeTabletType", req.TabletAlias, tablet.Type, req.DbType)
}
@ -237,6 +278,16 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch
// CreateKeyspace is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest) (*vtctldatapb.CreateKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.CreateKeyspace")
defer span.Finish()
span.Annotate("keyspace", req.Name)
span.Annotate("keyspace_type", topoproto.KeyspaceTypeLString(req.Type))
span.Annotate("sharding_column_name", req.ShardingColumnName)
span.Annotate("sharding_column_type", topoproto.KeyspaceIDTypeLString(req.ShardingColumnType))
span.Annotate("force", req.Force)
span.Annotate("allow_empty_vschema", req.AllowEmptyVSchema)
switch req.Type {
case topodatapb.KeyspaceType_NORMAL:
case topodatapb.KeyspaceType_SNAPSHOT:
@ -247,6 +298,9 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea
if req.SnapshotTime == nil {
return nil, errors.New("SnapshotTime is required for SNAPSHOT keyspaces")
}
span.Annotate("base_keyspace", req.BaseKeyspace)
span.Annotate("snapshot_time", req.SnapshotTime) // TODO: get a proper string repr
default:
return nil, fmt.Errorf("unknown keyspace type %v", req.Type)
}
@ -325,6 +379,14 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea
// CreateShard is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) CreateShard(ctx context.Context, req *vtctldatapb.CreateShardRequest) (*vtctldatapb.CreateShardResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.CreateShard")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.ShardName)
span.Annotate("force", req.Force)
span.Annotate("include_parent", req.IncludeParent)
if req.IncludeParent {
log.Infof("Creating empty keyspace for %s", req.Keyspace)
if err := s.ts.CreateKeyspace(ctx, req.Keyspace, &topodatapb.Keyspace{}); err != nil {
@ -376,6 +438,12 @@ func (s *VtctldServer) CreateShard(ctx context.Context, req *vtctldatapb.CreateS
// DeleteCellInfo is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) DeleteCellInfo(ctx context.Context, req *vtctldatapb.DeleteCellInfoRequest) (*vtctldatapb.DeleteCellInfoResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.DeleteCellInfo")
defer span.Finish()
span.Annotate("cell", req.Name)
span.Annotate("force", req.Force)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -388,6 +456,11 @@ func (s *VtctldServer) DeleteCellInfo(ctx context.Context, req *vtctldatapb.Dele
// DeleteCellsAlias is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) DeleteCellsAlias(ctx context.Context, req *vtctldatapb.DeleteCellsAliasRequest) (*vtctldatapb.DeleteCellsAliasResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.DeleteCellsAlias")
defer span.Finish()
span.Annotate("cells_alias", req.Name)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -400,6 +473,12 @@ func (s *VtctldServer) DeleteCellsAlias(ctx context.Context, req *vtctldatapb.De
// DeleteKeyspace is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.DeleteKeyspaceRequest) (*vtctldatapb.DeleteKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.DeleteKeyspace")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("recursive", req.Recursive)
shards, err := s.ts.GetShardNames(ctx, req.Keyspace)
if err != nil {
return nil, err
@ -446,6 +525,13 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele
// DeleteShards is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) DeleteShards(ctx context.Context, req *vtctldatapb.DeleteShardsRequest) (*vtctldatapb.DeleteShardsResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.DeleteShards")
defer span.Finish()
span.Annotate("num_shards", len(req.Shards))
span.Annotate("even_if_serving", req.EvenIfServing)
span.Annotate("recursive", req.Recursive)
for _, shard := range req.Shards {
if err := deleteShard(ctx, s.ts, shard.Keyspace, shard.Name, req.Recursive, req.EvenIfServing); err != nil {
return nil, err
@ -457,6 +543,12 @@ func (s *VtctldServer) DeleteShards(ctx context.Context, req *vtctldatapb.Delete
// DeleteTablets is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) DeleteTablets(ctx context.Context, req *vtctldatapb.DeleteTabletsRequest) (*vtctldatapb.DeleteTabletsResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.DeleteTablets")
defer span.Finish()
span.Annotate("num_tablets", len(req.TabletAliases))
span.Annotate("allow_primary", req.AllowPrimary)
for _, alias := range req.TabletAliases {
if err := deleteTablet(ctx, s.ts, alias, req.AllowPrimary); err != nil {
return nil, err
@ -468,6 +560,16 @@ func (s *VtctldServer) DeleteTablets(ctx context.Context, req *vtctldatapb.Delet
// EmergencyReparentShard is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldatapb.EmergencyReparentShardRequest) (*vtctldatapb.EmergencyReparentShardResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.EmergencyReparentShard")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
span.Annotate("new_primary_alias", topoproto.TabletAliasString(req.NewPrimary))
ignoreReplicaAliases := topoproto.TabletAliasList(req.IgnoreReplicas).ToStringSlice()
span.Annotate("ignore_replicas", strings.Join(ignoreReplicaAliases, ","))
waitReplicasTimeout, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout)
if err != nil {
return nil, err
@ -475,6 +577,8 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat
waitReplicasTimeout = time.Second * 30
}
span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds())
m := sync.RWMutex{}
logstream := []*logutilpb.Event{}
logger := logutil.NewCallbackLogger(func(e *logutilpb.Event) {
@ -489,7 +593,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat
req.Shard,
reparentutil.EmergencyReparentOptions{
NewPrimaryAlias: req.NewPrimary,
IgnoreReplicas: sets.NewString(topoproto.TabletAliasList(req.IgnoreReplicas).ToStringSlice()...),
IgnoreReplicas: sets.NewString(ignoreReplicaAliases...),
WaitReplicasTimeout: waitReplicasTimeout,
},
)
@ -519,6 +623,11 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat
// FindAllShardsInKeyspace is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctldatapb.FindAllShardsInKeyspaceRequest) (*vtctldatapb.FindAllShardsInKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.FindAllShardsInKeyspace")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
result, err := s.ts.FindAllShardsInKeyspace(ctx, req.Keyspace)
if err != nil {
return nil, err
@ -540,14 +649,21 @@ func (s *VtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctlda
// GetBackups is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) GetBackups(ctx context.Context, req *vtctldatapb.GetBackupsRequest) (*vtctldatapb.GetBackupsResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetBackups")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
bs, err := backupstorage.GetBackupStorage()
if err != nil {
return nil, err
}
defer bs.Close()
bucket := filepath.Join(req.Keyspace, req.Shard)
span.Annotate("backup_path", bucket)
bhs, err := bs.ListBackups(ctx, bucket)
if err != nil {
return nil, err
@ -566,6 +682,9 @@ func (s *VtctldServer) GetBackups(ctx context.Context, req *vtctldatapb.GetBacku
// GetCellInfoNames is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetCellInfoNames(ctx context.Context, req *vtctldatapb.GetCellInfoNamesRequest) (*vtctldatapb.GetCellInfoNamesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetCellInfoNames")
defer span.Finish()
names, err := s.ts.GetCellInfoNames(ctx)
if err != nil {
return nil, err
@ -576,10 +695,15 @@ func (s *VtctldServer) GetCellInfoNames(ctx context.Context, req *vtctldatapb.Ge
// GetCellInfo is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetCellInfo(ctx context.Context, req *vtctldatapb.GetCellInfoRequest) (*vtctldatapb.GetCellInfoResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetCellInfo")
defer span.Finish()
if req.Cell == "" {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell field is required")
}
span.Annotate("cell", req.Cell)
// We use a strong read, because users using this command want the latest
// data, and this is user-generated, not used in any automated process.
strongRead := true
@ -593,6 +717,9 @@ func (s *VtctldServer) GetCellInfo(ctx context.Context, req *vtctldatapb.GetCell
// GetCellsAliases is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetCellsAliases(ctx context.Context, req *vtctldatapb.GetCellsAliasesRequest) (*vtctldatapb.GetCellsAliasesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetCellsAliases")
defer span.Finish()
strongRead := true
aliases, err := s.ts.GetCellsAliases(ctx, strongRead)
if err != nil {
@ -604,6 +731,11 @@ func (s *VtctldServer) GetCellsAliases(ctx context.Context, req *vtctldatapb.Get
// GetKeyspace is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetKeyspace(ctx context.Context, req *vtctldatapb.GetKeyspaceRequest) (*vtctldatapb.GetKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetKeyspace")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
keyspace, err := s.ts.GetKeyspace(ctx, req.Keyspace)
if err != nil {
return nil, err
@ -619,6 +751,9 @@ func (s *VtctldServer) GetKeyspace(ctx context.Context, req *vtctldatapb.GetKeys
// GetKeyspaces is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetKeyspaces(ctx context.Context, req *vtctldatapb.GetKeyspacesRequest) (*vtctldatapb.GetKeyspacesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetKeyspaces")
defer span.Finish()
names, err := s.ts.GetKeyspaces(ctx)
if err != nil {
return nil, err
@ -640,6 +775,9 @@ func (s *VtctldServer) GetKeyspaces(ctx context.Context, req *vtctldatapb.GetKey
// GetRoutingRules is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetRoutingRules(ctx context.Context, req *vtctldatapb.GetRoutingRulesRequest) (*vtctldatapb.GetRoutingRulesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetRoutingRules")
defer span.Finish()
rr, err := s.ts.GetRoutingRules(ctx)
if err != nil {
return nil, err
@ -652,11 +790,22 @@ func (s *VtctldServer) GetRoutingRules(ctx context.Context, req *vtctldatapb.Get
// GetSchema is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetSchema(ctx context.Context, req *vtctldatapb.GetSchemaRequest) (*vtctldatapb.GetSchemaResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSchema")
defer span.Finish()
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias))
tablet, err := s.ts.GetTablet(ctx, req.TabletAlias)
if err != nil {
return nil, fmt.Errorf("GetTablet(%v) failed: %w", req.TabletAlias, err)
}
span.Annotate("tables", strings.Join(req.Tables, ","))
span.Annotate("exclude_tables", strings.Join(req.ExcludeTables, ","))
span.Annotate("include_views", req.IncludeViews)
span.Annotate("table_names_only", req.TableNamesOnly)
span.Annotate("table_sizes_only", req.TableSizesOnly)
sd, err := s.tmc.GetSchema(ctx, tablet.Tablet, req.Tables, req.ExcludeTables, req.IncludeViews)
if err != nil {
return nil, fmt.Errorf("GetSchema(%v, %v, %v, %v) failed: %w", tablet.Tablet, req.Tables, req.ExcludeTables, req.IncludeViews, err)
@ -694,6 +843,12 @@ func (s *VtctldServer) GetSchema(ctx context.Context, req *vtctldatapb.GetSchema
// GetShard is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRequest) (*vtctldatapb.GetShardResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShard")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.ShardName)
shard, err := s.ts.GetShard(ctx, req.Keyspace, req.ShardName)
if err != nil {
return nil, err
@ -710,6 +865,9 @@ func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRe
// GetSrvKeyspaces is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.GetSrvKeyspacesRequest) (*vtctldatapb.GetSrvKeyspacesResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSrvKeyspaces")
defer span.Finish()
cells := req.Cells
if len(cells) == 0 {
@ -721,6 +879,8 @@ func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.Get
}
}
span.Annotate("cells", strings.Join(cells, ","))
srvKeyspaces := make(map[string]*topodatapb.SrvKeyspace, len(cells))
for _, cell := range cells {
@ -746,6 +906,11 @@ func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.Get
// GetSrvVSchema is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetSrvVSchema(ctx context.Context, req *vtctldatapb.GetSrvVSchemaRequest) (*vtctldatapb.GetSrvVSchemaResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSrvVSchema")
defer span.Finish()
span.Annotate("cell", req.Cell)
vschema, err := s.ts.GetSrvVSchema(ctx, req.Cell)
if err != nil {
return nil, err
@ -758,6 +923,9 @@ func (s *VtctldServer) GetSrvVSchema(ctx context.Context, req *vtctldatapb.GetSr
// GetSrvVSchemas is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetSrvVSchemasRequest) (*vtctldatapb.GetSrvVSchemasResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSrvVSchemas")
defer span.Finish()
allCells, err := s.ts.GetCellInfoNames(ctx)
if err != nil {
return nil, err
@ -773,6 +941,7 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS
cells = s1.Intersection(s2).List()
}
span.Annotate("cells", strings.Join(cells, ","))
svs := make(map[string]*vschemapb.SrvVSchema, len(cells))
for _, cell := range cells {
@ -797,6 +966,11 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS
// GetTablet is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetTablet(ctx context.Context, req *vtctldatapb.GetTabletRequest) (*vtctldatapb.GetTabletResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetTablet")
defer span.Finish()
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias))
ti, err := s.ts.GetTablet(ctx, req.TabletAlias)
if err != nil {
return nil, err
@ -809,6 +983,12 @@ func (s *VtctldServer) GetTablet(ctx context.Context, req *vtctldatapb.GetTablet
// GetTablets is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTabletsRequest) (*vtctldatapb.GetTabletsResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetTablets")
defer span.Finish()
span.Annotate("cells", strings.Join(req.Cells, ","))
span.Annotate("strict", req.Strict)
// It is possible that an old primary has not yet updated its type in the
// topo. In that case, report its type as UNKNOWN. It used to be MASTER but
// is no longer the serving primary.
@ -833,11 +1013,16 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
switch {
case len(req.TabletAliases) > 0:
span.Annotate("tablet_aliases", strings.Join(topoproto.TabletAliasList(req.TabletAliases).ToStringSlice(), ","))
tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases)
if err != nil {
err = fmt.Errorf("GetTabletMap(%v) failed: %w", req.TabletAliases, err)
}
case req.Keyspace != "" && req.Shard != "":
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
tabletMap, err = s.ts.GetTabletMapForShard(ctx, req.Keyspace, req.Shard)
if err != nil {
err = fmt.Errorf("GetTabletMapForShard(%s, %s) failed: %w", req.Keyspace, req.Shard, err)
@ -970,6 +1155,11 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
// GetVSchema is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetVSchema(ctx context.Context, req *vtctldatapb.GetVSchemaRequest) (*vtctldatapb.GetVSchemaResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetVSchema")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
vschema, err := s.ts.GetVSchema(ctx, req.Keyspace)
if err != nil {
return nil, err
@ -993,6 +1183,9 @@ func (s *VtctldServer) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWor
// InitShardPrimary is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.InitShardPrimaryRequest) (*vtctldatapb.InitShardPrimaryResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.InitShardPrimary")
defer span.Finish()
if req.Keyspace == "" {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "keyspace field is required")
}
@ -1008,6 +1201,11 @@ func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.In
waitReplicasTimeout = time.Second * 30
}
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds())
span.Annotate("force", req.Force)
ctx, unlock, err := s.ts.LockShard(ctx, req.Keyspace, req.Shard, fmt.Sprintf("InitShardPrimary(%v)", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)))
if err != nil {
return nil, err
@ -1019,7 +1217,7 @@ func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.In
logstream := []*logutilpb.Event{}
resp := &vtctldatapb.InitShardPrimaryResponse{}
err = s.InitShardPrimaryLocked(ctx, ev, req, waitReplicasTimeout, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(e *logutilpb.Event) {
err = s.InitShardPrimaryLocked(ctx, ev, req, waitReplicasTimeout, s.tmc, logutil.NewCallbackLogger(func(e *logutilpb.Event) {
m.Lock()
defer m.Unlock()
@ -1245,6 +1443,9 @@ func (s *VtctldServer) InitShardPrimaryLocked(
// PlannedReparentShard is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatapb.PlannedReparentShardRequest) (*vtctldatapb.PlannedReparentShardResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.PlannedReparentShard")
defer span.Finish()
waitReplicasTimeout, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout)
if err != nil {
return nil, err
@ -1252,6 +1453,18 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap
waitReplicasTimeout = time.Second * 30
}
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds())
if req.AvoidPrimary != nil {
span.Annotate("avoid_primary_alias", topoproto.TabletAliasString(req.AvoidPrimary))
}
if req.NewPrimary != nil {
span.Annotate("new_primary_alias", topoproto.TabletAliasString(req.NewPrimary))
}
m := sync.RWMutex{}
logstream := []*logutilpb.Event{}
logger := logutil.NewCallbackLogger(func(e *logutilpb.Event) {
@ -1296,6 +1509,11 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap
// RebuildVSchemaGraph is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) RebuildVSchemaGraph(ctx context.Context, req *vtctldatapb.RebuildVSchemaGraphRequest) (*vtctldatapb.RebuildVSchemaGraphResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.RebuildVSchemaGraph")
defer span.Finish()
span.Annotate("cells", strings.Join(req.Cells, ","))
if err := s.ts.RebuildSrvVSchema(ctx, req.Cells); err != nil {
return nil, err
}
@ -1305,6 +1523,14 @@ func (s *VtctldServer) RebuildVSchemaGraph(ctx context.Context, req *vtctldatapb
// RemoveKeyspaceCell is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) RemoveKeyspaceCell(ctx context.Context, req *vtctldatapb.RemoveKeyspaceCellRequest) (*vtctldatapb.RemoveKeyspaceCellResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.RemoveKeyspaceCell")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("cell", req.Cell)
span.Annotate("force", req.Force)
span.Annotate("recursive", req.Recursive)
shards, err := s.ts.GetShardNames(ctx, req.Keyspace)
if err != nil {
return nil, err
@ -1329,6 +1555,15 @@ func (s *VtctldServer) RemoveKeyspaceCell(ctx context.Context, req *vtctldatapb.
// RemoveShardCell is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) RemoveShardCell(ctx context.Context, req *vtctldatapb.RemoveShardCellRequest) (*vtctldatapb.RemoveShardCellResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.RemoveShardCell")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.ShardName)
span.Annotate("cell", req.Cell)
span.Annotate("force", req.Force)
span.Annotate("recursive", req.Recursive)
if err := removeShardCell(ctx, s.ts, req.Cell, req.Keyspace, req.ShardName, req.Recursive, req.Force); err != nil {
return nil, err
}
@ -1338,10 +1573,15 @@ func (s *VtctldServer) RemoveShardCell(ctx context.Context, req *vtctldatapb.Rem
// ReparentTablet is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.ReparentTabletRequest) (*vtctldatapb.ReparentTabletResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ReparentTablet")
defer span.Finish()
if req.Tablet == nil {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "tablet alias must not be nil")
}
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.Tablet))
tablet, err := s.ts.GetTablet(ctx, req.Tablet)
if err != nil {
return nil, err
@ -1386,6 +1626,12 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa
// ShardReplicationPositions is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctldatapb.ShardReplicationPositionsRequest) (*vtctldatapb.ShardReplicationPositionsResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ShardReplicationPositions")
defer span.Finish()
span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", req.Shard)
tabletInfoMap, err := s.ts.GetTabletMapForShard(ctx, req.Keyspace, req.Shard)
if err != nil {
return nil, fmt.Errorf("GetTabletMapForShard(%s, %s) failed: %w", req.Keyspace, req.Shard, err)
@ -1417,6 +1663,11 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl
go func(ctx context.Context, alias string, tablet *topodatapb.Tablet) {
defer wg.Done()
span, ctx := trace.NewSpan(ctx, "VtctldServer.getPrimaryPosition")
defer span.Finish()
span.Annotate("tablet_alias", alias)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -1454,6 +1705,11 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl
go func(ctx context.Context, alias string, tablet *topodatapb.Tablet) {
defer wg.Done()
span, ctx := trace.NewSpan(ctx, "VtctldServer.getReplicationStatus")
defer span.Finish()
span.Annotate("tablet_alias", alias)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -1497,10 +1753,15 @@ func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctl
// TabletExternallyReparented is part of the vtctldservicepb.VtctldServer interface.
func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtctldatapb.TabletExternallyReparentedRequest) (*vtctldatapb.TabletExternallyReparentedResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.TabletExternallyReparented")
defer span.Finish()
if req.Tablet == nil {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil")
}
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.Tablet))
tablet, err := s.ts.GetTablet(ctx, req.Tablet)
if err != nil {
log.Warningf("TabletExternallyReparented: failed to read tablet record for %v: %v", topoproto.TabletAliasString(req.Tablet), err)
@ -1557,6 +1818,13 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct
// UpdateCellInfo is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) UpdateCellInfo(ctx context.Context, req *vtctldatapb.UpdateCellInfoRequest) (*vtctldatapb.UpdateCellInfoResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.UpdateCellInfo")
defer span.Finish()
span.Annotate("cell", req.Name)
span.Annotate("cell_server_address", req.CellInfo.ServerAddress)
span.Annotate("cell_root", req.CellInfo.Root)
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
@ -1597,6 +1865,12 @@ func (s *VtctldServer) UpdateCellInfo(ctx context.Context, req *vtctldatapb.Upda
// UpdateCellsAlias is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) UpdateCellsAlias(ctx context.Context, req *vtctldatapb.UpdateCellsAliasRequest) (*vtctldatapb.UpdateCellsAliasResponse, error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.UpdateCellsAlias")
defer span.Finish()
span.Annotate("cells_alias", req.Name)
span.Annotate("cells_alias_cells", strings.Join(req.CellsAlias.Cells, ","))
ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout)
defer cancel()
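
Taken together, the hunks above apply one instrumentation pattern to every vtctld RPC: open a named span, finish it on return, and annotate the request fields before any topo work happens. Below is a minimal sketch of that pattern, assuming only the trace API already visible in this diff (NewSpan, Finish, Annotate); the request type and method name are made up for illustration.

package example

import (
	"context"

	"vitess.io/vitess/go/trace"
)

// frobnicateRequest is a hypothetical request type, used only to illustrate the pattern.
type frobnicateRequest struct {
	Keyspace string
	Shard    string
	Force    bool
}

// frobnicate mirrors the span-per-RPC shape added above: create a span named after
// the method, finish it on return, and annotate the request fields up front so every
// downstream topo or tablet call inherits the annotated context.
func frobnicate(ctx context.Context, req *frobnicateRequest) error {
	span, ctx := trace.NewSpan(ctx, "VtctldServer.Frobnicate")
	defer span.Finish()

	span.Annotate("keyspace", req.Keyspace)
	span.Annotate("shard", req.Shard)
	span.Annotate("force", req.Force)

	// The real methods pass the annotated ctx on to topo and tablet-manager calls here.
	_ = ctx
	return nil
}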


@ -304,7 +304,9 @@ func TestApplyRoutingRules(t *testing.T) {
factory.SetError(errors.New("topo down for testing"))
}
vtctld := NewVtctldServer(ts)
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
return NewVtctldServer(ts)
})
_, err := vtctld.ApplyRoutingRules(ctx, tt.req)
if tt.shouldErr {
assert.Error(t, err)
@ -1326,7 +1328,9 @@ func TestDeleteCellsAlias(t *testing.T) {
require.NoError(t, err, "test setup failed")
}
vtctld := NewVtctldServer(tt.ts)
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
return NewVtctldServer(ts)
})
_, err := vtctld.DeleteCellsAlias(ctx, tt.req)
if tt.shouldErr {
assert.Error(t, err)
@ -2717,6 +2721,8 @@ func TestFindAllShardsInKeyspace(t *testing.T) {
}
func TestGetBackups(t *testing.T) {
t.Parallel()
ctx := context.Background()
ts := memorytopo.NewServer()
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
@ -3001,7 +3007,9 @@ func TestGetRoutingRules(t *testing.T) {
factory.SetError(errors.New("topo down for testing"))
}
vtctld := NewVtctldServer(ts)
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
return NewVtctldServer(ts)
})
resp, err := vtctld.GetRoutingRules(ctx, &vtctldatapb.GetRoutingRulesRequest{})
if tt.shouldErr {
assert.Error(t, err)
@ -4513,7 +4521,9 @@ func TestRebuildVSchemaGraph(t *testing.T) {
factory.SetError(errors.New("topo down for testing"))
}
vtctld := NewVtctldServer(ts)
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
return NewVtctldServer(ts)
})
_, err := vtctld.RebuildVSchemaGraph(ctx, req)
if tt.shouldErr {
assert.Error(t, err)


@ -20,6 +20,7 @@ import (
"context"
"fmt"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@ -31,6 +32,14 @@ import (
)
func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard string, recursive bool, evenIfServing bool) error {
span, ctx := trace.NewSpan(ctx, "VtctldServer.deleteShard")
defer span.Finish()
span.Annotate("keyspace", keyspace)
span.Annotate("shard", shard)
span.Annotate("recursive", recursive)
span.Annotate("even_if_serving", evenIfServing)
// Read the Shard object. If it's not in the topo, try to clean up the topo
// anyway.
shardInfo, err := ts.GetShard(ctx, keyspace, shard)
@ -81,6 +90,14 @@ func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard st
// distinct from the RemoveShardCell rpc. Despite having similar names, they are
// **not** the same!
func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shard string, cell string, recursive bool) error {
span, ctx := trace.NewSpan(ctx, "VtctldServer.deleteShardCell")
defer span.Finish()
span.Annotate("keyspace", keyspace)
span.Annotate("shard", shard)
span.Annotate("cell", cell)
span.Annotate("recursive", recursive)
var aliases []*topodatapb.TabletAlias
// Get the ShardReplication object for the cell. Collect all the tablets
@ -156,6 +173,12 @@ func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shar
}
func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.TabletAlias, allowPrimary bool) (err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.deleteTablet")
defer span.Finish()
span.Annotate("tablet_alias", topoproto.TabletAliasString(alias))
span.Annotate("allow_primary", allowPrimary)
tablet, err := ts.GetTablet(ctx, alias)
if err != nil {
return err
@ -166,6 +189,8 @@ func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.Tablet
return err
}
span.Annotate("is_primary", isPrimary)
if isPrimary && !allowPrimary {
return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot delete tablet %v as it is a master, pass AllowPrimary = true", topoproto.TabletAliasString(alias))
}
@ -209,6 +234,15 @@ func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.Tablet
}
func removeShardCell(ctx context.Context, ts *topo.Server, cell string, keyspace string, shardName string, recursive bool, force bool) error {
span, ctx := trace.NewSpan(ctx, "VtctldServer.removeShardCell")
defer span.Finish()
span.Annotate("keyspace", keyspace)
span.Annotate("shard", shardName)
span.Annotate("cell", cell)
span.Annotate("recursive", recursive)
span.Annotate("force", force)
shard, err := ts.GetShard(ctx, keyspace, shardName)
if err != nil {
return err
@ -234,7 +268,7 @@ func removeShardCell(ctx context.Context, ts *topo.Server, cell string, keyspace
if recursive {
log.Infof("Deleting all tablets in cell %v in shard %v/%v", cell, keyspace, shardName)
for _, node := range replication.Nodes {
// We don't care about scraping our updating the replication
// We don't care about scrapping or updating the replication
// graph, because we're about to delete the entire replication
// graph.
log.Infof("Deleting tablet %v", topoproto.TabletAliasString(node.TabletAlias))


@ -2132,9 +2132,14 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla
s := ""
var progress wrangler.TableCopyProgress
for table := range *copyProgress {
var rowCountPct, tableSizePct int64
progress = *(*copyProgress)[table]
rowCountPct := 100.0 * progress.TargetRowCount / progress.SourceRowCount
tableSizePct := 100.0 * progress.TargetTableSize / progress.SourceTableSize
if progress.SourceRowCount > 0 {
rowCountPct = 100.0 * progress.TargetRowCount / progress.SourceRowCount
}
if progress.SourceTableSize > 0 {
tableSizePct = 100.0 * progress.TargetTableSize / progress.SourceTableSize
}
s += fmt.Sprintf("%s: rows copied %d/%d (%d%%), size copied %d/%d (%d%%)\n",
table, progress.TargetRowCount, progress.SourceRowCount, rowCountPct,
progress.TargetTableSize, progress.SourceTableSize, tableSizePct)
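
The change above guards the percentage math against empty source tables, which previously divided by zero. A small, self-contained sketch of the same guard; the struct below only mirrors the fields of wrangler.TableCopyProgress used here and is not the real type.

package main

import "fmt"

// copyProgress mirrors the shape of wrangler.TableCopyProgress for illustration only.
type copyProgress struct {
	TargetRowCount, SourceRowCount   int64
	TargetTableSize, SourceTableSize int64
}

func main() {
	p := copyProgress{TargetRowCount: 50, SourceRowCount: 0, TargetTableSize: 10, SourceTableSize: 40}

	// Default to 0% and only divide when the denominator is non-zero,
	// exactly the guard the hunk above introduces.
	var rowCountPct, tableSizePct int64
	if p.SourceRowCount > 0 {
		rowCountPct = 100 * p.TargetRowCount / p.SourceRowCount
	}
	if p.SourceTableSize > 0 {
		tableSizePct = 100 * p.TargetTableSize / p.SourceTableSize
	}

	fmt.Printf("rows copied %d/%d (%d%%), size copied %d/%d (%d%%)\n",
		p.TargetRowCount, p.SourceRowCount, rowCountPct,
		p.TargetTableSize, p.SourceTableSize, tableSizePct)
}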


@ -32,3 +32,9 @@ const WrongTablet = "wrong tablet type"
// RxWrongTablet regex for invalid tablet type error
var RxWrongTablet = regexp.MustCompile("(wrong|invalid) tablet type")
// Constants for error messages
const (
// PrimaryVindexNotSet is the error message to be used when there is no primary vindex found on a table
PrimaryVindexNotSet = "table '%s' does not have a primary vindex"
)
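
A hedged sketch of how this message constant pairs with the RequiresPrimaryKey state added in the next hunk, matching the vterrors.NewErrorf call sites that appear later in this commit; the helper and its arguments are illustrative, and the proto import path is assumed to follow the repo's go/vt/proto layout.

package example

import (
	vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
	"vitess.io/vitess/go/vt/vterrors"
)

// requirePrimaryVindex illustrates how PrimaryVindexNotSet is meant to be used:
// a FAILED_PRECONDITION error carrying the RequiresPrimaryKey error state.
func requirePrimaryVindex(tableName string, hasPrimaryVindex bool) error {
	if !hasPrimaryVindex {
		return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, tableName)
	}
	return nil
}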


@ -43,6 +43,7 @@ const (
InnodbReadOnly
WrongNumberOfColumnsInSelect
CantDoThisInTransaction
RequiresPrimaryKey
// not found
BadDb


@ -81,7 +81,7 @@ func createVSchema() (vschema *vindexes.VSchema, err error) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
vs := vindexes.BuildVSchema(invschema)
if err != nil {
return nil, err
}


@ -394,6 +394,9 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q
// keyspace ids. For regular inserts, a failure to find a route
// results in an error. For 'ignore' type inserts, the keyspace
// id is returned as nil, which is used later to drop the corresponding rows.
if len(vindexRowsValues) == 0 || len(ins.Table.ColumnVindexes) == 0 {
return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.Table.Name)
}
keyspaceIDs, err := ins.processPrimary(vcursor, vindexRowsValues[0], ins.Table.ColumnVindexes[0])
if err != nil {
return nil, nil, err


@ -195,10 +195,7 @@ func TestInsertShardedSimple(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
// A single row insert should be autocommitted
@ -222,7 +219,7 @@ func TestInsertShardedSimple(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
vc.shardForKsid = []string{"20-", "-20", "20-"}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -342,10 +339,7 @@ func TestInsertShardedFail(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -369,7 +363,7 @@ func TestInsertShardedFail(t *testing.T) {
vc := &loggingVCursor{}
// The lookup will fail to map to a keyspace id.
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
require.EqualError(t, err, `could not map [INT64(1)] to a keyspace id`)
}
@ -394,10 +388,7 @@ func TestInsertShardedGenerate(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -519,10 +510,7 @@ func TestInsertShardedOwned(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -583,7 +571,7 @@ func TestInsertShardedOwned(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
vc.shardForKsid = []string{"20-", "-20", "20-"}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -646,10 +634,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -681,7 +666,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
vc.shardForKsid = []string{"20-", "-20", "20-"}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -728,10 +713,7 @@ func TestInsertShardedGeo(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -774,7 +756,7 @@ func TestInsertShardedGeo(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
vc.shardForKsid = []string{"20-", "-20"}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -843,10 +825,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -947,7 +926,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) {
ksid0,
}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -1020,10 +999,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1067,7 +1043,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) {
ksid0,
}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -1122,10 +1098,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1202,7 +1175,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) {
nonemptyResult,
nonemptyResult,
}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -1264,10 +1237,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1321,7 +1291,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) {
{},
nonemptyResult,
}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -1376,10 +1346,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1410,7 +1377,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
require.EqualError(t, err, `values [[INT64(2)]] for column [c3] does not map to keyspace ids`)
}
@ -1457,10 +1424,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1533,7 +1497,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) {
nonemptyResult,
}
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
if err != nil {
t.Fatal(err)
}
@ -1586,10 +1550,7 @@ func TestInsertShardedUnownedReverseMapFail(t *testing.T) {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
t.Fatal(err)
}
vs := vindexes.BuildVSchema(invschema)
ks := vs.Keyspaces["sharded"]
ins := NewInsert(
@ -1620,6 +1581,6 @@ func TestInsertShardedUnownedReverseMapFail(t *testing.T) {
vc := newDMLTestVCursor("-20", "20-")
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
_, err := ins.Execute(vc, map[string]*querypb.BindVariable{}, false)
require.EqualError(t, err, `value must be supplied for column [c3]`)
}


@ -652,10 +652,7 @@ func buildTestVSchema() *vindexes.VSchema {
},
},
}
vs, err := vindexes.BuildVSchema(invschema)
if err != nil {
panic(err)
}
vs := vindexes.BuildVSchema(invschema)
return vs
}


@ -56,7 +56,7 @@ func getDMLRouting(where *sqlparser.Where, table *vindexes.Table) (engine.DMLOpc
}
}
if ksidVindex == nil {
return engine.Scatter, nil, "", nil, nil, vterrors.New(vtrpcpb.Code_INTERNAL, "table without a primary vindex is not expected")
return engine.Scatter, nil, "", nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, table.Name)
}
return engine.Scatter, ksidVindex, ksidCol, nil, nil, nil
}


@ -290,7 +290,7 @@ func loadSchema(t testing.TB, filename string) *vindexes.VSchema {
if err != nil {
t.Fatal(err)
}
vschema, err := vindexes.BuildVSchema(formal)
vschema := vindexes.BuildVSchema(formal)
if err != nil {
t.Fatal(err)
}


@ -34,8 +34,8 @@ import (
)
type (
keyspace = string
tableName = string
keyspaceStr = string
tableNameStr = string
// Tracker contains the required fields to perform schema tracking.
Tracker struct {
@ -48,7 +48,7 @@ type (
signal func() // a function that we'll call whenever we have new schema data
// map of keyspace currently tracked
tracked map[keyspace]*updateController
tracked map[keyspaceStr]*updateController
consumeDelay time.Duration
}
)
@ -61,8 +61,8 @@ func NewTracker(ch chan *discovery.TabletHealth) *Tracker {
return &Tracker{
ctx: context.Background(),
ch: ch,
tables: &tableMap{m: map[keyspace]map[tableName][]vindexes.Column{}},
tracked: map[keyspace]*updateController{},
tables: &tableMap{m: map[keyspaceStr]map[tableNameStr][]vindexes.Column{}},
tracked: map[keyspaceStr]*updateController{},
consumeDelay: defaultConsumeDelay,
}
}
@ -76,6 +76,7 @@ func (t *Tracker) LoadKeyspace(conn queryservice.QueryService, target *querypb.T
t.mu.Lock()
defer t.mu.Unlock()
t.updateTables(target.Keyspace, res)
t.tracked[target.Keyspace].setLoaded(true)
log.Infof("finished loading schema for keyspace %s. Found %d tables", target.Keyspace, len(res.Rows))
return nil
}
@ -92,7 +93,7 @@ func (t *Tracker) Start() {
ksUpdater := t.getKeyspaceUpdateController(th)
ksUpdater.add(th)
case <-ctx.Done():
close(t.ch)
// Closing the channel happens outside the tracker's scope; it is the responsibility of whoever created this tracker.
return
}
}
@ -105,22 +106,27 @@ func (t *Tracker) getKeyspaceUpdateController(th *discovery.TabletHealth) *updat
t.mu.Lock()
defer t.mu.Unlock()
ksUpdater, ok := t.tracked[th.Target.Keyspace]
if !ok {
init := func(th *discovery.TabletHealth) bool {
err := t.LoadKeyspace(th.Conn, th.Target)
if err != nil {
log.Warningf("Unable to add keyspace to tracker: %v", err)
return false
}
return true
}
ksUpdater = &updateController{update: t.updateSchema, init: init, signal: t.signal, consumeDelay: t.consumeDelay}
ksUpdater, exists := t.tracked[th.Target.Keyspace]
if !exists {
ksUpdater = t.newUpdateController()
t.tracked[th.Target.Keyspace] = ksUpdater
}
return ksUpdater
}
func (t *Tracker) newUpdateController() *updateController {
return &updateController{update: t.updateSchema, reloadKeyspace: t.initKeyspace, signal: t.signal, consumeDelay: t.consumeDelay}
}
func (t *Tracker) initKeyspace(th *discovery.TabletHealth) bool {
err := t.LoadKeyspace(th.Conn, th.Target)
if err != nil {
log.Warningf("Unable to add keyspace to tracker: %v", err)
return false
}
return true
}
// Stop stops the schema tracking
func (t *Tracker) Stop() {
log.Info("Stopping schema tracking")
@ -149,7 +155,8 @@ func (t *Tracker) Tables(ks string) map[string][]vindexes.Column {
}
func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {
tables, err := sqltypes.BuildBindVariable(th.TablesUpdated)
tablesUpdated := th.Stats.TableSchemaChanged
tables, err := sqltypes.BuildBindVariable(tablesUpdated)
if err != nil {
log.Errorf("failed to read updated tables from TabletHealth: %v", err)
return false
@ -157,8 +164,9 @@ func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {
bv := map[string]*querypb.BindVariable{"tableNames": tables}
res, err := th.Conn.Execute(t.ctx, th.Target, mysql.FetchUpdatedTables, bv, 0, 0, nil)
if err != nil {
// TODO: these tables should now become non-authoritative
log.Warningf("error fetching new schema for %v, making them non-authoritative: %v", th.TablesUpdated, err)
t.tracked[th.Target.Keyspace].setLoaded(false)
// TODO: optimize for the tables that got errored out.
log.Warningf("error fetching new schema for %v, making them non-authoritative: %v", tablesUpdated, err)
return false
}
@ -167,7 +175,7 @@ func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {
// first we empty all prior schema. deleted tables will not show up in the result,
// so this is the only chance to delete
for _, tbl := range th.TablesUpdated {
for _, tbl := range tablesUpdated {
t.tables.delete(th.Target.Keyspace, tbl)
}
t.updateTables(th.Target.Keyspace, res)
@ -190,17 +198,28 @@ func (t *Tracker) updateTables(keyspace string, res *sqltypes.Result) {
// RegisterSignalReceiver allows a function to register to be called when new schema is available
func (t *Tracker) RegisterSignalReceiver(f func()) {
t.mu.Lock()
defer t.mu.Unlock()
for _, controller := range t.tracked {
controller.signal = f
}
t.signal = f
}
// AddNewKeyspace adds keyspace to the tracker.
func (t *Tracker) AddNewKeyspace(conn queryservice.QueryService, target *querypb.Target) error {
t.tracked[target.Keyspace] = t.newUpdateController()
return t.LoadKeyspace(conn, target)
}
type tableMap struct {
m map[keyspace]map[tableName][]vindexes.Column
m map[keyspaceStr]map[tableNameStr][]vindexes.Column
}
func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column) {
m := tm.m[ks]
if m == nil {
m = make(map[tableName][]vindexes.Column)
m = make(map[tableNameStr][]vindexes.Column)
tm.m[ks] = m
}
m[tbl] = cols
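
For context, here is an illustrative wiring of the tracker using only the exported surface visible in this diff (NewTracker, RegisterSignalReceiver, Start, Stop, Tables); the package import path is an assumption and the callback body is made up.

package example

import (
	"fmt"

	"vitess.io/vitess/go/vt/discovery"
	vtgateschema "vitess.io/vitess/go/vt/vtgate/schema" // assumed package path for the Tracker shown above
)

// runTracker wires up the schema tracker. The healthcheck layer owns healthCh
// and is responsible for closing it, per the comment added in Start().
func runTracker(healthCh chan *discovery.TabletHealth) *vtgateschema.Tracker {
	tracker := vtgateschema.NewTracker(healthCh)

	// Subscribers are notified whenever a keyspace schema is loaded or updated;
	// refreshed columns are then available via tracker.Tables(keyspace).
	tracker.RegisterSignalReceiver(func() {
		fmt.Println("schema changed; replan affected queries")
	})

	tracker.Start()
	return tracker
}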


@ -22,6 +22,8 @@ import (
"testing"
"time"
"vitess.io/vitess/go/mysql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -168,11 +170,11 @@ func TestTracking(t *testing.T) {
for _, d := range tcase.deltas {
ch <- &discovery.TabletHealth{
Conn: sbc,
Tablet: tablet,
Target: target,
Serving: true,
TablesUpdated: d.updTbl,
Conn: sbc,
Tablet: tablet,
Target: target,
Serving: true,
Stats: &querypb.RealtimeStats{TableSchemaChanged: d.updTbl},
}
}
@ -190,6 +192,74 @@ func TestTracking(t *testing.T) {
}
}
func TestTrackingUnHealthyTablet(t *testing.T) {
target := &querypb.Target{
Keyspace: "ks",
Shard: "-80",
TabletType: topodatapb.TabletType_MASTER,
Cell: "aa",
}
tablet := &topodatapb.Tablet{
Keyspace: target.Keyspace,
Shard: target.Shard,
Type: target.TabletType,
}
sbc := sandboxconn.NewSandboxConn(tablet)
ch := make(chan *discovery.TabletHealth)
tracker := NewTracker(ch)
tracker.consumeDelay = 1 * time.Millisecond
tracker.Start()
defer tracker.Stop()
// the test is written so that it expects 3 signals to be sent from the tracker to the subscriber.
wg := sync.WaitGroup{}
wg.Add(3)
tracker.RegisterSignalReceiver(func() {
wg.Done()
})
tcases := []struct {
name string
serving bool
expectedQuery string
updatedTbls []string
}{
{
name: "initial load",
serving: true,
},
{
name: "initial load",
serving: true,
updatedTbls: []string{"a"},
},
{
name: "non serving tablet",
serving: false,
},
{
name: "now serving tablet",
serving: true,
},
}
sbc.SetResults([]*sqltypes.Result{{}, {}, {}})
for _, tcase := range tcases {
ch <- &discovery.TabletHealth{
Conn: sbc,
Tablet: tablet,
Target: target,
Serving: tcase.serving,
Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updatedTbls},
}
time.Sleep(5 * time.Millisecond)
}
require.False(t, waitTimeout(&wg, time.Second), "schema was updated but received no signal")
require.Equal(t, []string{mysql.FetchTables, mysql.FetchUpdatedTables, mysql.FetchTables}, sbc.StringQueries())
}
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
@ -207,7 +277,7 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
func TestTrackerGetKeyspaceUpdateController(t *testing.T) {
ks3 := &updateController{}
tracker := Tracker{
tracked: map[keyspace]*updateController{
tracked: map[keyspaceStr]*updateController{
"ks3": ks3,
},
}
@ -231,7 +301,7 @@ func TestTrackerGetKeyspaceUpdateController(t *testing.T) {
assert.Equal(t, ks2, tracker.getKeyspaceUpdateController(th2), "received different updateController")
assert.Equal(t, ks3, tracker.getKeyspaceUpdateController(th3), "received different updateController")
assert.NotNil(t, ks1.init, "ks1 needs to be initialized")
assert.NotNil(t, ks2.init, "ks2 needs to be initialized")
assert.Nil(t, ks3.init, "ks3 already initialized")
assert.NotNil(t, ks1.reloadKeyspace, "ks1 needs to be initialized")
assert.NotNil(t, ks2.reloadKeyspace, "ks2 needs to be initialized")
assert.Nil(t, ks3.reloadKeyspace, "ks3 already initialized")
}
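
The test above leans on waitTimeout, whose body is cut off by the hunk boundaries. The helper is the classic wait-on-a-WaitGroup-with-timeout pattern; the sketch below shows that pattern in full and is not necessarily the exact code in the repo.

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout waits for the WaitGroup and reports true if the timeout fired first.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		defer close(done)
		wg.Wait()
	}()
	select {
	case <-done:
		return false // all goroutines finished in time
	case <-time.After(timeout):
		return true // timed out waiting
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(10 * time.Millisecond)
	}()
	fmt.Println("timed out:", waitTimeout(&wg, time.Second))
}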


@ -20,6 +20,8 @@ import (
"sync"
"time"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/discovery"
)
@ -29,12 +31,13 @@ type (
}
updateController struct {
mu sync.Mutex
queue *queue
consumeDelay time.Duration
update func(th *discovery.TabletHealth) bool
init func(th *discovery.TabletHealth) bool
signal func()
mu sync.Mutex
queue *queue
consumeDelay time.Duration
update func(th *discovery.TabletHealth) bool
reloadKeyspace func(th *discovery.TabletHealth) bool
signal func()
loaded bool
}
)
@ -54,13 +57,10 @@ func (u *updateController) consume() {
u.mu.Unlock()
var success bool
if u.init != nil {
success = u.init(item)
if success {
u.init = nil
}
} else {
if u.loaded {
success = u.update(item)
} else {
success = u.reloadKeyspace(item)
}
if success && u.signal != nil {
u.signal()
@ -70,36 +70,59 @@ func (u *updateController) consume() {
func (u *updateController) getItemFromQueueLocked() *discovery.TabletHealth {
item := u.queue.items[0]
i := 0
for ; i < len(u.queue.items); i++ {
for _, table := range u.queue.items[i].TablesUpdated {
found := false
for _, itemTable := range item.TablesUpdated {
if itemTable == table {
found = true
break
itemsCount := len(u.queue.items)
// Merge the changed-table lists only when the keyspace is already loaded and we are updating selected tables.
if u.loaded {
for i := 1; i < itemsCount; i++ {
for _, table := range u.queue.items[i].Stats.TableSchemaChanged {
found := false
for _, itemTable := range item.Stats.TableSchemaChanged {
if itemTable == table {
found = true
break
}
}
if !found {
item.Stats.TableSchemaChanged = append(item.Stats.TableSchemaChanged, table)
}
}
if !found {
item.TablesUpdated = append(item.TablesUpdated, table)
}
}
}
// empty the queue: when loaded, all queued items have been merged into the first one; otherwise the pending keyspace reload supersedes them
u.queue.items = u.queue.items[i:]
u.queue.items = u.queue.items[itemsCount:]
return item
}
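
The merge above folds every queued health event into the first one while deduplicating table names, but only once the keyspace is loaded. A standalone sketch of just that merge step, with plain string slices standing in for the queued TabletHealth events:

package main

import "fmt"

// mergeChangedTables folds the table lists of all queued events into the first
// one, skipping duplicates — the same shape as getItemFromQueueLocked above.
func mergeChangedTables(queued [][]string) []string {
	if len(queued) == 0 {
		return nil
	}
	// Copy so the caller's first slice is not mutated by the appends below.
	merged := append([]string(nil), queued[0]...)
	for _, tables := range queued[1:] {
		for _, table := range tables {
			found := false
			for _, existing := range merged {
				if existing == table {
					found = true
					break
				}
			}
			if !found {
				merged = append(merged, table)
			}
		}
	}
	return merged
}

func main() {
	fmt.Println(mergeChangedTables([][]string{{"t1", "t2"}, {"t2", "t3"}, {"t1"}}))
	// Output: [t1 t2 t3]
}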
func (u *updateController) add(th *discovery.TabletHealth) {
// For non-primary tablet health, there is no schema tracking.
if th.Tablet.Type != topodatapb.TabletType_MASTER {
return
}
// Received a health check from a primary tablet that is not reachable from VTGate.
// The connection will get reset and the tracker needs to reload the schema for the keyspace.
if !th.Serving {
u.loaded = false
return
}
u.mu.Lock()
defer u.mu.Unlock()
if len(th.TablesUpdated) == 0 && u.init == nil {
// If the keyspace schema is loaded and no schema change is detected, there is nothing to process.
if len(th.Stats.TableSchemaChanged) == 0 && u.loaded {
return
}
if u.queue == nil {
u.queue = &queue{}
go u.consume()
}
u.queue.items = append(u.queue.items, th)
}
func (u *updateController) setLoaded(loaded bool) {
u.mu.Lock()
defer u.mu.Unlock()
u.loaded = loaded
}


@ -118,7 +118,7 @@ func TestMultipleUpdatesFromDifferentShards(t *testing.T) {
var signalNb, initNb int
var updatedTables []string
update := func(th *discovery.TabletHealth) bool {
updatedTables = th.TablesUpdated
updatedTables = th.Stats.TableSchemaChanged
return !test.updateFail
}
signal := func() {
@ -128,19 +128,18 @@ func TestMultipleUpdatesFromDifferentShards(t *testing.T) {
update: update,
signal: signal,
consumeDelay: 5 * time.Millisecond,
}
if test.init {
kUpdate.init = func(th *discovery.TabletHealth) bool {
reloadKeyspace: func(th *discovery.TabletHealth) bool {
initNb++
return !test.initFail
}
},
loaded: !test.init,
}
for _, in := range test.inputs {
target := &querypb.Target{
Keyspace: "ks",
Shard: in.shard,
Keyspace: "ks",
Shard: in.shard,
TabletType: topodatapb.TabletType_MASTER,
}
tablet := &topodatapb.Tablet{
Keyspace: target.Keyspace,
@ -148,10 +147,10 @@ func TestMultipleUpdatesFromDifferentShards(t *testing.T) {
Type: target.TabletType,
}
d := &discovery.TabletHealth{
Tablet: tablet,
Target: target,
Serving: true,
TablesUpdated: in.tablesUpdates,
Tablet: tablet,
Target: target,
Serving: true,
Stats: &querypb.RealtimeStats{TableSchemaChanged: in.tablesUpdates},
}
if test.delay > 0 {
time.Sleep(test.delay)


@ -162,7 +162,7 @@ type AutoIncrement struct {
}
// BuildVSchema builds a VSchema from a SrvVSchema.
func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema, err error) {
func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) {
vschema = &VSchema{
RoutingRules: make(map[string]*RoutingRule),
uniqueTables: make(map[string]*Table),
@ -173,7 +173,7 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema, err error) {
resolveAutoIncrement(source, vschema)
addDual(vschema)
buildRoutingRule(source, vschema)
return vschema, nil
return vschema
}
// BuildKeyspaceSchema builds the vschema portion for one keyspace.
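
With this signature change, build failures no longer come back as a second return value; callers inspect the per-keyspace Error field instead, as the updated tests below do. A hedged usage sketch under that assumption (the wrapper function is illustrative, not code from this commit):

package example

import (
	"fmt"

	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
	"vitess.io/vitess/go/vt/vtgate/vindexes"
)

// buildAndCheck shows the new calling convention: BuildVSchema always returns a
// *VSchema, and any per-keyspace problem is reported on Keyspaces[ks].Error.
func buildAndCheck(in *vschemapb.SrvVSchema) (*vindexes.VSchema, error) {
	vs := vindexes.BuildVSchema(in)
	for ks, ksSchema := range vs.Keyspaces {
		if ksSchema.Error != nil {
			return nil, fmt.Errorf("keyspace %s has an invalid vschema: %w", ks, ksSchema.Error)
		}
	}
	return vs, nil
}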


@ -206,7 +206,7 @@ func TestUnshardedVSchema(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["unsharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -261,7 +261,7 @@ func TestVSchemaColumns(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["unsharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -326,8 +326,7 @@ func TestVSchemaColumnListAuthoritative(t *testing.T) {
},
},
}
got, err := BuildVSchema(&good)
require.NoError(t, err)
got := BuildVSchema(&good)
ks := &Keyspace{
Name: "unsharded",
}
@ -389,7 +388,7 @@ func TestVSchemaColumnsFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
want := "duplicate column name 'c1' for table: t1"
err := got.Keyspaces["unsharded"].Error
if err == nil || err.Error() != want {
@ -410,7 +409,7 @@ func TestVSchemaPinned(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -486,7 +485,7 @@ func TestShardedVSchemaOwned(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -614,7 +613,7 @@ func TestShardedVSchemaOwnerInfo(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
results := []struct {
@ -714,7 +713,7 @@ func TestVSchemaRoutingRules(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&input)
got := BuildVSchema(&input)
ks1 := &Keyspace{
Name: "ks1",
Sharded: true,
@ -987,8 +986,7 @@ func TestFindBestColVindex(t *testing.T) {
},
},
}
vschema, err := BuildVSchema(testSrvVSchema)
require.NoError(t, err)
vs := BuildVSchema(testSrvVSchema)
testcases := []struct {
tablename string
@ -1011,7 +1009,7 @@ func TestFindBestColVindex(t *testing.T) {
err: "table t2 has no vindex",
}}
for _, tcase := range testcases {
table, err := vschema.FindTable("", tcase.tablename)
table, err := vs.FindTable("", tcase.tablename)
require.NoError(t, err)
cv, err := FindBestColVindex(table)
if err != nil {
@ -1162,7 +1160,7 @@ func TestShardedVSchemaMultiColumnVindex(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -1255,7 +1253,7 @@ func TestShardedVSchemaNotOwned(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
ks := &Keyspace{
@ -1347,7 +1345,7 @@ func TestBuildVSchemaVindexNotFoundFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := `vindexType "noexist" not found`
if err == nil || err.Error() != want {
@ -1371,7 +1369,7 @@ func TestBuildVSchemaNoColumnVindexFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "missing primary col vindex for table: t1"
if err == nil || err.Error() != want {
@ -1404,7 +1402,7 @@ func TestBuildVSchemaDupSeq(t *testing.T) {
ksb := &Keyspace{
Name: "ksb",
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
t1a := &Table{
Name: sqlparser.NewTableIdent("t1"),
Keyspace: ksa,
@ -1473,7 +1471,7 @@ func TestBuildVSchemaDupTable(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
ksa := &Keyspace{
Name: "ksa",
}
@ -1574,7 +1572,7 @@ func TestBuildVSchemaDupVindex(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["ksa"].Error
err1 := got.Keyspaces["ksb"].Error
require.NoError(t, err)
@ -1694,7 +1692,7 @@ func TestBuildVSchemaNoindexFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "vindex notexist not found for table t1"
if err == nil || err.Error() != want {
@ -1726,7 +1724,7 @@ func TestBuildVSchemaColumnAndColumnsFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := `can't use column and columns at the same time in vindex (stfu) and table (t1)`
if err == nil || err.Error() != want {
@ -1756,7 +1754,7 @@ func TestBuildVSchemaNoColumnsFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := `must specify at least one column for vindex (stfu) and table (t1)`
if err == nil || err.Error() != want {
@ -1787,7 +1785,7 @@ func TestBuildVSchemaNotUniqueFail(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "primary vindex stln is not Unique for table t1"
if err == nil || err.Error() != want {
@ -1819,7 +1817,7 @@ func TestBuildVSchemaPrimaryCannotBeOwned(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "primary vindex stlu cannot be owned for table t1"
if err == nil || err.Error() != want {
@ -1876,7 +1874,7 @@ func TestSequence(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&good)
got := BuildVSchema(&good)
err := got.Keyspaces["sharded"].Error
require.NoError(t, err)
err1 := got.Keyspaces["unsharded"].Error
@ -2035,7 +2033,7 @@ func TestBadSequence(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "cannot resolve sequence invalid_seq: table invalid_seq not found"
if err == nil || err.Error() != want {
@ -2083,7 +2081,7 @@ func TestBadSequenceName(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "invalid table name: a.b.seq"
if err == nil || !strings.Contains(err.Error(), want) {
@ -2107,7 +2105,7 @@ func TestBadShardedSequence(t *testing.T) {
},
},
}
got, _ := BuildVSchema(&bad)
got := BuildVSchema(&bad)
err := got.Keyspaces["sharded"].Error
want := "sequence table has to be in an unsharded keyspace or must be pinned: t1"
if err == nil || err.Error() != want {
@ -2155,7 +2153,7 @@ func TestFindTable(t *testing.T) {
},
},
}
vschema, _ := BuildVSchema(&input)
vschema := BuildVSchema(&input)
_, err := vschema.FindTable("", "t1")
require.EqualError(t, err, "ambiguous table reference: t1")
@ -2253,7 +2251,7 @@ func TestFindTableOrVindex(t *testing.T) {
},
},
}
vschema, _ := BuildVSchema(&input)
vschema := BuildVSchema(&input)
ta := vschema.Keyspaces["ksa"].Tables["ta"]
t1 := vschema.Keyspaces["ksb"].Tables["t1"]
@ -2596,7 +2594,7 @@ func TestFindSingleKeyspace(t *testing.T) {
},
},
}
vschema, _ := BuildVSchema(&input)
vschema := BuildVSchema(&input)
none := &Table{
Name: sqlparser.NewTableIdent("none"),
Keyspace: &Keyspace{
@ -2637,7 +2635,7 @@ func TestFindSingleKeyspace(t *testing.T) {
},
},
}
vschema, _ = BuildVSchema(&input)
vschema = BuildVSchema(&input)
_, err := vschema.FindTable("", "none")
wantErr := "table none not found"
if err == nil || err.Error() != wantErr {

Просмотреть файл

@ -18,7 +18,6 @@ package vtgate
import (
"context"
"fmt"
"sync"
"vitess.io/vitess/go/vt/sqlparser"
@ -120,10 +119,10 @@ func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) {
if v == nil {
// We encountered an error, build an empty vschema.
if vm.currentVschema == nil {
vschema, _ = vindexes.BuildVSchema(&vschemapb.SrvVSchema{})
vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{})
}
} else {
vschema, err = vm.buildAndEnhanceVSchema(v)
vschema = vm.buildAndEnhanceVSchema(v)
vm.currentVschema = vschema
}
@ -155,41 +154,30 @@ func (vm *VSchemaManager) Rebuild() {
v := vm.currentSrvVschema
vm.mu.Unlock()
var vschema *vindexes.VSchema
var err error
log.Infof("Received schema update")
if v == nil {
// We encountered an error, we should always have a current vschema
log.Warning("got a schema changed signal with no loaded vschema. if this persist, something is wrong")
vschema, _ = vindexes.BuildVSchema(&vschemapb.SrvVSchema{})
} else {
vschema, err = vm.buildAndEnhanceVSchema(v)
if err != nil {
log.Error("failed to reload vschema after schema change")
return
}
log.Infof("No vschema to enhance")
return
}
vschema := vm.buildAndEnhanceVSchema(v)
vm.mu.Lock()
vm.currentVschema = vschema
vm.mu.Unlock()
if vm.subscriber != nil {
vm.subscriber(vschema, vSchemaStats(err, vschema))
vm.subscriber(vschema, vSchemaStats(nil, vschema))
log.Infof("Sent vschema to subscriber")
}
}
// buildAndEnhanceVSchema builds a new VSchema and uses information from the schema tracker to update it
func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) (*vindexes.VSchema, error) {
vschema, err := vindexes.BuildVSchema(v)
if err == nil {
if vm.schema != nil {
vm.updateFromSchema(vschema)
}
} else {
log.Warningf("Error creating VSchema for cell %v (will try again next update): %v", vm.cell, err)
err = fmt.Errorf("error creating VSchema for cell %v: %v", vm.cell, err)
if vschemaCounters != nil {
vschemaCounters.Add("Parsing", 1)
}
func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vindexes.VSchema {
vschema := vindexes.BuildVSchema(v)
if vm.schema != nil {
vm.updateFromSchema(vschema)
}
return vschema, err
return vschema
}
func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) {

Просмотреть файл

@ -3,8 +3,6 @@ package vtgate
import (
"testing"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/utils"
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
@ -13,13 +11,13 @@ import (
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
func TestWatchSrvVSchema(t *testing.T) {
cols := []vindexes.Column{{
func TestVSchemaUpdate(t *testing.T) {
cols1 := []vindexes.Column{{
Name: sqlparser.NewColIdent("id"),
Type: querypb.Type_INT64,
}}
cols2 := []vindexes.Column{{
Name: sqlparser.NewColIdent("id"),
Name: sqlparser.NewColIdent("uid"),
Type: querypb.Type_INT64,
}, {
Name: sqlparser.NewColIdent("name"),
@ -27,63 +25,76 @@ func TestWatchSrvVSchema(t *testing.T) {
}}
ks := &vindexes.Keyspace{Name: "ks"}
dual := &vindexes.Table{Type: vindexes.TypeReference, Name: sqlparser.NewTableIdent("dual"), Keyspace: ks}
tblNoCol := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, ColumnListAuthoritative: true}
tblCol1 := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols1, ColumnListAuthoritative: true}
tblCol2 := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols2, ColumnListAuthoritative: true}
tblCol2NA := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols2}
tcases := []struct {
name string
srvVschema *vschemapb.SrvVSchema
schema map[string][]vindexes.Column
expected map[string]*vindexes.Table
name string
srvVschema *vschemapb.SrvVSchema
currentVSchema *vindexes.VSchema
schema map[string][]vindexes.Column
expected *vindexes.VSchema
}{{
name: "Single table known by mysql schema and not by vschema",
srvVschema: &vschemapb.SrvVSchema{Keyspaces: map[string]*vschemapb.Keyspace{"ks": {}}},
schema: map[string][]vindexes.Column{"tbl": cols},
expected: map[string]*vindexes.Table{
"dual": dual,
name: "0 Schematracking- 1 srvVSchema",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Name: sqlparser.NewTableIdent("tbl"),
Keyspace: ks,
Columns: cols,
ColumnListAuthoritative: true,
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: false,
},
},
}),
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol2NA}),
}, {
name: "Single table known by both - vschema is not authoritative",
srvVschema: &vschemapb.SrvVSchema{Keyspaces: map[string]*vschemapb.Keyspace{"ks": {
Tables: map[string]*vschemapb.Table{
"tbl": {}, // we know of it, but nothing else
},
}}},
schema: map[string][]vindexes.Column{"tbl": cols},
expected: map[string]*vindexes.Table{
"dual": dual,
"tbl": {
Name: sqlparser.NewTableIdent("tbl"),
Keyspace: ks,
Columns: cols,
ColumnListAuthoritative: true,
},
},
name: "1 Schematracking- 0 srvVSchema",
srvVschema: makeTestSrvVSchema("ks", false, nil),
schema: map[string][]vindexes.Column{"tbl": cols1},
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "Single table known by both - vschema is authoritative",
srvVschema: &vschemapb.SrvVSchema{Keyspaces: map[string]*vschemapb.Keyspace{"ks": {
Tables: map[string]*vschemapb.Table{
"tbl": {
Columns: []*vschemapb.Column{
{Name: "id", Type: querypb.Type_INT64},
{Name: "name", Type: querypb.Type_VARCHAR},
},
ColumnListAuthoritative: true},
},
}}},
schema: map[string][]vindexes.Column{"tbl": cols},
expected: map[string]*vindexes.Table{
"dual": dual,
name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "1 Schematracking - 1 srvVSchema (have columns) not authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Name: sqlparser.NewTableIdent("tbl"),
Keyspace: ks,
Columns: cols2,
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: false,
},
}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "1 Schematracking - 1 srvVSchema (no columns) authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {
ColumnListAuthoritative: true,
}}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblNoCol}),
}, {
name: "1 Schematracking - 1 srvVSchema (have columns) authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: true,
},
},
}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema tracker will be ignored for authoritative tables.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol2}),
}, {
name: "srvVschema received as nil",
schema: map[string][]vindexes.Column{"tbl": cols1},
expected: makeTestEmptyVSchema(),
}, {
name: "srvVschema received as nil - have existing vschema",
currentVSchema: &vindexes.VSchema{},
schema: map[string][]vindexes.Column{"tbl": cols1},
expected: &vindexes.VSchema{},
}}
vm := &VSchemaManager{}
@ -92,30 +103,152 @@ func TestWatchSrvVSchema(t *testing.T) {
vs = vschema
}
for _, tcase := range tcases {
t.Run("VSchemaUpdate - "+tcase.name, func(t *testing.T) {
t.Run(tcase.name, func(t *testing.T) {
vs = nil
vm.schema = &fakeSchema{t: tcase.schema}
vm.currentSrvVschema = nil
vm.currentVschema = tcase.currentVSchema
vm.VSchemaUpdate(tcase.srvVschema, nil)
require.NotNil(t, vs)
ks := vs.Keyspaces["ks"]
require.NotNil(t, ks, "keyspace was not found")
utils.MustMatch(t, tcase.expected, ks.Tables)
utils.MustMatchFn(".uniqueTables", ".uniqueVindexes")(t, tcase.expected, vs)
if tcase.srvVschema != nil {
utils.MustMatch(t, vs, vm.currentVschema, "currentVschema should have same reference as Vschema")
}
})
t.Run("Schema updated - "+tcase.name, func(t *testing.T) {
}
}
func TestRebuildVSchema(t *testing.T) {
cols1 := []vindexes.Column{{
Name: sqlparser.NewColIdent("id"),
Type: querypb.Type_INT64,
}}
cols2 := []vindexes.Column{{
Name: sqlparser.NewColIdent("uid"),
Type: querypb.Type_INT64,
}, {
Name: sqlparser.NewColIdent("name"),
Type: querypb.Type_VARCHAR,
}}
ks := &vindexes.Keyspace{Name: "ks"}
dual := &vindexes.Table{Type: vindexes.TypeReference, Name: sqlparser.NewTableIdent("dual"), Keyspace: ks}
tblNoCol := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, ColumnListAuthoritative: true}
tblCol1 := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols1, ColumnListAuthoritative: true}
tblCol2 := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols2, ColumnListAuthoritative: true}
tblCol2NA := &vindexes.Table{Name: sqlparser.NewTableIdent("tbl"), Keyspace: ks, Columns: cols2}
tcases := []struct {
name string
srvVschema *vschemapb.SrvVSchema
schema map[string][]vindexes.Column
expected *vindexes.VSchema
}{{
name: "0 Schematracking- 1 srvVSchema",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: false,
},
}),
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol2NA}),
}, {
name: "1 Schematracking- 0 srvVSchema",
srvVschema: makeTestSrvVSchema("ks", false, nil),
schema: map[string][]vindexes.Column{"tbl": cols1},
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "1 Schematracking - 1 srvVSchema (have columns) not authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: false,
},
}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol1}),
}, {
name: "1 Schematracking - 1 srvVSchema (no columns) authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {
ColumnListAuthoritative: true,
}}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema will override what srvSchema has.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblNoCol}),
}, {
name: "1 Schematracking - 1 srvVSchema (have columns) authoritative",
srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{
"tbl": {
Columns: []*vschemapb.Column{{Name: "uid", Type: querypb.Type_INT64}, {Name: "name", Type: querypb.Type_VARCHAR}},
ColumnListAuthoritative: true,
},
}),
schema: map[string][]vindexes.Column{"tbl": cols1},
// schema tracker will be ignored for authoritative tables.
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"dual": dual, "tbl": tblCol2}),
}, {
name: "srvVschema received as nil",
schema: map[string][]vindexes.Column{"tbl": cols1},
}}
vm := &VSchemaManager{}
var vs *vindexes.VSchema
vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) {
vs = vschema
}
for _, tcase := range tcases {
t.Run(tcase.name, func(t *testing.T) {
vs = nil
vm.schema = &fakeSchema{t: tcase.schema}
vm.currentSrvVschema = tcase.srvVschema
vm.currentVschema = nil
vm.Rebuild()
require.NotNil(t, vs)
ks := vs.Keyspaces["ks"]
require.NotNil(t, ks, "keyspace was not found")
utils.MustMatch(t, tcase.expected, ks.Tables)
utils.MustMatchFn(".uniqueTables", ".uniqueVindexes")(t, tcase.expected, vs)
if vs != nil {
utils.MustMatch(t, vs, vm.currentVschema, "currentVschema should have same reference as Vschema")
}
})
}
}
func makeTestVSchema(ks string, sharded bool, tbls map[string]*vindexes.Table) *vindexes.VSchema {
kSchema := &vindexes.KeyspaceSchema{
Keyspace: &vindexes.Keyspace{
Name: ks,
Sharded: sharded,
},
Tables: tbls,
Vindexes: map[string]vindexes.Vindex{},
}
vs := makeTestEmptyVSchema()
vs.Keyspaces[ks] = kSchema
return vs
}
func makeTestEmptyVSchema() *vindexes.VSchema {
return &vindexes.VSchema{
RoutingRules: map[string]*vindexes.RoutingRule{},
Keyspaces: map[string]*vindexes.KeyspaceSchema{},
}
}
func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Table) *vschemapb.SrvVSchema {
kSchema := &vschemapb.Keyspace{
Sharded: sharded,
Tables: tbls,
}
return &vschemapb.SrvVSchema{
Keyspaces: map[string]*vschemapb.Keyspace{ks: kSchema},
}
}
type fakeSchema struct {
t map[string][]vindexes.Column
}


@ -311,7 +311,7 @@ func resolveAndLoadKeyspace(ctx context.Context, srvResolver *srvtopo.Resolver,
return
case <-time.After(500 * time.Millisecond):
for _, shard := range dest {
err := st.LoadKeyspace(gw, shard.Target)
err := st.AddNewKeyspace(gw, shard.Target)
if err == nil {
return
}


@ -357,6 +357,13 @@ func expectLogs(ctx context.Context, t *testing.T, query string, eventCh chan []
if !ok {
t.Fatal("expectLogs: not ok, stream ended early")
}
// Ignore unrelated gtid progress events that can race with the events that the test expects
if len(allevs) == 3 &&
allevs[0].Type == binlogdatapb.VEventType_BEGIN &&
allevs[1].Type == binlogdatapb.VEventType_GTID &&
allevs[2].Type == binlogdatapb.VEventType_COMMIT {
continue
}
for _, ev := range allevs {
// Ignore spurious heartbeats that can happen on slow machines.
if ev.Type == binlogdatapb.VEventType_HEARTBEAT {


@ -17,10 +17,10 @@ limitations under the License.
package grpcqueryservice
import (
"google.golang.org/grpc"
"context"
"google.golang.org/grpc"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/callerid"
"vitess.io/vitess/go/vt/callinfo"


@ -675,18 +675,18 @@ func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, target *querypb.Ta
return err
}
for {
r, err := stream.Recv()
r := binlogdatapb.VStreamRowsResponseFromVTPool()
err := stream.RecvMsg(r)
if err != nil {
return tabletconn.ErrorFromGRPC(err)
}
select {
case <-ctx.Done():
if ctx.Err() != nil {
return ctx.Err()
default:
}
if err := send(r); err != nil {
return err
}
r.ReturnToVTPool()
}
}
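
This hunk switches VStreamRows to vtprotobuf message pooling: each response is taken from the pool, decoded in place with RecvMsg, handed to the caller, and returned to the pool. A hedged sketch of the same receive loop in isolation; the stream interface and send callback are stand-ins for the real gRPC stream and tabletconn plumbing.

package example

import (
	"context"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)

// rowStream is a minimal stand-in for the gRPC client stream used by VStreamRows.
type rowStream interface {
	RecvMsg(m interface{}) error
}

// receivePooled mirrors the pooled receive loop added above: allocate each
// response from the vtprotobuf pool, hand it to send, then return it to the pool.
func receivePooled(ctx context.Context, stream rowStream, send func(*binlogdatapb.VStreamRowsResponse) error) error {
	for {
		r := binlogdatapb.VStreamRowsResponseFromVTPool()
		if err := stream.RecvMsg(r); err != nil {
			return err
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if err := send(r); err != nil {
			return err
		}
		r.ReturnToVTPool()
	}
}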


@ -736,8 +736,11 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int, pos string) error {
select {
case <-ctx.Done():
log.Errorf("Error waiting for pos: %s, last pos: %s: %v, wait time: %v", pos, qr.Rows[0][0].ToString(), ctx.Err(), time.Since(start))
return fmt.Errorf("error waiting for pos: %s, last pos: %s: %v, wait time: %v", pos, qr.Rows[0][0].ToString(), ctx.Err(), time.Since(start))
err = fmt.Errorf("error waiting for pos: %s, last pos: %s: %v, wait time: %v: %s",
pos, qr.Rows[0][0].ToString(), ctx.Err(), time.Since(start),
"possibly no tablets are available to stream in the source keyspace for your cell and tablet_types setting")
log.Error(err.Error())
return err
case <-vre.ctx.Done():
return fmt.Errorf("vreplication is closing: %v", vre.ctx.Err())
case <-tkr.C:


@ -53,7 +53,7 @@ type ReplicatorPlan struct {
VStreamFilter *binlogdatapb.Filter
TargetTables map[string]*TablePlan
TablePlans map[string]*TablePlan
PKInfoMap map[string][]*PrimaryKeyInfo
ColInfoMap map[string][]*ColumnInfo
stats *binlogplayer.Stats
}
@ -94,13 +94,26 @@ func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent
// requires us to wait for the field info sent by the source.
func (rp *ReplicatorPlan) buildFromFields(tableName string, lastpk *sqltypes.Result, fields []*querypb.Field) (*TablePlan, error) {
tpb := &tablePlanBuilder{
name: sqlparser.NewTableIdent(tableName),
lastpk: lastpk,
pkInfos: rp.PKInfoMap[tableName],
stats: rp.stats,
name: sqlparser.NewTableIdent(tableName),
lastpk: lastpk,
colInfos: rp.ColInfoMap[tableName],
stats: rp.stats,
}
for _, field := range fields {
colName := sqlparser.NewColIdent(field.Name)
isGenerated := false
for _, colInfo := range tpb.colInfos {
if !strings.EqualFold(colInfo.Name, field.Name) {
continue
}
if colInfo.IsGenerated {
isGenerated = true
}
break
}
if isGenerated {
continue
}
cexpr := &colExpr{
colName: colName,
colType: field.Type,
@ -114,7 +127,7 @@ func (rp *ReplicatorPlan) buildFromFields(tableName string, lastpk *sqltypes.Res
tpb.colExprs = append(tpb.colExprs, cexpr)
}
// The following actions are a subset of buildTablePlan.
if err := tpb.analyzePK(rp.PKInfoMap); err != nil {
if err := tpb.analyzePK(rp.ColInfoMap); err != nil {
return nil, err
}
return tpb.generate(), nil
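
buildFromFields above now consults the table's ColumnInfo list and drops generated columns before building column expressions, matching names case-insensitively. A minimal sketch of that filtering step in isolation, assuming only the Name and IsGenerated fields introduced in this commit and using plain strings in place of querypb.Field:

package main

import (
	"fmt"
	"strings"
)

// ColumnInfo mirrors just the two fields the filter needs.
type ColumnInfo struct {
	Name        string
	IsGenerated bool
}

// filterGenerated returns the field names that should participate in the
// replication plan, skipping any column the schema marks as generated.
// The comparison is case-insensitive, as MySQL column names are.
func filterGenerated(fields []string, colInfos []*ColumnInfo) []string {
	kept := make([]string, 0, len(fields))
	for _, f := range fields {
		skip := false
		for _, ci := range colInfos {
			if strings.EqualFold(ci.Name, f) {
				skip = ci.IsGenerated
				break
			}
		}
		if !skip {
			kept = append(kept, f)
		}
	}
	return kept
}

func main() {
	cols := []*ColumnInfo{{Name: "id"}, {Name: "val"}, {Name: "val2", IsGenerated: true}}
	fmt.Println(filterGenerated([]string{"id", "val", "VAL2"}, cols)) // [id val]
}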
@ -183,6 +196,7 @@ type TablePlan struct {
// a primary key column (row move).
PKReferences []string
Stats *binlogplayer.Stats
FieldsToSkip map[string]bool
}
// MarshalJSON performs a custom JSON Marshalling.
@ -220,7 +234,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows *binlogdatap
if i > 0 {
sqlbuffer.WriteString(", ")
}
if err := tp.BulkInsertValues.AppendFromRow(sqlbuffer, tp.Fields, row); err != nil {
if err := tp.BulkInsertValues.AppendFromRow(sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil {
return nil, err
}
}


@ -669,8 +669,8 @@ func TestBuildPlayerPlan(t *testing.T) {
err: "group by expression is not allowed to reference an aggregate expression: a",
}}
PrimaryKeyInfos := map[string][]*PrimaryKeyInfo{
"t1": {&PrimaryKeyInfo{Name: "c1"}},
PrimaryKeyInfos := map[string][]*ColumnInfo{
"t1": {&ColumnInfo{Name: "c1", IsPK: true}},
}
copyState := map[string]*sqltypes.Result{
@ -711,9 +711,9 @@ func TestBuildPlayerPlan(t *testing.T) {
}
func TestBuildPlayerPlanNoDup(t *testing.T) {
PrimaryKeyInfos := map[string][]*PrimaryKeyInfo{
"t1": {&PrimaryKeyInfo{Name: "c1"}},
"t2": {&PrimaryKeyInfo{Name: "c2"}},
PrimaryKeyInfos := map[string][]*ColumnInfo{
"t1": {&ColumnInfo{Name: "c1"}},
"t2": {&ColumnInfo{Name: "c2"}},
}
input := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
@ -732,9 +732,9 @@ func TestBuildPlayerPlanNoDup(t *testing.T) {
}
func TestBuildPlayerPlanExclude(t *testing.T) {
PrimaryKeyInfos := map[string][]*PrimaryKeyInfo{
"t1": {&PrimaryKeyInfo{Name: "c1"}},
"t2": {&PrimaryKeyInfo{Name: "c2"}},
PrimaryKeyInfos := map[string][]*ColumnInfo{
"t1": {&ColumnInfo{Name: "c1"}},
"t2": {&ColumnInfo{Name: "c2"}},
}
input := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{


@ -113,6 +113,21 @@ func (st *vrStats) register() {
return result
})
stats.NewRateFunc(
"VReplicationLag",
"vreplication lag per stream",
func() map[string][]float64 {
st.mu.Lock()
defer st.mu.Unlock()
result := make(map[string][]float64)
for _, ct := range st.controllers {
for k, v := range ct.blpStats.VReplicationLagRates.Get() {
result[k] = v
}
}
return result
})
stats.Publish("VReplicationSource", stats.StringMapFunc(func() map[string]string {
st.mu.Lock()
defer st.mu.Unlock()


@ -52,7 +52,7 @@ type tablePlanBuilder struct {
onInsert insertType
pkCols []*colExpr
lastpk *sqltypes.Result
pkInfos []*PrimaryKeyInfo
colInfos []*ColumnInfo
stats *binlogplayer.Stats
}
@ -112,7 +112,7 @@ const (
// a table-specific rule is built to be sent to the source. We don't send the
// original rule to the source because it may not match the same tables as the
// target.
// pkInfoMap specifies the list of primary key columns for each table.
// colInfoMap specifies the column info, including the primary key columns, for each table.
// copyState is a map of tables that have not been fully copied yet.
// If a table is not present in copyState, then it has been fully copied. If so,
// all replication events are applied. The table still has to match a Filter.Rule.
@ -123,15 +123,15 @@ const (
// The TablePlan built is a partial plan. The full plan for a table is built
// when we receive field information from events or rows sent by the source.
// buildExecutionPlan is the function that builds the full plan.
func buildReplicatorPlan(filter *binlogdatapb.Filter, pkInfoMap map[string][]*PrimaryKeyInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats) (*ReplicatorPlan, error) {
func buildReplicatorPlan(filter *binlogdatapb.Filter, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats) (*ReplicatorPlan, error) {
plan := &ReplicatorPlan{
VStreamFilter: &binlogdatapb.Filter{FieldEventMode: filter.FieldEventMode},
TargetTables: make(map[string]*TablePlan),
TablePlans: make(map[string]*TablePlan),
PKInfoMap: pkInfoMap,
ColInfoMap: colInfoMap,
stats: stats,
}
for tableName := range pkInfoMap {
for tableName := range colInfoMap {
lastpk, ok := copyState[tableName]
if ok && lastpk == nil {
// Don't replicate uncopied tables.
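
As the comment block above describes, copyState distinguishes three cases per table: a table absent from copyState is fully copied and gets all replication events, a table present with a nil lastpk has not been copied yet and is skipped, and a table present with a lastpk is partially copied. A minimal sketch of that decision, with the sqltypes.Result lastpk replaced by a hypothetical placeholder type:

package main

import "fmt"

// LastPK is a hypothetical placeholder for the sqltypes.Result that holds
// the last copied primary key of a partially copied table.
type LastPK struct{ Key string }

// copyAction mirrors the three copyState cases described in the comment above.
func copyAction(table string, copyState map[string]*LastPK) string {
	lastpk, ok := copyState[table]
	switch {
	case !ok:
		return "fully copied: apply all replication events"
	case lastpk == nil:
		return "not copied yet: do not replicate"
	default:
		return fmt.Sprintf("partially copied: replicate rows up to lastpk %q", lastpk.Key)
	}
}

func main() {
	state := map[string]*LastPK{
		"t2": nil,            // copy has not started
		"t3": {Key: "id=42"}, // copy in progress
	}
	for _, t := range []string{"t1", "t2", "t3"} {
		fmt.Println(t+":", copyAction(t, state))
	}
}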
@ -144,7 +144,7 @@ func buildReplicatorPlan(filter *binlogdatapb.Filter, pkInfoMap map[string][]*Pr
if rule == nil {
continue
}
tablePlan, err := buildTablePlan(tableName, rule.Filter, pkInfoMap, lastpk, stats)
tablePlan, err := buildTablePlan(tableName, rule.Filter, colInfoMap, lastpk, stats)
if err != nil {
return nil, err
}
@ -183,7 +183,7 @@ func MatchTable(tableName string, filter *binlogdatapb.Filter) (*binlogdatapb.Ru
return nil, nil
}
func buildTablePlan(tableName, filter string, pkInfoMap map[string][]*PrimaryKeyInfo, lastpk *sqltypes.Result, stats *binlogplayer.Stats) (*TablePlan, error) {
func buildTablePlan(tableName, filter string, colInfoMap map[string][]*ColumnInfo, lastpk *sqltypes.Result, stats *binlogplayer.Stats) (*TablePlan, error) {
query := filter
// generate equivalent select statement if filter is empty or a keyrange.
fmt.Printf("============ filter: %v\n", filter)
@ -234,7 +234,7 @@ func buildTablePlan(tableName, filter string, pkInfoMap map[string][]*PrimaryKey
},
selColumns: make(map[string]bool),
lastpk: lastpk,
pkInfos: pkInfoMap[tableName],
colInfos: colInfoMap[tableName],
stats: stats,
}
@ -259,7 +259,7 @@ func buildTablePlan(tableName, filter string, pkInfoMap map[string][]*PrimaryKey
if err := tpb.analyzeGroupBy(sel.GroupBy); err != nil {
return nil, err
}
if err := tpb.analyzePK(pkInfoMap); err != nil {
if err := tpb.analyzePK(colInfoMap); err != nil {
return nil, err
}
@ -301,6 +301,13 @@ func (tpb *tablePlanBuilder) generate() *TablePlan {
bvf := &bindvarFormatter{}
fieldsToSkip := make(map[string]bool)
for _, colInfo := range tpb.colInfos {
if colInfo.IsGenerated {
fieldsToSkip[colInfo.Name] = true
}
}
return &TablePlan{
TargetName: tpb.name.String(),
Lastpk: tpb.lastpk,
@ -312,6 +319,7 @@ func (tpb *tablePlanBuilder) generate() *TablePlan {
Delete: tpb.generateDeleteStatement(),
PKReferences: pkrefs,
Stats: tpb.stats,
FieldsToSkip: fieldsToSkip,
}
}
@ -528,22 +536,25 @@ func (tpb *tablePlanBuilder) analyzeGroupBy(groupBy sqlparser.GroupBy) error {
}
// analyzePK builds tpb.pkCols.
func (tpb *tablePlanBuilder) analyzePK(pkInfoMap map[string][]*PrimaryKeyInfo) error {
pkcols, ok := pkInfoMap[tpb.name.String()]
func (tpb *tablePlanBuilder) analyzePK(colInfoMap map[string][]*ColumnInfo) error {
cols, ok := colInfoMap[tpb.name.String()]
if !ok {
return fmt.Errorf("table %s not found in schema", tpb.name)
}
for _, pkcol := range pkcols {
cexpr := tpb.findCol(sqlparser.NewColIdent(pkcol.Name))
for _, col := range cols {
if !col.IsPK {
continue
}
cexpr := tpb.findCol(sqlparser.NewColIdent(col.Name))
if cexpr == nil {
return fmt.Errorf("primary key column %v not found in select list", pkcol)
return fmt.Errorf("primary key column %v not found in select list", col)
}
if cexpr.operation != opExpr {
return fmt.Errorf("primary key column %v is not allowed to reference an aggregate expression", pkcol)
return fmt.Errorf("primary key column %v is not allowed to reference an aggregate expression", col)
}
cexpr.isPK = true
cexpr.dataType = pkcol.DataType
cexpr.columnType = pkcol.ColumnType
cexpr.dataType = col.DataType
cexpr.columnType = col.ColumnType
tpb.pkCols = append(tpb.pkCols, cexpr)
}
return nil
@ -585,6 +596,9 @@ func (tpb *tablePlanBuilder) generateInsertPart(buf *sqlparser.TrackedBuffer) *s
}
separator := ""
for _, cexpr := range tpb.colExprs {
if tpb.isColumnGenerated(cexpr.colName) {
continue
}
buf.Myprintf("%s%v", separator, cexpr.colName)
separator = ","
}
@ -596,6 +610,9 @@ func (tpb *tablePlanBuilder) generateValuesPart(buf *sqlparser.TrackedBuffer, bv
bvf.mode = bvAfter
separator := "("
for _, cexpr := range tpb.colExprs {
if tpb.isColumnGenerated(cexpr.colName) {
continue
}
buf.Myprintf("%s", separator)
separator = ","
switch cexpr.operation {
@ -621,6 +638,9 @@ func (tpb *tablePlanBuilder) generateSelectPart(buf *sqlparser.TrackedBuffer, bv
buf.WriteString(" select ")
separator := ""
for _, cexpr := range tpb.colExprs {
if tpb.isColumnGenerated(cexpr.colName) {
continue
}
buf.Myprintf("%s", separator)
separator = ", "
switch cexpr.operation {
@ -654,6 +674,9 @@ func (tpb *tablePlanBuilder) generateOnDupPart(buf *sqlparser.TrackedBuffer) *sq
if cexpr.isGrouped || cexpr.isPK {
continue
}
if tpb.isColumnGenerated(cexpr.colName) {
continue
}
buf.Myprintf("%s%v=", separator, cexpr.colName)
separator = ", "
switch cexpr.operation {
@ -681,6 +704,9 @@ func (tpb *tablePlanBuilder) generateUpdateStatement() *sqlparser.ParsedQuery {
if cexpr.isGrouped || cexpr.isPK {
continue
}
if tpb.isColumnGenerated(cexpr.colName) {
continue
}
buf.Myprintf("%s%v=", separator, cexpr.colName)
separator = ", "
switch cexpr.operation {
@ -761,13 +787,13 @@ func (tpb *tablePlanBuilder) generateWhere(buf *sqlparser.TrackedBuffer, bvf *bi
}
func (tpb *tablePlanBuilder) getCharsetAndCollation(pkname string) (charSet string, collation string) {
for _, pkInfo := range tpb.pkInfos {
if strings.EqualFold(pkInfo.Name, pkname) {
if pkInfo.CharSet != "" {
charSet = fmt.Sprintf(" _%s ", pkInfo.CharSet)
for _, colInfo := range tpb.colInfos {
if colInfo.IsPK && strings.EqualFold(colInfo.Name, pkname) {
if colInfo.CharSet != "" {
charSet = fmt.Sprintf(" _%s ", colInfo.CharSet)
}
if pkInfo.Collation != "" {
collation = fmt.Sprintf(" COLLATE %s ", pkInfo.Collation)
if colInfo.Collation != "" {
collation = fmt.Sprintf(" COLLATE %s ", colInfo.Collation)
}
}
}
@ -798,6 +824,15 @@ func (tpb *tablePlanBuilder) generatePKConstraint(buf *sqlparser.TrackedBuffer,
buf.WriteString(")")
}
func (tpb *tablePlanBuilder) isColumnGenerated(col sqlparser.ColIdent) bool {
for _, colInfo := range tpb.colInfos {
if col.EqualString(colInfo.Name) && colInfo.IsGenerated {
return true
}
}
return false
}
// bindvarFormatter is a dual mode formatter. Its behavior
// can be changed dynamically to generate bind vars
// for the 'before' row or 'after' row by setting its mode


@ -57,7 +57,7 @@ func newVCopier(vr *vreplicator) *vcopier {
func (vc *vcopier) initTablesForCopy(ctx context.Context) error {
defer vc.vr.dbClient.Rollback()
plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.pkInfoMap, nil, vc.vr.stats)
plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.colInfoMap, nil, vc.vr.stats)
if err != nil {
return err
}
@ -201,7 +201,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma
log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName])
fmt.Printf("============ buildReplicatorPlan( ivc.vr.source.Filter: %v\n", vc.vr.source.Filter)
plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.pkInfoMap, nil, vc.vr.stats)
plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.colInfoMap, nil, vc.vr.stats)
if err != nil {
return err
}
@ -246,13 +246,13 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma
}
fieldEvent := &binlogdatapb.FieldEvent{
TableName: initialPlan.SendRule.Match,
Fields: rows.Fields,
}
fieldEvent.Fields = append(fieldEvent.Fields, rows.Fields...)
vc.tablePlan, err = plan.buildExecutionPlan(fieldEvent)
if err != nil {
return err
}
pkfields = rows.Pkfields
pkfields = append(pkfields, rows.Pkfields...)
buf := sqlparser.NewTrackedBuffer(nil)
buf.Myprintf("update _vt.copy_state set lastpk=%a where vrepl_id=%s and table_name=%s", ":lastpk", strconv.Itoa(int(vc.vr.id)), encodeString(tableName))
updateCopyState = buf.ParsedQuery()


@ -1280,3 +1280,84 @@ func TestPlayerCopyTableCancel(t *testing.T) {
{"2", "bbb"},
})
}
func TestPlayerCopyTablesWithGeneratedColumn(t *testing.T) {
flavor := strings.ToLower(env.Flavor)
// Skip this test on Percona (which identifies as mysql56) and MariaDB platforms in CI, since generated column
// support was only added in MySQL 5.7 and MariaDB added MySQL-compatible generated columns in 10.2
if !strings.Contains(flavor, "mysql57") && !strings.Contains(flavor, "mysql80") {
return
}
defer deleteTablet(addTablet(100))
execStatements(t, []string{
"create table src1(id int, val varbinary(128), val2 varbinary(128) as (concat(id, val)), val3 varbinary(128) as (concat(val, id)), id2 int, primary key(id))",
"insert into src1(id, val, id2) values(2, 'bbb', 20), (1, 'aaa', 10)",
fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), val2 varbinary(128) as (concat(id, val)), val3 varbinary(128), id2 int, primary key(id))", vrepldb),
"create table src2(id int, val varbinary(128), val2 varbinary(128) as (concat(id, val)), val3 varbinary(128) as (concat(val, id)), id2 int, primary key(id))",
"insert into src2(id, val, id2) values(2, 'bbb', 20), (1, 'aaa', 10)",
fmt.Sprintf("create table %s.dst2(val3 varbinary(128), val varbinary(128), id2 int)", vrepldb),
})
defer execStatements(t, []string{
"drop table src1",
fmt.Sprintf("drop table %s.dst1", vrepldb),
"drop table src2",
fmt.Sprintf("drop table %s.dst2", vrepldb),
})
env.SchemaEngine.Reload(context.Background())
filter := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
Match: "dst1",
Filter: "select * from src1",
}, {
Match: "dst2",
Filter: "select val3, val, id2 from src2",
}},
}
bls := &binlogdatapb.BinlogSource{
Keyspace: env.KeyspaceName,
Shard: env.ShardName,
Filter: filter,
OnDdl: binlogdatapb.OnDDLAction_IGNORE,
}
query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName)
qr, err := playerEngine.Exec(query)
if err != nil {
t.Fatal(err)
}
defer func() {
query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID)
if _, err := playerEngine.Exec(query); err != nil {
t.Fatal(err)
}
expectDeleteQueries(t)
}()
expectNontxQueries(t, []string{
// Create the list of tables to copy and transition to Copying state.
"/insert into _vt.vreplication",
"/update _vt.vreplication set message=",
"/insert into _vt.copy_state",
"/update _vt.vreplication set state",
// The first fast-forward has no starting point. So, it just saves the current position.
"insert into dst1(id,val,val3,id2) values (1,'aaa','aaa1',10), (2,'bbb','bbb2',20)",
`/update _vt.copy_state set lastpk='fields:<name:\\"id\\" type:INT32 > rows:<lengths:1 values:\\"2\\" > ' where vrepl_id=.*`,
// copy of dst1 is done: delete from copy_state.
"/delete from _vt.copy_state.*dst1",
"insert into dst2(val3,val,id2) values ('aaa1','aaa',10), ('bbb2','bbb',20)",
`/update _vt.copy_state set lastpk='fields:<name:\\"id\\" type:INT32 > rows:<lengths:1 values:\\"2\\" > ' where vrepl_id=.*`,
// copy of dst2 is done: delete from copy_state.
"/delete from _vt.copy_state.*dst2",
"/update _vt.vreplication set state",
})
expectData(t, "dst1", [][]string{
{"1", "aaa", "1aaa", "aaa1", "10"},
{"2", "bbb", "2bbb", "bbb2", "20"},
})
expectData(t, "dst2", [][]string{
{"aaa1", "aaa", "10"},
{"bbb2", "bbb", "20"},
})
}


@ -21,6 +21,7 @@ import (
"fmt"
"io"
"math"
"strconv"
"strings"
"time"
@ -105,7 +106,7 @@ func (vp *vplayer) play(ctx context.Context) error {
return nil
}
plan, err := buildReplicatorPlan(vp.vr.source.Filter, vp.vr.pkInfoMap, vp.copyState, vp.vr.stats)
plan, err := buildReplicatorPlan(vp.vr.source.Filter, vp.vr.colInfoMap, vp.copyState, vp.vr.stats)
if err != nil {
vp.vr.stats.ErrorCounts.Add([]string{"Plan"}, 1)
return err
@ -331,6 +332,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error {
// TODO(sougou): if we also stored the time of the last event, we
// can estimate this value more accurately.
defer vp.vr.stats.SecondsBehindMaster.Set(math.MaxInt64)
defer vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), math.MaxInt64)
var sbm int64 = -1
for {
// check throttler.
@ -347,6 +349,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error {
if len(items) == 0 {
behind := time.Now().UnixNano() - vp.lastTimestampNs - vp.timeOffsetNs
vp.vr.stats.SecondsBehindMaster.Set(behind / 1e9)
vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), time.Duration(behind/1e9)*time.Second)
}
// Empty transactions are saved at most once every idleTimeout.
// This covers two situations:
@ -401,6 +404,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error {
}
if sbm >= 0 {
vp.vr.stats.SecondsBehindMaster.Set(sbm)
vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), time.Duration(sbm)*time.Second)
}
}
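
Both new call sites above feed the VReplicationLags stat: when the relay log is empty, lag is derived from the timestamp of the last applied event (nanoseconds truncated to whole seconds), otherwise from the seconds-behind-master value reported with the transaction. A minimal sketch of that conversion using only the standard library; the vplayer struct fields are replaced with plain arguments:

package main

import (
	"fmt"
	"time"
)

// lagFromTimestamps reproduces the arithmetic used above: the difference
// between now and the last applied event timestamp (both in nanoseconds),
// adjusted by the source/target clock offset, truncated to whole seconds.
func lagFromTimestamps(nowNs, lastTimestampNs, timeOffsetNs int64) time.Duration {
	behindNs := nowNs - lastTimestampNs - timeOffsetNs
	return time.Duration(behindNs/1e9) * time.Second
}

func main() {
	now := time.Now()
	lastEvent := now.Add(-2500 * time.Millisecond)
	lag := lagFromTimestamps(now.UnixNano(), lastEvent.UnixNano(), 0)
	fmt.Println(lag) // 2s: the sub-second remainder is truncated, as in the code above
}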


@ -2618,6 +2618,94 @@ func TestVReplicationLogs(t *testing.T) {
}
}
func TestGeneratedColumns(t *testing.T) {
flavor := strings.ToLower(env.Flavor)
// Skip this test on Percona (which identifies as mysql56) and MariaDB platforms in CI, since generated column
// support was only added in MySQL 5.7 and MariaDB added MySQL-compatible generated columns in 10.2
if !strings.Contains(flavor, "mysql57") && !strings.Contains(flavor, "mysql80") {
return
}
defer deleteTablet(addTablet(100))
execStatements(t, []string{
"create table t1(id int, val varbinary(6), val2 varbinary(6) as (concat(id, val)), val3 varbinary(6) as (concat(val, id)), id2 int, primary key(id))",
fmt.Sprintf("create table %s.t1(id int, val varbinary(6), val2 varbinary(6) as (concat(id, val)), val3 varbinary(6), id2 int, primary key(id))", vrepldb),
"create table t2(id int, val varbinary(128), val2 varbinary(128) as (concat(id, val)) stored, val3 varbinary(128) as (concat(val, id)), id2 int, primary key(id))",
fmt.Sprintf("create table %s.t2(id int, val3 varbinary(128), val varbinary(128), id2 int, primary key(id))", vrepldb),
})
defer execStatements(t, []string{
"drop table t1",
fmt.Sprintf("drop table %s.t1", vrepldb),
"drop table t2",
fmt.Sprintf("drop table %s.t2", vrepldb),
})
env.SchemaEngine.Reload(context.Background())
filter := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
Match: "t1",
Filter: "select * from t1",
}, {
Match: "t2",
Filter: "select id, val3, val, id2 from t2",
}},
}
bls := &binlogdatapb.BinlogSource{
Keyspace: env.KeyspaceName,
Shard: env.ShardName,
Filter: filter,
OnDdl: binlogdatapb.OnDDLAction_IGNORE,
}
cancel, _ := startVReplication(t, bls, "")
defer cancel()
testcases := []struct {
input string
output string
table string
data [][]string
}{{
input: "insert into t1(id, val, id2) values (1, 'aaa', 10)",
output: "insert into t1(id,val,val3,id2) values (1,'aaa','aaa1',10)",
table: "t1",
data: [][]string{
{"1", "aaa", "1aaa", "aaa1", "10"},
},
}, {
input: "update t1 set val = 'bbb', id2 = 11 where id = 1",
output: "update t1 set val='bbb', val3='bbb1', id2=11 where id=1",
table: "t1",
data: [][]string{
{"1", "bbb", "1bbb", "bbb1", "11"},
},
}, {
input: "insert into t2(id, val, id2) values (1, 'aaa', 10)",
output: "insert into t2(id,val3,val,id2) values (1,'aaa1','aaa',10)",
table: "t2",
data: [][]string{
{"1", "aaa1", "aaa", "10"},
},
}, {
input: "update t2 set val = 'bbb', id2 = 11 where id = 1",
output: "update t2 set val3='bbb1', val='bbb', id2=11 where id=1",
table: "t2",
data: [][]string{
{"1", "bbb1", "bbb", "11"},
},
}}
for _, tcases := range testcases {
execStatements(t, []string{tcases.input})
output := []string{
tcases.output,
}
expectNontxQueries(t, output)
if tcases.table != "" {
expectData(t, tcases.table, tcases.data)
}
}
}
func expectJSON(t *testing.T, table string, values [][]string, id int, exec func(ctx context.Context, query string) (*sqltypes.Result, error)) {
t.Helper()


@ -78,8 +78,8 @@ type vreplicator struct {
state string
stats *binlogplayer.Stats
// mysqld is used to fetch the local schema.
mysqld mysqlctl.MysqlDaemon
pkInfoMap map[string][]*PrimaryKeyInfo
mysqld mysqlctl.MysqlDaemon
colInfoMap map[string][]*ColumnInfo
originalFKCheckSetting int64
}
@ -154,11 +154,11 @@ func (vr *vreplicator) Replicate(ctx context.Context) error {
}
func (vr *vreplicator) replicate(ctx context.Context) error {
pkInfo, err := vr.buildPkInfoMap(ctx)
colInfo, err := vr.buildColInfoMap(ctx)
if err != nil {
return err
}
vr.pkInfoMap = pkInfo
vr.colInfoMap = colInfo
if err := vr.getSettingFKCheck(); err != nil {
return err
}
@ -224,22 +224,24 @@ func (vr *vreplicator) replicate(ctx context.Context) error {
}
}
// PrimaryKeyInfo is used to store charset and collation for primary keys where applicable
type PrimaryKeyInfo struct {
Name string
CharSet string
Collation string
DataType string
ColumnType string
// ColumnInfo stores per-column metadata: name, charset, collation, type, and whether the column is a primary key or a generated column
type ColumnInfo struct {
Name string
CharSet string
Collation string
DataType string
ColumnType string
IsPK bool
IsGenerated bool
}
func (vr *vreplicator) buildPkInfoMap(ctx context.Context) (map[string][]*PrimaryKeyInfo, error) {
func (vr *vreplicator) buildColInfoMap(ctx context.Context) (map[string][]*ColumnInfo, error) {
schema, err := vr.mysqld.GetSchema(ctx, vr.dbClient.DBName(), []string{"/.*/"}, nil, false)
if err != nil {
return nil, err
}
queryTemplate := "select character_set_name, collation_name, column_name, data_type, column_type from information_schema.columns where table_schema=%s and table_name=%s;"
pkInfoMap := make(map[string][]*PrimaryKeyInfo)
queryTemplate := "select character_set_name, collation_name, column_name, data_type, column_type, extra from information_schema.columns where table_schema=%s and table_name=%s;"
colInfoMap := make(map[string][]*ColumnInfo)
for _, td := range schema.TableDefinitions {
query := fmt.Sprintf(queryTemplate, encodeString(vr.dbClient.DBName()), encodeString(td.Name))
@ -257,47 +259,56 @@ func (vr *vreplicator) buildPkInfoMap(ctx context.Context) (map[string][]*Primar
} else {
pks = td.Columns
}
var pkInfos []*PrimaryKeyInfo
for _, pk := range pks {
var colInfo []*ColumnInfo
for _, row := range qr.Rows {
charSet := ""
collation := ""
columnName := ""
isPK := false
isGenerated := false
var dataType, columnType string
for _, row := range qr.Rows {
columnName := row[2].ToString()
if strings.EqualFold(columnName, pk) {
var currentField *querypb.Field
for _, field := range td.Fields {
if field.Name == pk {
currentField = field
break
}
}
if currentField == nil {
continue
}
dataType = row[3].ToString()
columnType = row[4].ToString()
if sqltypes.IsText(currentField.Type) {
charSet = row[0].ToString()
collation = row[1].ToString()
}
columnName = row[2].ToString()
var currentField *querypb.Field
for _, field := range td.Fields {
if field.Name == columnName {
currentField = field
break
}
}
if dataType == "" || columnType == "" {
return nil, fmt.Errorf("no dataType/columnType found in information_schema.columns for table %s, column %s", td.Name, pk)
if currentField == nil {
continue
}
pkInfos = append(pkInfos, &PrimaryKeyInfo{
Name: pk,
CharSet: charSet,
Collation: collation,
DataType: dataType,
ColumnType: columnType,
dataType = row[3].ToString()
columnType = row[4].ToString()
if sqltypes.IsText(currentField.Type) {
charSet = row[0].ToString()
collation = row[1].ToString()
}
if dataType == "" || columnType == "" {
return nil, fmt.Errorf("no dataType/columnType found in information_schema.columns for table %s, column %s", td.Name, columnName)
}
for _, pk := range pks {
if columnName == pk {
isPK = true
}
}
extra := strings.ToLower(row[5].ToString())
if strings.Contains(extra, "generated") || strings.Contains(extra, "virtual") {
isGenerated = true
}
colInfo = append(colInfo, &ColumnInfo{
Name: columnName,
CharSet: charSet,
Collation: collation,
DataType: dataType,
ColumnType: columnType,
IsPK: isPK,
IsGenerated: isGenerated,
})
}
pkInfoMap[td.Name] = pkInfos
colInfoMap[td.Name] = colInfo
}
return pkInfoMap, nil
return colInfoMap, nil
}
func (vr *vreplicator) readSettings(ctx context.Context) (settings binlogplayer.VRSettings, numTablesToCopy int64, err error) {
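
buildColInfoMap above now reads every column (not just the primary key columns) from information_schema.columns and flags generated columns by inspecting the extra field, which MySQL 5.7+ reports with values such as "VIRTUAL GENERATED" or "STORED GENERATED". A minimal sketch of that classification on already-decoded rows; the row struct is a hypothetical stand-in for the sqltypes result:

package main

import (
	"fmt"
	"strings"
)

// columnRow is a hypothetical, already-decoded row from
// information_schema.columns with just the fields the classifier needs.
type columnRow struct {
	Name  string
	Extra string // e.g. "", "auto_increment", "VIRTUAL GENERATED", "STORED GENERATED"
}

// classify marks a column as generated when the extra field mentions
// "generated" or "virtual", mirroring the check in buildColInfoMap, and as a
// primary key when its name appears in the table's PK column list.
func classify(row columnRow, pkCols []string) (isPK, isGenerated bool) {
	extra := strings.ToLower(row.Extra)
	isGenerated = strings.Contains(extra, "generated") || strings.Contains(extra, "virtual")
	for _, pk := range pkCols {
		if pk == row.Name {
			isPK = true
			break
		}
	}
	return isPK, isGenerated
}

func main() {
	rows := []columnRow{
		{Name: "id", Extra: ""},
		{Name: "val2", Extra: "VIRTUAL GENERATED"},
		{Name: "val4", Extra: "STORED GENERATED"},
	}
	for _, r := range rows {
		pk, gen := classify(r, []string{"id"})
		fmt.Printf("%s: pk=%v generated=%v\n", r.Name, pk, gen)
	}
}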


@ -346,7 +346,7 @@ func (vse *Engine) setWatch() {
}
var vschema *vindexes.VSchema
if v != nil {
vschema, err = vindexes.BuildVSchema(v)
vschema = vindexes.BuildVSchema(v)
if err != nil {
log.Errorf("Error building vschema: %v", err)
vse.vschemaErrors.Add(1)


@ -38,7 +38,7 @@ func Fuzz(data []byte) int {
"ks": &kspb,
},
}
vschema, err := vindexes.BuildVSchema(srvVSchema)
vschema := vindexes.BuildVSchema(srvVSchema)
if err != nil {
return -1
}


@ -21,7 +21,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@ -87,8 +86,7 @@ func TestFindColVindex(t *testing.T) {
},
},
}
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
require.NoError(t, err)
vschema := vindexes.BuildVSchema(testSrvVSchema)
testcases := []struct {
keyspace string
@ -151,8 +149,7 @@ func TestFindOrCreateVindex(t *testing.T) {
},
},
}
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
require.NoError(t, err)
vschema := vindexes.BuildVSchema(testSrvVSchema)
lvs := &localVSchema{
keyspace: "ks1",
@ -207,8 +204,7 @@ func TestFindTable(t *testing.T) {
},
},
}
vschema, err := vindexes.BuildVSchema(testSrvVSchema)
require.NoError(t, err)
vschema := vindexes.BuildVSchema(testSrvVSchema)
testcases := []struct {
keyspace string


@ -22,13 +22,13 @@ import (
"strconv"
"strings"
"vitess.io/vitess/go/vt/vtgate/evalengine"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
@ -59,10 +59,20 @@ type Plan struct {
type Opcode int
const (
// Equal is used to filter an integer column on a specific value
// Equal is used to filter a comparable column on a specific value
Equal = Opcode(iota)
// VindexMatch is used for an in_keyrange() construct
VindexMatch
// LessThan is used to filter a comparable column if < specific value
LessThan
// LessThanEqual is used to filter a comparable column if <= specific value
LessThanEqual
// GreaterThan is used to filter a comparable column if > specific value
GreaterThan
// GreaterThanEqual is used to filter a comparable column if >= specific value
GreaterThanEqual
// NotEqual is used to filter a comparable column if != specific value
NotEqual
)
// Filter contains opcodes for filtering.
@ -113,6 +123,72 @@ func (plan *Plan) fields() []*querypb.Field {
return fields
}
// getOpcode returns the equivalent planbuilder opcode for operators that are supported in Filters
func getOpcode(comparison *sqlparser.ComparisonExpr) (Opcode, error) {
var opcode Opcode
switch comparison.Operator {
case sqlparser.EqualOp:
opcode = Equal
case sqlparser.LessThanOp:
opcode = LessThan
case sqlparser.LessEqualOp:
opcode = LessThanEqual
case sqlparser.GreaterThanOp:
opcode = GreaterThan
case sqlparser.GreaterEqualOp:
opcode = GreaterThanEqual
case sqlparser.NotEqualOp:
opcode = NotEqual
default:
return -1, fmt.Errorf("comparison operator %s not supported", comparison.Operator.ToString())
}
return opcode, nil
}
// compare returns true after applying the comparison specified in the Filter to the actual data in the column
func compare(comparison Opcode, columnValue, filterValue sqltypes.Value) (bool, error) {
// use null semantics: return false if either value is null
if columnValue.IsNull() || filterValue.IsNull() {
return false, nil
}
// at this point neither value can be null
// NullsafeCompare returns 0 if values match, -1 if columnValue < filterValue, 1 if columnValue > filterValue
result, err := evalengine.NullsafeCompare(columnValue, filterValue)
if err != nil {
return false, err
}
switch comparison {
case Equal:
if result == 0 {
return true, nil
}
case NotEqual:
if result != 0 {
return true, nil
}
case LessThan:
if result == -1 {
return true, nil
}
case LessThanEqual:
if result <= 0 {
return true, nil
}
case GreaterThan:
if result == 1 {
return true, nil
}
case GreaterThanEqual:
if result >= 0 {
return true, nil
}
default:
return false, fmt.Errorf("comparison operator %d not supported", comparison)
}
return false, nil
}
// filter filters the row against the plan. It returns false if the row did not match.
// The output of the filtering operation is stored in the 'result' argument because
// filtering cannot be performed in-place. The result argument must be a slice of
@ -123,14 +199,6 @@ func (plan *Plan) filter(values, result []sqltypes.Value) (bool, error) {
}
for _, filter := range plan.Filters {
switch filter.Opcode {
case Equal:
result, err := evalengine.NullsafeCompare(values[filter.ColNum], filter.Value)
if err != nil {
return false, err
}
if result != 0 {
return false, nil
}
case VindexMatch:
ksid, err := getKeyspaceID(values, filter.Vindex, filter.VindexColumns, plan.Table.Fields)
if err != nil {
@ -139,6 +207,14 @@ func (plan *Plan) filter(values, result []sqltypes.Value) (bool, error) {
if !key.KeyRangeContains(filter.KeyRange, ksid) {
return false, nil
}
default:
match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value)
if err != nil {
return false, err
}
if !match {
return false, nil
}
}
}
for i, colExpr := range plan.ColExprs {
@ -376,6 +452,10 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er
for _, expr := range exprs {
switch expr := expr.(type) {
case *sqlparser.ComparisonExpr:
opcode, err := getOpcode(expr)
if err != nil {
return err
}
qualifiedName, ok := expr.Left.(*sqlparser.ColName)
if !ok {
return fmt.Errorf("unexpected: %v", sqlparser.String(expr))
@ -404,7 +484,7 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er
return err
}
plan.Filters = append(plan.Filters, Filter{
Opcode: Equal,
Opcode: opcode,
ColNum: colnum,
Value: resolved,
})


@ -20,6 +20,8 @@ import (
"fmt"
"testing"
"vitess.io/vitess/go/vt/proto/topodata"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/utils"
@ -83,10 +85,7 @@ func init() {
"ks": &kspb,
},
}
vschema, err := vindexes.BuildVSchema(srvVSchema)
if err != nil {
panic(err)
}
vschema := vindexes.BuildVSchema(srvVSchema)
testLocalVSchema = &localVSchema{
keyspace: "ks",
vschema: vschema,
@ -585,3 +584,116 @@ func TestPlanBuilder(t *testing.T) {
})
}
}
func TestPlanBuilderFilterComparison(t *testing.T) {
t1 := &Table{
Name: "t1",
Fields: []*querypb.Field{{
Name: "id",
Type: sqltypes.Int64,
}, {
Name: "val",
Type: sqltypes.VarBinary,
}},
}
hashVindex, err := vindexes.NewHash("hash", nil)
require.NoError(t, err)
testcases := []struct {
name string
inFilter string
outFilters []Filter
outErr string
}{{
name: "equal",
inFilter: "select * from t1 where id = 1",
outFilters: []Filter{{Opcode: Equal, ColNum: 0, Value: sqltypes.NewInt64(1)}},
}, {
name: "not-equal",
inFilter: "select * from t1 where id <> 1",
outFilters: []Filter{{Opcode: NotEqual, ColNum: 0, Value: sqltypes.NewInt64(1)}},
}, {
name: "greater",
inFilter: "select * from t1 where val > 'abc'",
outFilters: []Filter{{Opcode: GreaterThan, ColNum: 1, Value: sqltypes.NewVarBinary("abc")}},
}, {
name: "greater-than",
inFilter: "select * from t1 where id >= 1",
outFilters: []Filter{{Opcode: GreaterThanEqual, ColNum: 0, Value: sqltypes.NewInt64(1)}},
}, {
name: "less-than-with-and",
inFilter: "select * from t1 where id < 2 and val <= 'xyz'",
outFilters: []Filter{{Opcode: LessThan, ColNum: 0, Value: sqltypes.NewInt64(2)},
{Opcode: LessThanEqual, ColNum: 1, Value: sqltypes.NewVarBinary("xyz")},
},
}, {
name: "vindex-and-operators",
inFilter: "select * from t1 where in_keyrange(id, 'hash', '-80') and id = 2 and val <> 'xyz'",
outFilters: []Filter{
{
Opcode: VindexMatch,
ColNum: 0,
Value: sqltypes.NULL,
Vindex: hashVindex,
VindexColumns: []int{0},
KeyRange: &topodata.KeyRange{
Start: nil,
End: []byte("\200"),
},
},
{Opcode: Equal, ColNum: 0, Value: sqltypes.NewInt64(2)},
{Opcode: NotEqual, ColNum: 1, Value: sqltypes.NewVarBinary("xyz")},
},
}}
for _, tcase := range testcases {
t.Run(tcase.name, func(t *testing.T) {
plan, err := buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{Match: "t1", Filter: tcase.inFilter}},
})
if tcase.outErr != "" {
assert.Nil(t, plan)
assert.EqualError(t, err, tcase.outErr)
return
}
require.NotNil(t, plan)
require.ElementsMatchf(t, tcase.outFilters, plan.Filters, "want %+v, got: %+v", tcase.outFilters, plan.Filters)
})
}
}
func TestCompare(t *testing.T) {
type testcase struct {
opcode Opcode
columnValue, filterValue sqltypes.Value
want bool
}
int1 := sqltypes.NewInt32(1)
int2 := sqltypes.NewInt32(2)
testcases := []*testcase{
{opcode: Equal, columnValue: int1, filterValue: int1, want: true},
{opcode: Equal, columnValue: int1, filterValue: int2, want: false},
{opcode: Equal, columnValue: int1, filterValue: sqltypes.NULL, want: false},
{opcode: LessThan, columnValue: int2, filterValue: int1, want: false},
{opcode: LessThan, columnValue: int1, filterValue: int2, want: true},
{opcode: LessThan, columnValue: int1, filterValue: sqltypes.NULL, want: false},
{opcode: GreaterThan, columnValue: int2, filterValue: int1, want: true},
{opcode: GreaterThan, columnValue: int1, filterValue: int2, want: false},
{opcode: GreaterThan, columnValue: int1, filterValue: sqltypes.NULL, want: false},
{opcode: NotEqual, columnValue: int1, filterValue: int1, want: false},
{opcode: NotEqual, columnValue: int1, filterValue: int2, want: true},
{opcode: NotEqual, columnValue: sqltypes.NULL, filterValue: int1, want: false},
{opcode: LessThanEqual, columnValue: int1, filterValue: sqltypes.NULL, want: false},
{opcode: GreaterThanEqual, columnValue: int2, filterValue: int1, want: true},
{opcode: LessThanEqual, columnValue: int2, filterValue: int1, want: false},
{opcode: GreaterThanEqual, columnValue: int1, filterValue: int1, want: true},
{opcode: LessThanEqual, columnValue: int1, filterValue: int2, want: true},
}
for _, tc := range testcases {
t.Run("", func(t *testing.T) {
got, err := compare(tc.opcode, tc.columnValue, tc.filterValue)
require.NoError(t, err)
require.Equal(t, tc.want, got)
})
}
}


@ -102,7 +102,7 @@ type streamerPlan struct {
// "select * from t where in_keyrange(col1, 'hash', '-80')",
// "select col1, col2 from t where...",
// "select col1, keyspace_id() from t where...".
// Only "in_keyrange" expressions are supported in the where clause.
// Only "in_keyrange" and limited comparison operators (see enum Opcode in planbuilder.go) are supported in the where clause.
// Other constructs like joins, group by, etc. are not supported.
// vschema: the current vschema. This value can later be changed through the SetVSchema method.
// send: callback function to send events.
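
With the planbuilder change above, a vstream filter query may combine in_keyrange() with the simple comparison operators listed in the Opcode enum. A short sketch of such a rule, built the same way the tests in this commit build theirs; the table, the columns, and the 'hash' vindex are illustrative assumptions:

package main

import (
	"fmt"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)

func main() {
	// One rule per table: stream only the -80 keyrange of t1 and, within it,
	// only rows with id >= 100 whose deleted flag is not set. Both the
	// comparison operators and in_keyrange() are evaluated by the planbuilder
	// filter described above.
	filter := &binlogdatapb.Filter{
		Rules: []*binlogdatapb.Rule{{
			Match:  "t1",
			Filter: "select * from t1 where in_keyrange(id, 'hash', '-80') and id >= 100 and deleted <> 1",
		}},
	}
	fmt.Println(filter.Rules[0].Filter)
}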


@ -1922,6 +1922,54 @@ func TestFilteredMultipleWhere(t *testing.T) {
runCases(t, filter, testcases, "", nil)
}
// TestGeneratedColumns just confirms that generated columns are sent in a vstream as expected
func TestGeneratedColumns(t *testing.T) {
flavor := strings.ToLower(env.Flavor)
// Skip this test on Percona (which identifies as mysql56) and MariaDB platforms in CI, since generated column
// support was only added in MySQL 5.7 and MariaDB added MySQL-compatible generated columns in 10.2
if !strings.Contains(flavor, "mysql57") && !strings.Contains(flavor, "mysql80") {
return
}
execStatements(t, []string{
"create table t1(id int, val varbinary(6), val2 varbinary(6) as (concat(id, val)), val3 varbinary(6) as (concat(val, id)), id2 int, primary key(id))",
})
defer execStatements(t, []string{
"drop table t1",
})
engine.se.Reload(context.Background())
queries := []string{
"begin",
"insert into t1(id, val, id2) values (1, 'aaa', 10)",
"insert into t1(id, val, id2) values (2, 'bbb', 20)",
"commit",
}
fe := &TestFieldEvent{
table: "t1",
db: "vttest",
cols: []*TestColumn{
{name: "id", dataType: "INT32", colType: "", len: 11, charset: 63},
{name: "val", dataType: "VARBINARY", colType: "", len: 6, charset: 63},
{name: "val2", dataType: "VARBINARY", colType: "", len: 6, charset: 63},
{name: "val3", dataType: "VARBINARY", colType: "", len: 6, charset: 63},
{name: "id2", dataType: "INT32", colType: "", len: 11, charset: 63},
},
}
testcases := []testcase{{
input: queries,
output: [][]string{{
`begin`,
fe.String(),
`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:3 lengths:4 lengths:4 lengths:2 values:"1aaa1aaaaaa110" > > > `,
`type:ROW row_event:<table_name:"t1" row_changes:<after:<lengths:1 lengths:3 lengths:4 lengths:4 lengths:2 values:"2bbb2bbbbbb220" > > > `,
`gtid`,
`commit`,
}},
}}
runCases(t, nil, testcases, "current", nil)
}
func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, position string, tablePK []*binlogdatapb.TableLastPK) {
ctx, cancel := context.WithCancel(context.Background())


@ -100,6 +100,11 @@ func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err er
dc.queriesRE[query] = &dbResults{results: []*dbResult{dbr}, err: err}
}
func (dc *fakeDBClient) getInvariant(query string) *sqltypes.Result {
return dc.invariants[query]
}
// note: addInvariant replaces any previously registered result for a query with the provided one; the tests rely on this
func (dc *fakeDBClient) addInvariant(query string, result *sqltypes.Result) {
dc.invariants[query] = result
}
@ -138,6 +143,7 @@ func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re
if testMode == "debug" {
fmt.Printf("ExecuteFetch: %s\n", query)
}
if dbrs := dc.queries[query]; dbrs != nil {
return dbrs.next(query)
}


@ -1032,7 +1032,6 @@ func (mz *materializer) generateInserts(ctx context.Context) (string, error) {
if !ok {
return "", fmt.Errorf("unrecognized statement: %s", ts.SourceExpression)
}
filter := ts.SourceExpression
if mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference {
cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable])
@ -1054,12 +1053,23 @@ func (mz *materializer) generateInserts(ctx context.Context) (string, error) {
vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name)
subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)})
subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")})
sel.Where = &sqlparser.Where{
Type: sqlparser.WhereClause,
Expr: &sqlparser.FuncExpr{
Name: sqlparser.NewColIdent("in_keyrange"),
Exprs: subExprs,
},
inKeyRange := &sqlparser.FuncExpr{
Name: sqlparser.NewColIdent("in_keyrange"),
Exprs: subExprs,
}
if sel.Where != nil {
sel.Where = &sqlparser.Where{
Type: sqlparser.WhereClause,
Expr: &sqlparser.AndExpr{
Left: inKeyRange,
Right: sel.Where.Expr,
},
}
} else {
sel.Where = &sqlparser.Where{
Type: sqlparser.WhereClause,
Expr: inKeyRange,
}
}
filter = sqlparser.String(sel)
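
The branch above preserves a user-supplied WHERE clause on the source expression and merges it with the in_keyrange() predicate via an AndExpr, instead of overwriting it as before. A minimal sketch of that merge on sqlparser ASTs, assuming the sqlparser helpers already used above (Parse, String, Where, AndExpr, FuncExpr); the query text is illustrative:

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

// addInKeyRange ands an in_keyrange() call into sel's WHERE clause, or
// installs it as the WHERE clause if none exists, mirroring the branch above.
func addInKeyRange(sel *sqlparser.Select, inKeyRange *sqlparser.FuncExpr) {
	if sel.Where != nil {
		sel.Where = &sqlparser.Where{
			Type: sqlparser.WhereClause,
			Expr: &sqlparser.AndExpr{Left: inKeyRange, Right: sel.Where.Expr},
		}
		return
	}
	sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: inKeyRange}
}

func main() {
	stmt, err := sqlparser.Parse("select id, val from t1 where val > 10")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)
	inKeyRange := &sqlparser.FuncExpr{
		Name: sqlparser.NewColIdent("in_keyrange"),
		Exprs: sqlparser.SelectExprs{
			&sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")},
		},
	}
	addInKeyRange(sel, inKeyRange)
	// Roughly: select id, val from t1 where in_keyrange('{{.keyrange}}') and val > 10
	fmt.Println(sqlparser.String(sel))
}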


@ -80,6 +80,7 @@ func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sour
if err != nil {
return vterrors.Wrap(err, "buildResharder")
}
rs.stopAfterCopy = stopAfterCopy
if !skipSchemaCopy {
if err := rs.copySchema(ctx); err != nil {


@ -26,6 +26,8 @@ import (
"sync"
"time"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/concurrency"
@ -367,8 +369,52 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam
return sw.logs(), nil
}
func (wr *Wrangler) areTabletsAvailableToStreamFrom(ctx context.Context, ts *trafficSwitcher, keyspace string, shards []*topo.ShardInfo) error {
var cells []string
tabletTypes := ts.optTabletTypes
if ts.optCells != "" {
cells = strings.Split(ts.optCells, ",")
}
// FIXME: currently there is a default setting in the tablet that is used if the user does not specify a tablet type:
// we use the value specified in the tablet flag `-vreplication_tablet_type`,
// but ideally we should populate the vreplication table with a default value when we set up the workflow
if tabletTypes == "" {
tabletTypes = "MASTER,REPLICA"
}
var wg sync.WaitGroup
allErrors := &concurrency.AllErrorRecorder{}
for _, shard := range shards {
wg.Add(1)
go func(cells []string, keyspace string, shard *topo.ShardInfo) {
defer wg.Done()
if cells == nil {
cells = append(cells, shard.MasterAlias.Cell)
}
tp, err := discovery.NewTabletPicker(wr.ts, cells, keyspace, shard.ShardName(), tabletTypes)
if err != nil {
allErrors.RecordError(err)
return
}
tablets := tp.GetMatchingTablets(ctx)
if len(tablets) == 0 {
allErrors.RecordError(fmt.Errorf("no tablet found to source data in keyspace %s, shard %s", keyspace, shard.ShardName()))
return
}
}(cells, keyspace, shard)
}
wg.Wait()
if allErrors.HasErrors() {
log.Errorf("%s", allErrors.Error())
return allErrors.Error()
}
return nil
}
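
areTabletsAvailableToStreamFrom above fans out one goroutine per shard and records every failure, so the caller sees all shards that lack an eligible source tablet rather than only the first. A minimal sketch of that fan-out-and-collect pattern using only the standard library; the per-shard check is a hypothetical callback standing in for the tablet picker:

package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// checkAll runs check concurrently for every shard and returns all failures
// joined into a single error, mirroring the WaitGroup + error-recorder
// structure used above.
func checkAll(shards []string, check func(shard string) error) error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		msgs []string
	)
	for _, shard := range shards {
		wg.Add(1)
		go func(shard string) {
			defer wg.Done()
			if err := check(shard); err != nil {
				mu.Lock()
				msgs = append(msgs, err.Error())
				mu.Unlock()
			}
		}(shard)
	}
	wg.Wait()
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	err := checkAll([]string{"-80", "80-"}, func(shard string) error {
		if shard == "80-" {
			return fmt.Errorf("no tablet found to source data in shard %s", shard)
		}
		return nil
	})
	fmt.Println(err)
}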
// SwitchWrites is a generic way of migrating write traffic for a resharding workflow.
func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowName string, timeout time.Duration, cancel, reverse, reverseReplication bool, dryRun bool) (journalID int64, dryRunResults *[]string, err error) {
func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowName string, timeout time.Duration,
cancel, reverse, reverseReplication bool, dryRun bool) (journalID int64, dryRunResults *[]string, err error) {
ts, ws, err := wr.getWorkflowState(ctx, targetKeyspace, workflowName)
_ = ws
if err != nil {
@ -399,6 +445,13 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa
return 0, nil, err
}
if reverseReplication {
err := wr.areTabletsAvailableToStreamFrom(ctx, ts, ts.targetKeyspace, ts.targetShards())
if err != nil {
return 0, nil, err
}
}
// Need to lock both source and target keyspaces.
tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.sourceKeyspace, "SwitchWrites")
if lockErr != nil {


@ -314,9 +314,8 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe
tme.startTablets(t)
tme.createDBClients(ctx, t)
tme.setMasterPositions()
for i, targetShard := range targetShards {
var rows []string
var rows, rowsRdOnly []string
for j, sourceShard := range sourceShards {
if !key.KeyRangesIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) {
continue
@ -332,12 +331,18 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe
},
}
rows = append(rows, fmt.Sprintf("%d|%v|||", j+1, bls))
rowsRdOnly = append(rows, fmt.Sprintf("%d|%v|||RDONLY", j+1, bls))
}
tme.dbTargetClients[i].addInvariant(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types",
"int64|varchar|varchar|varchar|varchar"),
rows...),
)
tme.dbTargetClients[i].addInvariant(vreplQueryks+"-rdonly", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types",
"int64|varchar|varchar|varchar|varchar"),
rowsRdOnly...),
)
}
tme.targetKeyspace = "ks"


@ -1735,6 +1735,303 @@ func TestReverseVReplicationUpdateQuery(t *testing.T) {
}
}
func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) {
ctx := context.Background()
tme := newTestShardMigrater(ctx, t, []string{"-40", "40-"}, []string{"-80", "80-"})
defer tme.stopTablets(t)
// Initial check
checkServedTypes(t, tme.ts, "ks:-40", 3)
checkServedTypes(t, tme.ts, "ks:40-", 3)
checkServedTypes(t, tme.ts, "ks:-80", 0)
checkServedTypes(t, tme.ts, "ks:80-", 0)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// Single cell RDONLY migration.
_, err := tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, []string{"cell1"}, workflow.DirectionForward, false)
if err != nil {
t.Fatal(err)
}
checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0)
verifyQueries(t, tme.allDBClients)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// Other cell REPLICA migration.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}, []string{"cell2"}, workflow.DirectionForward, false)
if err != nil {
t.Fatal(err)
}
checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 1)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 1)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 2)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 2)
verifyQueries(t, tme.allDBClients)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// Single cell backward REPLICA migration.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}, []string{"cell2"}, workflow.DirectionBackward, false)
if err != nil {
t.Fatal(err)
}
checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1)
checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3)
checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3)
checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0)
checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0)
verifyQueries(t, tme.allDBClients)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// Switch all RDONLY.
// This is an extra step that does not exist in the tables test.
// The per-cell migration mechanism is different for tables. So, this
// extra step is needed to bring things in sync.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, nil, workflow.DirectionForward, false)
if err != nil {
t.Fatal(err)
}
checkServedTypes(t, tme.ts, "ks:-40", 2)
checkServedTypes(t, tme.ts, "ks:40-", 2)
checkServedTypes(t, tme.ts, "ks:-80", 1)
checkServedTypes(t, tme.ts, "ks:80-", 1)
verifyQueries(t, tme.allDBClients)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// Switch all REPLICA.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}, nil, workflow.DirectionForward, false)
if err != nil {
t.Fatal(err)
}
checkServedTypes(t, tme.ts, "ks:-40", 1)
checkServedTypes(t, tme.ts, "ks:40-", 1)
checkServedTypes(t, tme.ts, "ks:-80", 2)
checkServedTypes(t, tme.ts, "ks:80-", 2)
verifyQueries(t, tme.allDBClients)
tme.expectNoPreviousJournals()
//-------------------------------------------------------------------------------------------------------------------
// All cells RDONLY backward migration.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, nil, workflow.DirectionBackward, false)
if err != nil {
t.Fatal(err)
}
checkServedTypes(t, tme.ts, "ks:-40", 2)
checkServedTypes(t, tme.ts, "ks:40-", 2)
checkServedTypes(t, tme.ts, "ks:-80", 1)
checkServedTypes(t, tme.ts, "ks:80-", 1)
verifyQueries(t, tme.allDBClients)
//-------------------------------------------------------------------------------------------------------------------
// Can't switch master with SwitchReads.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_MASTER}, nil, workflow.DirectionForward, false)
want := "tablet type must be REPLICA or RDONLY: MASTER"
if err == nil || err.Error() != want {
t.Errorf("SwitchReads(master) err: %v, want %v", err, want)
}
verifyQueries(t, tme.allDBClients)
//-------------------------------------------------------------------------------------------------------------------
// Test SwitchWrites cancelation on failure.
tme.expectNoPreviousJournals()
// Switch all the reads first.
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, nil, workflow.DirectionForward, false)
if err != nil {
t.Fatal(err)
}
checkServedTypes(t, tme.ts, "ks:-40", 1)
checkServedTypes(t, tme.ts, "ks:40-", 1)
checkServedTypes(t, tme.ts, "ks:-80", 2)
checkServedTypes(t, tme.ts, "ks:80-", 2)
checkIsMasterServing(t, tme.ts, "ks:-40", true)
checkIsMasterServing(t, tme.ts, "ks:40-", true)
checkIsMasterServing(t, tme.ts, "ks:-80", false)
checkIsMasterServing(t, tme.ts, "ks:80-", false)
checkJournals := func() {
tme.dbSourceClients[0].addQuery("select val from _vt.resharding_journal where id=6432976123657117097", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("select val from _vt.resharding_journal where id=6432976123657117097", &sqltypes.Result{}, nil)
}
checkJournals()
stopStreams := func() {
tme.dbSourceClients[0].addQuery("select id, workflow, source, pos from _vt.vreplication where db_name='vt_ks' and workflow != 'test_reverse' and state = 'Stopped' and message != 'FROZEN'", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("select id, workflow, source, pos from _vt.vreplication where db_name='vt_ks' and workflow != 'test_reverse' and state = 'Stopped' and message != 'FROZEN'", &sqltypes.Result{}, nil)
tme.dbSourceClients[0].addQuery("select id, workflow, source, pos from _vt.vreplication where db_name='vt_ks' and workflow != 'test_reverse'", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("select id, workflow, source, pos from _vt.vreplication where db_name='vt_ks' and workflow != 'test_reverse'", &sqltypes.Result{}, nil)
}
stopStreams()
deleteReverseReplicaion := func() {
tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid3, nil)
tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil)
tme.dbSourceClients[0].addQuery("delete from _vt.vreplication where id in (3)", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("delete from _vt.vreplication where id in (3, 4)", &sqltypes.Result{}, nil)
tme.dbSourceClients[0].addQuery("delete from _vt.copy_state where vrepl_id in (3)", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("delete from _vt.copy_state where vrepl_id in (3, 4)", &sqltypes.Result{}, nil)
}
cancelMigration := func() {
tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow != 'test_reverse'", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow != 'test_reverse'", &sqltypes.Result{}, nil)
tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test'", resultid12, nil)
tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test'", resultid2, nil)
tme.dbTargetClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (1, 2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 1", runningResult(1), nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 2", runningResult(2), nil)
tme.dbTargetClients[1].addQuery("select * from _vt.vreplication where id = 2", runningResult(2), nil)
deleteReverseReplicaion()
}
cancelMigration()
_, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false)
want = "DeadlineExceeded"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("SwitchWrites(0 timeout) err: %v, must contain %v", err, want)
}
verifyQueries(t, tme.allDBClients)
checkServedTypes(t, tme.ts, "ks:-40", 1)
checkServedTypes(t, tme.ts, "ks:40-", 1)
checkServedTypes(t, tme.ts, "ks:-80", 2)
checkServedTypes(t, tme.ts, "ks:80-", 2)
checkIsMasterServing(t, tme.ts, "ks:-40", true)
checkIsMasterServing(t, tme.ts, "ks:40-", true)
checkIsMasterServing(t, tme.ts, "ks:-80", false)
checkIsMasterServing(t, tme.ts, "ks:80-", false)
//-------------------------------------------------------------------------------------------------------------------
// Test successful SwitchWrites.
checkJournals()
stopStreams()
waitForCatchup := func() {
// mi.waitForCatchup-> mi.wr.tmc.VReplicationWaitForPos
state := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"pos|state|message",
"varchar|varchar|varchar"),
"MariaDB/5-456-892|Running",
)
tme.dbTargetClients[0].addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil)
tme.dbTargetClients[1].addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil)
tme.dbTargetClients[0].addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil)
// mi.waitForCatchup-> mi.wr.tmc.VReplicationExec('stopped for cutover')
tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where id = 1", resultid1, nil)
tme.dbTargetClients[0].addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (1)", &sqltypes.Result{}, nil)
tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where id = 2", resultid2, nil)
tme.dbTargetClients[0].addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where id = 2", resultid2, nil)
tme.dbTargetClients[1].addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 1", stoppedResult(1), nil)
tme.dbTargetClients[1].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil)
}
waitForCatchup()
createReverseVReplication := func() {
deleteReverseReplicaion()
tme.dbSourceClients[0].addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
tme.dbSourceClients[1].addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
tme.dbSourceClients[1].addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 1", stoppedResult(1), nil)
tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 1", stoppedResult(1), nil)
tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil)
}
createReverseVReplication()
createJournals := func() {
journal1 := "insert into _vt.resharding_journal.*6432976123657117097.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40"
tme.dbSourceClients[0].addQueryRE(journal1, &sqltypes.Result{}, nil)
journal2 := "insert into _vt.resharding_journal.*6432976123657117097.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40"
tme.dbSourceClients[1].addQueryRE(journal2, &sqltypes.Result{}, nil)
}
createJournals()
startReverseVReplication := func() {
tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil)
tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil)
tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil)
tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil)
tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil)
tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil)
tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil)
tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil)
}
startReverseVReplication()
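// freezeTargetVReplication registers the queries expected when the forward streams on the target shards are marked FROZEN.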
freezeTargetVReplication := func() {
tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test'", resultid12, nil)
tme.dbTargetClients[0].addQuery("update _vt.vreplication set message = 'FROZEN' where id in (1, 2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 1", stoppedResult(1), nil)
tme.dbTargetClients[0].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil)
tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test'", resultid2, nil)
tme.dbTargetClients[1].addQuery("update _vt.vreplication set message = 'FROZEN' where id in (2)", &sqltypes.Result{}, nil)
tme.dbTargetClients[1].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil)
}
freezeTargetVReplication()
// Temporarily set the tablet types to RDONLY to test that SwitchWrites fails when no rdonly tablets are available
invariants := make(map[string]*sqltypes.Result)
for i := range tme.targetShards {
invariants[fmt.Sprintf("%s-%d", vreplQueryks, i)] = tme.dbTargetClients[i].getInvariant(vreplQueryks)
tme.dbTargetClients[i].addInvariant(vreplQueryks, tme.dbTargetClients[i].getInvariant(vreplQueryks+"-rdonly"))
}
_, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false)
require.Error(t, err)
require.True(t, strings.Contains(err.Error(), "no tablet found"))
require.True(t, strings.Contains(err.Error(), "-80"))
require.True(t, strings.Contains(err.Error(), "80-"))
require.False(t, strings.Contains(err.Error(), "40"))
for i := range tme.targetShards {
tme.dbTargetClients[i].addInvariant(vreplQueryks, invariants[fmt.Sprintf("%s-%d", vreplQueryks, i)])
}
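// With the original invariants restored, SwitchWrites is expected to succeed and return the journal id registered in createJournals above.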
journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false)
if err != nil {
t.Fatal(err)
}
if journalID != 6432976123657117097 {
t.Errorf("journal id: %d, want 6432976123657117097", journalID)
}
verifyQueries(t, tme.allDBClients)
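// After the switch, the target shards (-80, 80-) should serve all three tablet types and have serving masters, while the source shards (-40, 40-) should serve none.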
checkServedTypes(t, tme.ts, "ks:-40", 0)
checkServedTypes(t, tme.ts, "ks:40-", 0)
checkServedTypes(t, tme.ts, "ks:-80", 3)
checkServedTypes(t, tme.ts, "ks:80-", 3)
checkIsMasterServing(t, tme.ts, "ks:-40", false)
checkIsMasterServing(t, tme.ts, "ks:40-", false)
checkIsMasterServing(t, tme.ts, "ks:-80", true)
checkIsMasterServing(t, tme.ts, "ks:80-", true)
verifyQueries(t, tme.allDBClients)
}
func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) {
t.Helper()
ctx := context.Background()


@ -67,6 +67,20 @@ var (
"tabletmanager_throttler_custom_config",
"tabletmanager_tablegc",
"vtorc",
"vtgate_buffer",
"vtgate_concurrentdml",
"vtgate_gen4",
"vtgate_readafterwrite",
"vtgate_reservedconn",
"vtgate_schema",
"vtgate_topo",
"vtgate_transaction",
"vtgate_unsharded",
"vtgate_vindex",
"vtgate_vschema",
"xb_recovery",
"resharding",
"resharding_bytes",
}
// TODO: some Percona tools, including xtrabackup, are currently installed on all clusters; we could optimize
// this by installing them only in the clusters that require them
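The list above appears to enumerate the named test shards consumed by the CI workflow generator. As a rough, hypothetical sketch only (the variable name clusterNames, the template, and the output path are assumptions, not taken from the repository), a generator along these lines could render one workflow file per named shard:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// clusterNames stands in for the kind of shard-name list shown in the diff above;
// the real variable name and contents in the repository may differ.
var clusterNames = []string{"resharding", "resharding_bytes", "vtgate_buffer"}

// workflowTmpl is a deliberately minimal stand-in for the real workflow template.
const workflowTmpl = `name: Cluster ({{.Name}})
on: [push, pull_request]
`

func main() {
	tmpl := template.Must(template.New("workflow").Parse(workflowTmpl))
	for _, name := range clusterNames {
		path := fmt.Sprintf(".github/workflows/cluster_endtoend_%s.yml", name)
		f, err := os.Create(path)
		if err != nil {
			panic(err)
		}
		// Render the per-shard workflow file from the shared template.
		if err := tmpl.Execute(f, struct{ Name string }{name}); err != nil {
			panic(err)
		}
		f.Close()
	}
}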


@ -418,7 +418,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/v3"],
"Command": [],
"Manual": false,
"Shard": "15",
"Shard": "resharding",
"RetryMax": 0,
"Tags": [
"worker_test"
@ -429,7 +429,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/string"],
"Command": [],
"Manual": false,
"Shard": "15",
"Shard": "resharding_bytes",
"RetryMax": 0,
"Tags": [
"worker_test"
@ -584,7 +584,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/buffer"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_buffer",
"RetryMax": 0,
"Tags": []
},
@ -593,7 +593,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/concurrentdml"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_concurrentdml",
"RetryMax": 0,
"Tags": []
},
@ -602,6 +602,42 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schema"],
"Command": [],
"Manual": false,
"Shard": "vtgate_schema",
"RetryMax": 0,
"Tags": []
},
"vtgate_schematracker_loadkeyspace": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/loadkeyspace"],
"Command": [],
"Manual": false,
"Shard": "17",
"RetryMax": 0,
"Tags": []
},
"vtgate_schematracker_restarttablet": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/restarttablet"],
"Command": [],
"Manual": false,
"Shard": "17",
"RetryMax": 0,
"Tags": []
},
"vtgate_schematracker_sharded": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/sharded"],
"Command": [],
"Manual": false,
"Shard": "17",
"RetryMax": 0,
"Tags": []
},
"vtgate_schematracker_unsharded": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/unsharded"],
"Command": [],
"Manual": false,
"Shard": "17",
"RetryMax": 0,
"Tags": []
@ -620,7 +656,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_reservedconn",
"RetryMax": 0,
"Tags": []
},
@ -629,7 +665,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn/reconnect1"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_reservedconn",
"RetryMax": 0,
"Tags": []
},
@ -638,7 +674,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn/reconnect2"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_reservedconn",
"RetryMax": 0,
"Tags": []
},
@ -647,7 +683,16 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/transaction"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_transaction",
"RetryMax": 0,
"Tags": []
},
"vtgate_transaction_rollback": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/transaction/rollback"],
"Command": [],
"Manual": false,
"Shard": "vtgate_transaction",
"RetryMax": 0,
"Tags": []
},
@ -656,7 +701,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/unsharded"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_unsharded",
"RetryMax": 0,
"Tags": []
},
@ -665,7 +710,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/vschema"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_vschema",
"RetryMax": 0,
"Tags": []
},
@ -674,10 +719,37 @@
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/readafterwrite"],
"Command": [],
"Manual": false,
"Shard": "vtgate_readafterwrite",
"RetryMax": 0,
"Tags": []
},
"vtgate_dbddlplugin": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/createdb_plugin"],
"Command": [],
"Manual": false,
"Shard": "17",
"RetryMax": 0,
"Tags": []
},
"vtgate_gen4": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/gen4"],
"Command": [],
"Manual": false,
"Shard": "vtgate_gen4",
"RetryMax": 0,
"Tags": []
},
"vtgate_watchkeyspace": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/keyspace_watches"],
"Command": [],
"Manual": false,
"Shard": "vtgate_topo",
"RetryMax": 0,
"Tags": []
},
"topo_zk2": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/topotest/zk2", "--topo-flavor=zk2"],
@ -701,7 +773,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/topotest/etcd2"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "vtgate_topo",
"RetryMax": 0,
"Tags": []
},
@ -714,6 +786,24 @@
"RetryMax": 0,
"Tags": []
},
"prefixfanout": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/prefixfanout"],
"Command": [],
"Manual": false,
"Shard": "vtgate_vindex",
"RetryMax": 0,
"Tags": []
},
"vindex_bindvars": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/vindex_bindvars"],
"Command": [],
"Manual": false,
"Shard": "vtgate_vindex",
"RetryMax": 0,
"Tags": []
},
"web_test": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtctldweb"],
@ -737,7 +827,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/recovery/xtrabackup"],
"Command": [],
"Manual": false,
"Shard": "17",
"Shard": "xb_recovery",
"RetryMax": 0,
"Tags": []
},
@ -761,6 +851,15 @@
"RetryMax": 0,
"Tags": []
},
"vreplication_materialize": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "ShardedMaterialize"],
"Command": [],
"Manual": false,
"Shard": "vreplication_multicell",
"RetryMax": 0,
"Tags": []
},
"vreplication_cellalias": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "CellAlias"],

70
web/vtadmin/package-lock.json generated

@ -1137,6 +1137,58 @@
"resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw=="
},
"@bugsnag/browser": {
"version": "7.10.1",
"resolved": "https://registry.npmjs.org/@bugsnag/browser/-/browser-7.10.1.tgz",
"integrity": "sha512-Yxm/DheT/NHX2PhadBDuafuHBhP547Iav6Y9jf+skBBSi1n0ZYkGhtVxh8ZWLgqz5W8MsJ0HFiLBqcg/mulSvQ==",
"requires": {
"@bugsnag/core": "^7.10.0"
}
},
"@bugsnag/core": {
"version": "7.10.0",
"resolved": "https://registry.npmjs.org/@bugsnag/core/-/core-7.10.0.tgz",
"integrity": "sha512-sDa2nDxwsxHQx2/2/tsBWjYqH0TewCR8N/r5at6B+irwVkI0uts7Qc2JyqDTfiEiBXKVEXFK+fHTz1x9b8tsiA==",
"requires": {
"@bugsnag/cuid": "^3.0.0",
"@bugsnag/safe-json-stringify": "^6.0.0",
"error-stack-parser": "^2.0.3",
"iserror": "0.0.2",
"stack-generator": "^2.0.3"
}
},
"@bugsnag/cuid": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@bugsnag/cuid/-/cuid-3.0.0.tgz",
"integrity": "sha512-LOt8aaBI+KvOQGneBtpuCz3YqzyEAehd1f3nC5yr9TIYW1+IzYKa2xWS4EiMz5pPOnRPHkyyS5t/wmSmN51Gjg=="
},
"@bugsnag/js": {
"version": "7.10.1",
"resolved": "https://registry.npmjs.org/@bugsnag/js/-/js-7.10.1.tgz",
"integrity": "sha512-1/MK/Bw2ViFx1hMG2TOX8MOq/LzT2VRd0VswknF4LYsZSgzohkRzz/hi6P2TSlLeapRs+bkDC6u2RCq4zYvyiA==",
"requires": {
"@bugsnag/browser": "^7.10.1",
"@bugsnag/node": "^7.10.1"
}
},
"@bugsnag/node": {
"version": "7.10.1",
"resolved": "https://registry.npmjs.org/@bugsnag/node/-/node-7.10.1.tgz",
"integrity": "sha512-kpasrz/im5ljptt2JOqrjbOu4b0i5sAZOYU4L0psWXlD31/wXytk7im11QlNALdI8gZZBxIFsVo8ks6dR6mHzg==",
"requires": {
"@bugsnag/core": "^7.10.0",
"byline": "^5.0.0",
"error-stack-parser": "^2.0.2",
"iserror": "^0.0.2",
"pump": "^3.0.0",
"stack-generator": "^2.0.3"
}
},
"@bugsnag/safe-json-stringify": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/@bugsnag/safe-json-stringify/-/safe-json-stringify-6.0.0.tgz",
"integrity": "sha512-htzFO1Zc57S8kgdRK9mLcPVTW1BY2ijfH7Dk2CeZmspTWKdKqSo1iwmqrq2WtRjFlo8aRZYgLX0wFrDXF/9DLA=="
},
"@cnakazawa/watch": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@cnakazawa/watch/-/watch-1.0.4.tgz",
@ -4047,6 +4099,11 @@
"resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz",
"integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug="
},
"byline": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz",
"integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE="
},
"bytes": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
@ -8517,6 +8574,11 @@
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"iserror": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/iserror/-/iserror-0.0.2.tgz",
"integrity": "sha1-vVNFH+L2aLnyQCwZZnh6qix8C/U="
},
"isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
@ -15754,6 +15816,14 @@
"resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
"integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w=="
},
"stack-generator": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/stack-generator/-/stack-generator-2.0.5.tgz",
"integrity": "sha512-/t1ebrbHkrLrDuNMdeAcsvynWgoH/i4o8EGGfX7dEYDoTXOYVAkEpFdtshlvabzc6JlJ8Kf9YdFEoz7JkzGN9Q==",
"requires": {
"stackframe": "^1.1.1"
}
},
"stack-utils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.3.tgz",

Просмотреть файл

@ -7,6 +7,7 @@
"npm": ">=6.14.9"
},
"dependencies": {
"@bugsnag/js": "^7.10.1",
"@testing-library/user-event": "^12.6.0",
"@types/classnames": "^2.2.11",
"@types/jest": "^26.0.19",

Some files were not shown because too many files changed in this diff.